author    Yoni Fogel <yoni@tokutek.com>  2012-07-25 20:49:56 +0000
committer Yoni Fogel <yoni@tokutek.com>  2013-04-17 00:01:01 -0400
commit    c527ba297f51755c3bae22e3d2b48f18d51e60b3 (patch)
tree      dc1348249742e83444184654029b4b190e9209b5 /ft
parent    36242fd401ce5a2e668ee2763ef47f8b810b0e7e (diff)
refs #5081 Replace all usage:
BOOL->bool  FALSE->false  TRUE->true  u_int*_t->uint*_t
Also poisoned all of the variables

git-svn-id: file:///svn/toku/tokudb@46157 c7de825b-a66e-492c-adef-691d508d4ae1
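
The "poisoning" in the message is, under GCC, a single pragma: once an identifier is poisoned, any later mention of it is a hard compile error, so the retired spellings cannot creep back in. A minimal sketch of the idea; the header name and placement are illustrative assumptions, not taken from this commit:

    // compat_poison.h (hypothetical): include only after all code has been
    // converted, because even mentioning a poisoned identifier afterwards
    // is a hard compile error.
    #include <stdbool.h>   // bool, true, false
    #include <stdint.h>    // uint8_t .. uint64_t

    #pragma GCC poison BOOL FALSE TRUE
    #pragma GCC poison u_int8_t u_int16_t u_int32_t u_int64_t
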
Diffstat (limited to 'ft')
-rw-r--r--  ft/background_job_manager.cc | 3
-rw-r--r--  ft/block_allocator.cc | 76
-rw-r--r--  ft/block_allocator.h | 24
-rw-r--r--  ft/block_table.cc | 74
-rw-r--r--  ft/block_table.h | 10
-rw-r--r--  ft/cachetable.cc | 278
-rw-r--r--  ft/cachetable.h | 84
-rw-r--r--  ft/checkpoint.cc | 22
-rw-r--r--  ft/checkpoint.h | 8
-rw-r--r--  ft/compression-ratio/cratio.cc | 6
-rw-r--r--  ft/dbufio.cc | 32
-rw-r--r--  ft/fifo.cc | 2
-rw-r--r--  ft/fifo.h | 6
-rw-r--r--  ft/ft-cachetable-wrappers.cc | 34
-rw-r--r--  ft/ft-cachetable-wrappers.h | 20
-rw-r--r--  ft/ft-flusher.cc | 100
-rw-r--r--  ft/ft-flusher.h | 6
-rw-r--r--  ft/ft-hot-flusher.cc | 10
-rw-r--r--  ft/ft-internal.h | 84
-rw-r--r--  ft/ft-node-deserialize.cc | 8
-rw-r--r--  ft/ft-ops.cc | 434
-rw-r--r--  ft/ft-ops.h | 42
-rw-r--r--  ft/ft-search.h | 4
-rw-r--r--  ft/ft-serialize.cc | 60
-rw-r--r--  ft/ft-test-helpers.cc | 10
-rw-r--r--  ft/ft-verify.cc | 10
-rw-r--r--  ft/ft.cc | 18
-rw-r--r--  ft/ft.h | 8
-rw-r--r--  ft/ft_msg.cc | 8
-rw-r--r--  ft/ft_msg.h | 4
-rw-r--r--  ft/ft_node-serialize.cc | 188
-rw-r--r--  ft/ftdump.cc | 86
-rw-r--r--  ft/ftloader-callback.cc | 2
-rw-r--r--  ft/ftloader-internal.h | 40
-rw-r--r--  ft/ftloader.cc | 150
-rw-r--r--  ft/ftloader.h | 4
-rw-r--r--  ft/fttypes.h | 50
-rw-r--r--  ft/ftverify.cc | 25
-rw-r--r--  ft/hash-benchmarks/hash-benchmark-manually-open.cc | 40
-rw-r--r--  ft/hash-benchmarks/hash-benchmark.cc | 120
-rw-r--r--  ft/hashfun.h | 6
-rw-r--r--  ft/kibbutz.cc | 1
-rw-r--r--  ft/le-cursor.h | 8
-rw-r--r--  ft/leafentry.h | 22
-rw-r--r--  ft/log-internal.h | 42
-rw-r--r--  ft/log.h | 10
-rw-r--r--  ft/log_upgrade.cc | 8
-rw-r--r--  ft/logcursor.cc | 30
-rw-r--r--  ft/logformat.cc | 111
-rw-r--r--  ft/logger.cc | 246
-rw-r--r--  ft/logger.h | 74
-rw-r--r--  ft/minicron.cc | 18
-rw-r--r--  ft/minicron.h | 14
-rw-r--r--  ft/omt-tmpl.cc | 1
-rw-r--r--  ft/omt-tmpl.h | 1
-rw-r--r--  ft/omt.cc | 196
-rw-r--r--  ft/omt.h | 30
-rw-r--r--  ft/queue.cc | 18
-rw-r--r--  ft/queue.h | 6
-rw-r--r--  ft/rbuf.h | 24
-rw-r--r--  ft/recover.cc | 82
-rw-r--r--  ft/recover.h | 4
-rw-r--r--  ft/roll.cc | 28
-rw-r--r--  ft/rollback-apply.cc | 18
-rw-r--r--  ft/rollback-ct-callbacks.cc | 14
-rw-r--r--  ft/rollback-ct-callbacks.h | 8
-rw-r--r--  ft/rollback.cc | 12
-rw-r--r--  ft/rollback.h | 2
-rw-r--r--  ft/rwlock.cc | 4
-rw-r--r--  ft/sub_block.cc | 16
-rw-r--r--  ft/sub_block.h | 32
-rw-r--r--  ft/sub_block_map.h | 8
-rw-r--r--  ft/tdb_logprint.cc | 40
-rw-r--r--  ft/tests/block_allocator_test.cc | 36
-rw-r--r--  ft/tests/cachetable-3969.cc | 14
-rw-r--r--  ft/tests/cachetable-4302.cc | 22
-rw-r--r--  ft/tests/cachetable-4357.cc | 6
-rw-r--r--  ft/tests/cachetable-4365.cc | 8
-rw-r--r--  ft/tests/cachetable-4545.cc | 36
-rw-r--r--  ft/tests/cachetable-5097.cc | 42
-rw-r--r--  ft/tests/cachetable-all-write.cc | 14
-rw-r--r--  ft/tests/cachetable-checkpoint-pending.cc | 18
-rw-r--r--  ft/tests/cachetable-checkpoint-pinned-nodes.cc | 32
-rw-r--r--  ft/tests/cachetable-checkpoint-prefetched-nodes.cc | 36
-rw-r--r--  ft/tests/cachetable-checkpoint-test.cc | 14
-rw-r--r--  ft/tests/cachetable-cleaner-checkpoint.cc | 24
-rw-r--r--  ft/tests/cachetable-cleaner-checkpoint2.cc | 24
-rw-r--r--  ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc | 30
-rw-r--r--  ft/tests/cachetable-cleaner-thread-empty-cachetable.cc | 2
-rw-r--r--  ft/tests/cachetable-cleaner-thread-everything-pinned.cc | 8
-rw-r--r--  ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc | 8
-rw-r--r--  ft/tests/cachetable-cleaner-thread-simple.cc | 10
-rw-r--r--  ft/tests/cachetable-clock-all-pinned.cc | 2
-rw-r--r--  ft/tests/cachetable-clock-eviction.cc | 34
-rw-r--r--  ft/tests/cachetable-clock-eviction2.cc | 36
-rw-r--r--  ft/tests/cachetable-clock-eviction3.cc | 36
-rw-r--r--  ft/tests/cachetable-clock-eviction4.cc | 38
-rw-r--r--  ft/tests/cachetable-clone-checkpoint.cc | 30
-rw-r--r--  ft/tests/cachetable-clone-partial-fetch-pinned-node.cc | 32
-rw-r--r--  ft/tests/cachetable-clone-partial-fetch.cc | 36
-rw-r--r--  ft/tests/cachetable-clone-pin-nonblocking.cc | 32
-rw-r--r--  ft/tests/cachetable-clone-unpin-remove.cc | 30
-rw-r--r--  ft/tests/cachetable-count-pinned-test.cc | 6
-rw-r--r--  ft/tests/cachetable-debug-test.cc | 4
-rw-r--r--  ft/tests/cachetable-eviction-close-test.cc | 26
-rw-r--r--  ft/tests/cachetable-eviction-close-test2.cc | 26
-rw-r--r--  ft/tests/cachetable-eviction-getandpin-test.cc | 26
-rw-r--r--  ft/tests/cachetable-eviction-getandpin-test2.cc | 12
-rw-r--r--  ft/tests/cachetable-fd-test.cc | 2
-rw-r--r--  ft/tests/cachetable-flush-during-cleaner.cc | 6
-rw-r--r--  ft/tests/cachetable-flush-test.cc | 10
-rw-r--r--  ft/tests/cachetable-getandpin-test.cc | 16
-rw-r--r--  ft/tests/cachetable-kibbutz_and_flush_cachefile.cc | 14
-rw-r--r--  ft/tests/cachetable-partial-fetch.cc | 42
-rw-r--r--  ft/tests/cachetable-pin-checkpoint.cc | 38
-rw-r--r--  ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc | 10
-rw-r--r--  ft/tests/cachetable-prefetch-checkpoint-test.cc | 18
-rw-r--r--  ft/tests/cachetable-prefetch-close-leak-test.cc | 16
-rw-r--r--  ft/tests/cachetable-prefetch-close-test.cc | 30
-rw-r--r--  ft/tests/cachetable-prefetch-flowcontrol-test.cc | 20
-rw-r--r--  ft/tests/cachetable-prefetch-getandpin-test.cc | 48
-rw-r--r--  ft/tests/cachetable-prefetch-maybegetandpin-test.cc | 6
-rw-r--r--  ft/tests/cachetable-prefetch2-test.cc | 6
-rw-r--r--  ft/tests/cachetable-put-checkpoint.cc | 58
-rw-r--r--  ft/tests/cachetable-put-test.cc | 6
-rw-r--r--  ft/tests/cachetable-rename-test.cc | 16
-rw-r--r--  ft/tests/cachetable-scan.cc | 18
-rw-r--r--  ft/tests/cachetable-simple-clone.cc | 54
-rw-r--r--  ft/tests/cachetable-simple-clone2.cc | 48
-rw-r--r--  ft/tests/cachetable-simple-maybe-get-pin.cc | 4
-rw-r--r--  ft/tests/cachetable-simple-pin-dep-nodes.cc | 64
-rw-r--r--  ft/tests/cachetable-simple-pin-nonblocking.cc | 36
-rw-r--r--  ft/tests/cachetable-simple-pin.cc | 36
-rw-r--r--  ft/tests/cachetable-simple-put-dep-nodes.cc | 66
-rw-r--r--  ft/tests/cachetable-simple-unpin-remove-checkpoint.cc | 10
-rw-r--r--  ft/tests/cachetable-simple-verify.cc | 4
-rw-r--r--  ft/tests/cachetable-test.cc | 108
-rw-r--r--  ft/tests/cachetable-test2.cc | 24
-rw-r--r--  ft/tests/cachetable-unpin-and-remove-test.cc | 14
-rw-r--r--  ft/tests/cachetable-unpin-remove-and-checkpoint.cc | 6
-rw-r--r--  ft/tests/cachetable-unpin-test.cc | 6
-rw-r--r--  ft/tests/cachetable-writer-thread-limit.cc | 10
-rw-r--r--  ft/tests/ft-bfe-query.cc | 50
-rw-r--r--  ft/tests/ft-clock-test.cc | 24
-rw-r--r--  ft/tests/ft-serialize-benchmark.cc | 10
-rw-r--r--  ft/tests/ft-serialize-sub-block-test.cc | 6
-rw-r--r--  ft/tests/ft-serialize-test.cc | 206
-rw-r--r--  ft/tests/ft-test-cursor-2.cc | 2
-rw-r--r--  ft/tests/ft-test-cursor.cc | 24
-rw-r--r--  ft/tests/ft-test.cc | 28
-rw-r--r--  ft/tests/ftloader-test-bad-generate.cc | 10
-rw-r--r--  ft/tests/ftloader-test-extractor-errors.cc | 10
-rw-r--r--  ft/tests/ftloader-test-extractor.cc | 12
-rw-r--r--  ft/tests/ftloader-test-merge-files-dbufio.cc | 16
-rw-r--r--  ft/tests/ftloader-test-open.cc | 4
-rw-r--r--  ft/tests/ftloader-test-writer-errors.cc | 6
-rw-r--r--  ft/tests/ftloader-test-writer.cc | 6
-rw-r--r--  ft/tests/ftloader-test.cc | 40
-rw-r--r--  ft/tests/is_empty.cc | 20
-rw-r--r--  ft/tests/keyrange.cc | 26
-rw-r--r--  ft/tests/le-cursor-provdel.cc | 18
-rw-r--r--  ft/tests/le-cursor-right.cc | 16
-rw-r--r--  ft/tests/le-cursor-walk.cc | 8
-rw-r--r--  ft/tests/log-test-maybe-trim.cc | 6
-rw-r--r--  ft/tests/log-test5.cc | 2
-rw-r--r--  ft/tests/log-test6.cc | 2
-rw-r--r--  ft/tests/log-test7.cc | 2
-rw-r--r--  ft/tests/logcursor-timestamp.cc | 4
-rw-r--r--  ft/tests/msnfilter.cc | 2
-rw-r--r--  ft/tests/omt-test.cc | 142
-rw-r--r--  ft/tests/orthopush-flush.cc | 34
-rw-r--r--  ft/tests/pqueue-test.cc | 2
-rw-r--r--  ft/tests/queue-test.cc | 12
-rw-r--r--  ft/tests/recovery-bad-last-entry.cc | 16
-rw-r--r--  ft/tests/recovery-cbegin-cend-hello.cc | 6
-rw-r--r--  ft/tests/recovery-cbegin-cend.cc | 4
-rw-r--r--  ft/tests/recovery-cbegin.cc | 6
-rw-r--r--  ft/tests/recovery-cend-cbegin.cc | 6
-rw-r--r--  ft/tests/recovery-datadir-is-file.cc | 2
-rw-r--r--  ft/tests/recovery-fopen-missing-file.cc | 6
-rw-r--r--  ft/tests/recovery-hello.cc | 10
-rw-r--r--  ft/tests/recovery-lsn-error-during-forward-scan.cc | 8
-rw-r--r--  ft/tests/recovery-no-datadir.cc | 2
-rw-r--r--  ft/tests/shortcut.cc | 2
-rw-r--r--  ft/tests/test-assert.cc | 4
-rw-r--r--  ft/tests/test-checkpoint-during-flush.cc | 32
-rw-r--r--  ft/tests/test-checkpoint-during-merge.cc | 32
-rw-r--r--  ft/tests/test-checkpoint-during-rebalance.cc | 28
-rw-r--r--  ft/tests/test-checkpoint-during-split.cc | 36
-rw-r--r--  ft/tests/test-dirty-flushes-on-cleaner.cc | 8
-rw-r--r--  ft/tests/test-flushes-on-cleaner.cc | 14
-rw-r--r--  ft/tests/test-ft-overflow.cc | 4
-rw-r--r--  ft/tests/test-ft-txns.h | 2
-rw-r--r--  ft/tests/test-leafentry-nested.cc | 76
-rw-r--r--  ft/tests/test-merges-on-cleaner.cc | 6
-rw-r--r--  ft/tests/test-pick-child-to-flush.cc | 6
-rw-r--r--  ft/tests/test.h | 20
-rw-r--r--  ft/tests/test1308a.cc | 4
-rw-r--r--  ft/tests/test3856.cc | 4
-rw-r--r--  ft/tests/test3884.cc | 16
-rw-r--r--  ft/tests/test4115.cc | 4
-rw-r--r--  ft/tests/test4244.cc | 4
-rw-r--r--  ft/tests/test_block_allocator_merge.cc | 46
-rw-r--r--  ft/tests/test_logcursor.cc | 4
-rw-r--r--  ft/tests/test_partitioned_counter.cc | 1
-rw-r--r--  ft/tests/x1764-test.cc | 20
-rw-r--r--  ft/tests/xid_lsn_independent.cc | 12
-rw-r--r--  ft/txn.cc | 26
-rw-r--r--  ft/txn.h | 12
-rw-r--r--  ft/txn_manager.cc | 16
-rw-r--r--  ft/txn_manager.h | 2
-rw-r--r--  ft/ule.cc | 186
-rw-r--r--  ft/ule.h | 6
-rw-r--r--  ft/wbuf.h | 52
-rw-r--r--  ft/x1764-speedup/x1764-speedup-test.cc | 26
-rw-r--r--  ft/x1764.cc | 86
-rw-r--r--  ft/x1764.h | 10
-rw-r--r--  ft/xids-internal.h | 2
-rw-r--r--  ft/xids.cc | 42
-rw-r--r--  ft/xids.h | 10
-rw-r--r--  ft/ybt.h | 2
221 files changed, 3324 insertions, 3339 deletions
diff --git a/ft/background_job_manager.cc b/ft/background_job_manager.cc
index 71d76b354e6..8099cc665f2 100644
--- a/ft/background_job_manager.cc
+++ b/ft/background_job_manager.cc
@@ -5,7 +5,6 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <config.h>
-#include <stdbool.h>
#include <toku_pthread.h>
#include "kibbutz.h"
#include "background_job_manager.h"
@@ -13,7 +12,7 @@
struct background_job_manager_struct {
bool accepting_jobs;
- u_int32_t num_jobs;
+ uint32_t num_jobs;
toku_cond_t jobs_wait;
toku_mutex_t jobs_lock;
};
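
The struct above is the job manager's whole state, and its fields suggest the standard counted-jobs drain pattern: a mutex-guarded counter plus a condition variable signalled when the count reaches zero. A sketch under that assumption, with plain pthreads standing in for the toku_* wrappers and illustrative function names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct bjm {
        bool accepting_jobs;
        uint32_t num_jobs;
        pthread_cond_t jobs_wait;
        pthread_mutex_t jobs_lock;
    };

    static int bjm_add_job(struct bjm *m) {
        pthread_mutex_lock(&m->jobs_lock);
        int r = -1;
        if (m->accepting_jobs) {   // refuse new work once a drain has begun
            m->num_jobs++;
            r = 0;
        }
        pthread_mutex_unlock(&m->jobs_lock);
        return r;
    }

    static void bjm_remove_job(struct bjm *m) {
        pthread_mutex_lock(&m->jobs_lock);
        if (--m->num_jobs == 0)
            pthread_cond_broadcast(&m->jobs_wait);   // wake any drainer
        pthread_mutex_unlock(&m->jobs_lock);
    }

    static void bjm_wait_for_jobs(struct bjm *m) {
        pthread_mutex_lock(&m->jobs_lock);
        m->accepting_jobs = false;                   // stop admitting, then drain
        while (m->num_jobs > 0)
            pthread_cond_wait(&m->jobs_wait, &m->jobs_lock);
        pthread_mutex_unlock(&m->jobs_lock);
    }
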
diff --git a/ft/block_allocator.cc b/ft/block_allocator.cc
index 01a1215174d..02ede5134e5 100644
--- a/ft/block_allocator.cc
+++ b/ft/block_allocator.cc
@@ -11,18 +11,18 @@
// Previous implementation used next_fit; we now use first_fit since we are moving blocks around to reduce file size.
struct block_allocator {
- u_int64_t reserve_at_beginning; // How much to reserve at the beginning
- u_int64_t alignment; // Block alignment
- u_int64_t n_blocks; // How many blocks
- u_int64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks.
+ uint64_t reserve_at_beginning; // How much to reserve at the beginning
+ uint64_t alignment; // Block alignment
+ uint64_t n_blocks; // How many blocks
+ uint64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks.
struct block_allocator_blockpair *blocks_array; // These blocks are sorted by address.
- u_int64_t n_bytes_in_use; // including the reserve_at_beginning
+ uint64_t n_bytes_in_use; // including the reserve_at_beginning
};
void
block_allocator_validate (BLOCK_ALLOCATOR ba) {
- u_int64_t i;
- u_int64_t n_bytes_in_use = ba->reserve_at_beginning;
+ uint64_t i;
+ uint64_t n_bytes_in_use = ba->reserve_at_beginning;
for (i=0; i<ba->n_blocks; i++) {
n_bytes_in_use += ba->blocks_array[i].size;
if (i>0) {
@@ -42,7 +42,7 @@ block_allocator_validate (BLOCK_ALLOCATOR ba) {
#if 0
void
block_allocator_print (BLOCK_ALLOCATOR ba) {
- u_int64_t i;
+ uint64_t i;
for (i=0; i<ba->n_blocks; i++) {
printf("%" PRId64 ":%" PRId64 " ", ba->blocks_array[i].offset, ba->blocks_array[i].size);
}
@@ -52,7 +52,7 @@ block_allocator_print (BLOCK_ALLOCATOR ba) {
#endif
void
-create_block_allocator (BLOCK_ALLOCATOR *ba, u_int64_t reserve_at_beginning, u_int64_t alignment) {
+create_block_allocator (BLOCK_ALLOCATOR *ba, uint64_t reserve_at_beginning, uint64_t alignment) {
BLOCK_ALLOCATOR XMALLOC(result);
result->reserve_at_beginning = reserve_at_beginning;
result->alignment = alignment;
@@ -73,10 +73,10 @@ destroy_block_allocator (BLOCK_ALLOCATOR *bap) {
}
static void
-grow_blocks_array_by (BLOCK_ALLOCATOR ba, u_int64_t n_to_add) {
+grow_blocks_array_by (BLOCK_ALLOCATOR ba, uint64_t n_to_add) {
if (ba->n_blocks + n_to_add > ba->blocks_array_size) {
- u_int64_t new_size = ba->n_blocks + n_to_add;
- u_int64_t at_least = ba->blocks_array_size * 2;
+ uint64_t new_size = ba->n_blocks + n_to_add;
+ uint64_t at_least = ba->blocks_array_size * 2;
if (at_least > new_size) {
new_size = at_least;
}
@@ -92,10 +92,10 @@ grow_blocks_array (BLOCK_ALLOCATOR ba) {
}
void
-block_allocator_merge_blockpairs_into (u_int64_t d, struct block_allocator_blockpair dst[/*d*/],
- u_int64_t s, const struct block_allocator_blockpair src[/*s*/])
+block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
+ uint64_t s, const struct block_allocator_blockpair src[/*s*/])
{
- u_int64_t tail = d+s;
+ uint64_t tail = d+s;
while (d>0 && s>0) {
struct block_allocator_blockpair *dp = &dst[d-1];
struct block_allocator_blockpair const *sp = &src[s-1];
@@ -137,12 +137,12 @@ compare_blockpairs (const void *av, const void *bv) {
}
void
-block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/])
+block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/])
// See the documentation in block_allocator.h
{
VALIDATE(ba);
qsort(pairs, n_blocks, sizeof(*pairs), compare_blockpairs);
- for (u_int64_t i=0; i<n_blocks; i++) {
+ for (uint64_t i=0; i<n_blocks; i++) {
assert(pairs[i].offset >= ba->reserve_at_beginning);
assert(pairs[i].offset%ba->alignment == 0);
ba->n_bytes_in_use += pairs[i].size;
@@ -155,7 +155,7 @@ block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct
}
void
-block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset) {
+block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) {
struct block_allocator_blockpair p = {.offset = offset, .size=size};
// Just do a linear search for the block.
// This data structure is a sorted array (no gaps or anything), so the search isn't really making this any slower than the insertion.
@@ -163,15 +163,15 @@ block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t of
block_allocator_alloc_blocks_at(ba, 1, &p);
}
-static inline u_int64_t
-align (u_int64_t value, BLOCK_ALLOCATOR ba)
+static inline uint64_t
+align (uint64_t value, BLOCK_ALLOCATOR ba)
// Effect: align a value by rounding up.
{
return ((value+ba->alignment-1)/ba->alignment)*ba->alignment;
}
void
-block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offset) {
+block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset) {
grow_blocks_array(ba);
ba->n_bytes_in_use += size;
if (ba->n_blocks==0) {
@@ -184,7 +184,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
}
// Implement first fit.
{
- u_int64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
+ uint64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
if (end_of_reserve + size <= ba->blocks_array[0].offset ) {
// Check to see if the space immediately after the reserve is big enough to hold the new block.
struct block_allocator_blockpair *bp = &ba->blocks_array[0];
@@ -197,12 +197,12 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
return;
}
}
- for (u_int64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
+ for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
- u_int64_t this_offset = bp[0].offset;
- u_int64_t this_size = bp[0].size;
- u_int64_t answer_offset = align(this_offset + this_size, ba);
+ uint64_t this_offset = bp[0].offset;
+ uint64_t this_size = bp[0].size;
+ uint64_t answer_offset = align(this_offset + this_size, ba);
if (answer_offset + size > bp[1].offset) continue; // The block we want doesn't fit after this block.
// It fits, so allocate it here.
memmove(bp+2, bp+1, (ba->n_blocks - blocknum -1)*sizeof(*bp));
@@ -216,7 +216,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
// It didn't fit anywhere, so fit it on the end.
assert(ba->n_blocks < ba->blocks_array_size);
struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks];
- u_int64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba);
+ uint64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba);
bp->offset = answer_offset;
bp->size = size;
ba->n_blocks++;
@@ -225,7 +225,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
}
static int64_t
-find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
+find_block (BLOCK_ALLOCATOR ba, uint64_t offset)
// Find the index in the blocks array that has a particular offset. Requires that the block exist.
// Use binary search so it runs fast.
{
@@ -234,12 +234,12 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
assert(ba->blocks_array[0].offset == offset);
return 0;
}
- u_int64_t lo = 0;
- u_int64_t hi = ba->n_blocks;
+ uint64_t lo = 0;
+ uint64_t hi = ba->n_blocks;
while (1) {
assert(lo<hi); // otherwise no such block exists.
- u_int64_t mid = (lo+hi)/2;
- u_int64_t thisoff = ba->blocks_array[mid].offset;
+ uint64_t mid = (lo+hi)/2;
+ uint64_t thisoff = ba->blocks_array[mid].offset;
//printf("lo=%" PRId64 " hi=%" PRId64 " mid=%" PRId64 " thisoff=%" PRId64 " offset=%" PRId64 "\n", lo, hi, mid, thisoff, offset);
if (thisoff < offset) {
lo = mid+1;
@@ -252,7 +252,7 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
}
void
-block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset) {
+block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset) {
VALIDATE(ba);
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
@@ -262,14 +262,14 @@ block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset) {
VALIDATE(ba);
}
-u_int64_t
-block_allocator_block_size (BLOCK_ALLOCATOR ba, u_int64_t offset) {
+uint64_t
+block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset) {
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
return ba->blocks_array[bn].size;
}
-u_int64_t
+uint64_t
block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) {
if (ba->n_blocks==0) return ba->reserve_at_beginning;
else {
@@ -279,7 +279,7 @@ block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) {
}
int
-block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b, u_int64_t *offset, u_int64_t *size)
+block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size)
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is a block with that number; return nonzero if b is too big.
@@ -324,7 +324,7 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
}
//Deal with space between blocks:
- for (u_int64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
+ for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
uint64_t this_offset = bp[0].offset;
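
Stepping back from the hunks: block_allocator_alloc_block above implements first fit over an array of {offset, size} pairs kept sorted by offset, rounding each candidate gap up with align(). A self-contained sketch of just that search; it omits the reserve at the beginning, array growth, and the append-at-the-end case the real function also handles, and the names are illustrative:

    #include <stdint.h>

    struct bp { uint64_t offset, size; };   // stand-in for block_allocator_blockpair

    static uint64_t round_up(uint64_t v, uint64_t alignment) {
        // Same computation as align() above: smallest multiple of alignment >= v.
        return ((v + alignment - 1) / alignment) * alignment;
    }

    // Scan the gaps between consecutive sorted blocks and return the first
    // aligned offset where `size` bytes fit, or UINT64_MAX if no interior
    // gap is big enough (the caller then appends after the last block).
    static uint64_t first_fit(const struct bp *blocks, uint64_t n_blocks,
                              uint64_t size, uint64_t alignment) {
        for (uint64_t i = 0; i + 1 < n_blocks; i++) {
            uint64_t candidate = round_up(blocks[i].offset + blocks[i].size, alignment);
            if (candidate + size <= blocks[i + 1].offset)
                return candidate;   // fits in the gap after block i
        }
        return UINT64_MAX;
    }
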
diff --git a/ft/block_allocator.h b/ft/block_allocator.h
index a6de762b99d..3f6500d4559 100644
--- a/ft/block_allocator.h
+++ b/ft/block_allocator.h
@@ -39,7 +39,7 @@
typedef struct block_allocator *BLOCK_ALLOCATOR;
-void create_block_allocator (BLOCK_ALLOCATOR * ba, u_int64_t reserve_at_beginning, u_int64_t alignment);
+void create_block_allocator (BLOCK_ALLOCATOR * ba, uint64_t reserve_at_beginning, uint64_t alignment);
// Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
// All blocks will start on a multiple of ALIGNMENT.
// Aborts if we run out of memory.
@@ -56,7 +56,7 @@ void destroy_block_allocator (BLOCK_ALLOCATOR *ba);
// ba (IN/OUT):
-void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset);
+void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset);
// Effect: Allocate a block of the specified size at a particular offset.
// Aborts if anything goes wrong.
// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
@@ -70,15 +70,15 @@ void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64
struct block_allocator_blockpair {
- u_int64_t offset;
- u_int64_t size;
+ uint64_t offset;
+ uint64_t size;
};
-void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct block_allocator_blockpair *pairs);
+void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair *pairs);
// Effect: Take pairs in any order, and add them all, as if we did block_allocator_alloc_block() on each pair.
// This should run in time O(N + M log M) where N is the number of blocks in ba, and M is the number of new blocks.
// Modifies: pairs (sorts them).
-void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offset);
+void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset);
// Effect: Allocate a block of the specified size at an address chosen by the allocator.
// Aborts if anything goes wrong.
// The block address will be a multiple of the alignment.
@@ -87,7 +87,7 @@ void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t
// size (IN): The size of the block. (The size does not have to be aligned.)
// offset (OUT): The location of the block.
-void block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset);
+void block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Free the block at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
@@ -95,7 +95,7 @@ void block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset);
// offset (IN): The offset of the block.
-u_int64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, u_int64_t offset);
+uint64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Return the size of the block that starts at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
@@ -110,14 +110,14 @@ void block_allocator_print (BLOCK_ALLOCATOR ba);
// Effect: Print information about the block allocator.
// Rationale: This is probably useful only for debugging.
-u_int64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba);
+uint64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba);
// Effect: Return the unallocated block address of "infinite" size.
// That is, return the smallest address that is above all the allocated blocks.
// Rationale: When writing the root FIFO we don't know how big the block is.
// So we start at the "infinite" block, write the fifo, and then
// allocate_block_at of the correct size and offset to account for the root FIFO.
-int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b, u_int64_t *offset, u_int64_t *size);
+int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size);
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is a block with that number; return nonzero if b is too big.
@@ -130,8 +130,8 @@ void block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTA
// report->data_bytes is filled in
// report->checkpoint_bytes_additional is filled in
-void block_allocator_merge_blockpairs_into (u_int64_t d, struct block_allocator_blockpair dst[/*d*/],
- u_int64_t s, const struct block_allocator_blockpair src[/*s*/]);
+void block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
+ uint64_t s, const struct block_allocator_blockpair src[/*s*/]);
// Effect: Merge dst[d] and src[s] into dst[d+s], merging in place.
// Initially dst and src hold sorted arrays (sorted by increasing offset).
// Finally dst contains all d+s elements sorted in order.
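
The in-place merge documented above works because dst is sized for d+s entries: filling from the high end, the write cursor can never overtake the unread tail of dst, so no scratch buffer is needed. The same tail-first loop on plain integers (illustrative, ascending order rather than the blockpair offset comparison):

    #include <stdint.h>

    // Merge src[0..s) into dst[0..d), leaving dst[0..d+s) sorted ascending.
    // dst must have capacity for d+s elements.
    static void merge_into(uint64_t d, uint64_t dst[/*d+s*/],
                           uint64_t s, const uint64_t src[/*s*/]) {
        uint64_t tail = d + s;
        while (d > 0 && s > 0) {
            if (dst[d - 1] >= src[s - 1])
                dst[--tail] = dst[--d];
            else
                dst[--tail] = src[--s];
        }
        while (s > 0)
            dst[--tail] = src[--s];   // leftover src; any leftover dst prefix is already in place
    }
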
diff --git a/ft/block_table.cc b/ft/block_table.cc
index f380fc6afa6..c1449bea790 100644
--- a/ft/block_table.cc
+++ b/ft/block_table.cc
@@ -80,14 +80,14 @@ struct block_table {
//forward decls
static int64_t calculate_size_on_disk (struct translation *t);
-static inline BOOL translation_prevents_freeing (struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair);
+static inline bool translation_prevents_freeing (struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair);
static inline void lock_for_blocktable (BLOCK_TABLE bt);
static inline void unlock_for_blocktable (BLOCK_TABLE bt);
static void
-ft_set_dirty(FT ft, BOOL for_checkpoint){
+ft_set_dirty(FT ft, bool for_checkpoint){
toku_mutex_assert_locked(&ft->blocktable->mutex);
assert(ft->h->type == FT_CURRENT);
if (for_checkpoint) {
@@ -100,9 +100,9 @@ ft_set_dirty(FT ft, BOOL for_checkpoint){
}
static void
-maybe_truncate_file(BLOCK_TABLE bt, int fd, u_int64_t size_needed_before) {
+maybe_truncate_file(BLOCK_TABLE bt, int fd, uint64_t size_needed_before) {
toku_mutex_assert_locked(&bt->mutex);
- u_int64_t new_size_needed = block_allocator_allocated_limit(bt->block_allocator);
+ uint64_t new_size_needed = block_allocator_allocated_limit(bt->block_allocator);
//Save a call to toku_os_get_file_size (kernel call) if unlikely to be useful.
if (new_size_needed < size_needed_before && new_size_needed < bt->safe_file_size) {
nb_mutex_lock(&bt->safe_file_size_lock, &bt->mutex);
@@ -190,7 +190,7 @@ maybe_optimize_translation(struct translation *t) {
t->smallest_never_used_blocknum.b = b.b;
if (t->length_of_array/4 > t->smallest_never_used_blocknum.b) {
//We're using more memory than necessary to represent this now. Reduce.
- u_int64_t new_length = t->smallest_never_used_blocknum.b * 2;
+ uint64_t new_length = t->smallest_never_used_blocknum.b * 2;
XREALLOC_N(new_length, t->block_translation);
t->length_of_array = new_length;
//No need to zero anything out.
@@ -218,8 +218,8 @@ toku_block_translation_note_start_checkpoint_unlocked (BLOCK_TABLE bt) {
maybe_optimize_translation(&bt->current);
copy_translation(&bt->inprogress, &bt->current, TRANSLATION_INPROGRESS);
- bt->checkpoint_skipped = FALSE;
- bt->checkpoint_failed = FALSE;
+ bt->checkpoint_skipped = false;
+ bt->checkpoint_failed = false;
}
//#define PRNTF(str, b, siz, ad, bt) printf("%s[%d] %s %" PRId64 " %" PRId64 " %" PRId64 "\n", __FUNCTION__, __LINE__, str, b, siz, ad); fflush(stdout); if (bt) block_allocator_validate(((BLOCK_TABLE)(bt))->block_allocator);
@@ -230,7 +230,7 @@ void
toku_block_translation_note_failed_checkpoint (BLOCK_TABLE bt) {
lock_for_blocktable(bt);
assert(bt->inprogress.block_translation);
- bt->checkpoint_failed = TRUE;
+ bt->checkpoint_failed = true;
unlock_for_blocktable(bt);
}
@@ -240,7 +240,7 @@ toku_block_translation_note_skipped_checkpoint (BLOCK_TABLE bt) {
//Purpose: alert the block translation that the checkpoint was skipped, e.g. for a non-dirty header
lock_for_blocktable(bt);
assert(bt->inprogress.block_translation);
- bt->checkpoint_skipped = TRUE;
+ bt->checkpoint_skipped = true;
unlock_for_blocktable(bt);
}
@@ -276,7 +276,7 @@ void
toku_block_translation_note_end_checkpoint (BLOCK_TABLE bt, int fd) {
// Free unused blocks
lock_for_blocktable(bt);
- u_int64_t allocated_limit_at_start = block_allocator_allocated_limit(bt->block_allocator);
+ uint64_t allocated_limit_at_start = block_allocator_allocated_limit(bt->block_allocator);
assert(bt->inprogress.block_translation);
if (bt->checkpoint_skipped || bt->checkpoint_failed) {
cleanup_failed_checkpoint(bt);
@@ -367,7 +367,7 @@ toku_ft_unlock (FT ft) {
// This is a special debugging function used only in the brt-serialize-test.
void
-toku_block_alloc(BLOCK_TABLE bt, u_int64_t size, u_int64_t *offset) {
+toku_block_alloc(BLOCK_TABLE bt, uint64_t size, uint64_t *offset) {
lock_for_blocktable(bt);
PRNTF("allocSomethingUnknown", 0L, (int64_t)size, 0L, bt);
block_allocator_alloc_block(bt->block_allocator, size, offset);
@@ -377,7 +377,7 @@ PRNTF("allocSomethingUnknownd", 0L, (int64_t)size, (int64_t)*offset, bt);
// Also used only in brt-serialize-test.
void
-toku_block_free(BLOCK_TABLE bt, u_int64_t offset) {
+toku_block_free(BLOCK_TABLE bt, uint64_t offset) {
lock_for_blocktable(bt);
PRNTF("freeSOMETHINGunknown", 0L, 0L, offset, bt);
block_allocator_free_block(bt->block_allocator, offset);
@@ -394,9 +394,9 @@ calculate_size_on_disk (struct translation *t) {
}
// We cannot free the disk space allocated to this blocknum if it is still in use by the given translation table.
-static inline BOOL
+static inline bool
translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair) {
- BOOL r = (BOOL)
+ bool r = (bool)
(t->block_translation &&
b.b < t->smallest_never_used_blocknum.b &&
old_pair->u.diskoff == t->block_translation[b.b].u.diskoff);
@@ -404,7 +404,7 @@ translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_tra
}
static void
-blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, BOOL for_checkpoint) {
+blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, bool for_checkpoint) {
toku_mutex_assert_locked(&bt->mutex);
ft_set_dirty(ft, for_checkpoint);
@@ -412,7 +412,7 @@ blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DIS
struct block_translation_pair old_pair = t->block_translation[b.b];
PRNTF("old", b.b, old_pair.size, old_pair.u.diskoff, bt);
//Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint
- BOOL cannot_free = (BOOL)
+ bool cannot_free = (bool)
((!for_checkpoint && translation_prevents_freeing(&bt->inprogress, b, &old_pair)) ||
translation_prevents_freeing(&bt->checkpointed, b, &old_pair));
if (!cannot_free && old_pair.u.diskoff!=diskoff_unused) {
@@ -420,7 +420,7 @@ PRNTF("Freed", b.b, old_pair.size, old_pair.u.diskoff, bt);
block_allocator_free_block(bt->block_allocator, old_pair.u.diskoff);
}
- u_int64_t allocator_offset;
+ uint64_t allocator_offset;
//Allocate a new block
block_allocator_alloc_block(bt->block_allocator, size, &allocator_offset);
t->block_translation[b.b].u.diskoff = allocator_offset;
@@ -456,7 +456,7 @@ ensure_safe_write_unlocked(BLOCK_TABLE bt, int fd, DISKOFF block_size, DISKOFF b
}
void
-toku_blocknum_realloc_on_disk (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, BOOL for_checkpoint) {
+toku_blocknum_realloc_on_disk (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint) {
lock_for_blocktable(bt);
struct translation *t = &bt->current;
verify_valid_freeable_blocknum(t, b);
@@ -480,7 +480,7 @@ blocknum_alloc_translation_on_disk_unlocked (BLOCK_TABLE bt) {
//Allocate a new block
int64_t size = calculate_size_on_disk(t);
- u_int64_t offset;
+ uint64_t offset;
block_allocator_alloc_block(bt->block_allocator, size, &offset);
PRNTF("blokAllokator", 1L, size, offset, bt);
t->block_translation[b.b].u.diskoff = offset;
@@ -499,7 +499,7 @@ toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, int fd, struct wbuf *w,
blocknum_alloc_translation_on_disk_unlocked(bt);
{
//Init wbuf
- u_int64_t size_translation = calculate_size_on_disk(t);
+ uint64_t size_translation = calculate_size_on_disk(t);
assert((int64_t)size_translation==t->block_translation[b.b].size);
if (0)
printf("%s:%d writing translation table of size_translation %" PRIu64 " at %" PRId64 "\n", __FILE__, __LINE__, size_translation, t->block_translation[b.b].u.diskoff);
@@ -515,7 +515,7 @@ toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, int fd, struct wbuf *w,
wbuf_DISKOFF(w, t->block_translation[i].u.diskoff);
wbuf_DISKOFF(w, t->block_translation[i].size);
}
- u_int32_t checksum = x1764_finish(&w->checksum);
+ uint32_t checksum = x1764_finish(&w->checksum);
wbuf_int(w, checksum);
*address = t->block_translation[b.b].u.diskoff;
*size = t->block_translation[b.b].size;
@@ -549,9 +549,9 @@ maybe_expand_translation (struct translation *t) {
// given that one more never-used blocknum will soon be used.
if (t->length_of_array <= t->smallest_never_used_blocknum.b) {
//expansion is necessary
- u_int64_t new_length = t->smallest_never_used_blocknum.b * 2;
+ uint64_t new_length = t->smallest_never_used_blocknum.b * 2;
XREALLOC_N(new_length, t->block_translation);
- u_int64_t i;
+ uint64_t i;
for (i = t->length_of_array; i < new_length; i++) {
t->block_translation[i].u.next_free_blocknum = freelist_null;
t->block_translation[i].size = size_is_free;
@@ -583,7 +583,7 @@ toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT ft) {
t->block_translation[result.b].size = 0;
verify_valid_freeable_blocknum(t, result);
*res = result;
- ft_set_dirty(ft, FALSE);
+ ft_set_dirty(ft, false);
}
void
@@ -607,7 +607,7 @@ free_blocknum_in_translation(struct translation *t, BLOCKNUM b)
}
static void
-free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, BOOL for_checkpoint) {
+free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, bool for_checkpoint) {
// Effect: Free a blocknum.
// If the blocknum holds the only reference to a block on disk, free that block
toku_mutex_assert_locked(&bt->mutex);
@@ -625,7 +625,7 @@ free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, BOOL for_checkpoint)
//If the size is 0, no disk block has ever been assigned to this blocknum.
if (old_pair.size > 0) {
//Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint
- BOOL cannot_free = (BOOL)
+ bool cannot_free = (bool)
(translation_prevents_freeing(&bt->inprogress, b, &old_pair) ||
translation_prevents_freeing(&bt->checkpointed, b, &old_pair));
if (!cannot_free) {
@@ -638,7 +638,7 @@ PRNTF("free_blocknum_free", b.b, old_pair.size, old_pair.u.diskoff, bt);
}
void
-toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, BOOL for_checkpoint) {
+toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, bool for_checkpoint) {
lock_for_blocktable(bt);
free_blocknum_unlocked(bt, bp, ft, for_checkpoint);
unlock_for_blocktable(bt);
@@ -746,12 +746,12 @@ blocktable_create_internal (void) {
nb_mutex_init(&bt->safe_file_size_lock);
//There are two headers, so we reserve space for two.
- u_int64_t reserve_per_header = BLOCK_ALLOCATOR_HEADER_RESERVE;
+ uint64_t reserve_per_header = BLOCK_ALLOCATOR_HEADER_RESERVE;
//Must reserve in multiples of BLOCK_ALLOCATOR_ALIGNMENT
//Round up the per-header usage if necessary.
//We want each header aligned.
- u_int64_t remainder = BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT;
+ uint64_t remainder = BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT;
if (remainder!=0) {
reserve_per_header += BLOCK_ALLOCATOR_ALIGNMENT;
reserve_per_header -= remainder;
@@ -783,17 +783,17 @@ translation_default(struct translation *t) { // destination into which to creat
static int
translation_deserialize_from_buffer(struct translation *t, // destination into which to deserialize
DISKOFF location_on_disk, //Location of translation_buffer
- u_int64_t size_on_disk,
+ uint64_t size_on_disk,
unsigned char * translation_buffer) { // buffer with serialized translation
int r = 0;
assert(location_on_disk!=0);
t->type = TRANSLATION_CHECKPOINTED;
{
// check the checksum
- u_int32_t x1764 = x1764_memory(translation_buffer, size_on_disk - 4);
- u_int64_t offset = size_on_disk - 4;
+ uint32_t x1764 = x1764_memory(translation_buffer, size_on_disk - 4);
+ uint64_t offset = size_on_disk - 4;
//printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk);
- u_int32_t stored_x1764 = toku_dtoh32(*(int*)(translation_buffer + offset));
+ uint32_t stored_x1764 = toku_dtoh32(*(int*)(translation_buffer + offset));
if (x1764 != stored_x1764) {
fprintf(stderr, "Translation table checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764);
r = TOKUDB_BAD_CHECKSUM;
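
The check above is a trailing-checksum layout: the last 4 bytes of the serialized translation hold a checksum of everything before them. A sketch of the verification; checksum_fn stands in for x1764_memory, and host byte order is assumed where the real code converts with toku_dtoh32:

    #include <stdint.h>
    #include <string.h>

    // Verify a buffer whose last 4 bytes hold a checksum of everything
    // before them. Returns 0 on match, nonzero on mismatch (the caller
    // maps mismatch to TOKUDB_BAD_CHECKSUM).
    static int verify_trailing_checksum(const unsigned char *buf, uint64_t size,
                                        uint32_t (*checksum_fn)(const void *, uint64_t)) {
        if (size < 4) return -1;
        uint32_t computed = checksum_fn(buf, size - 4);
        uint32_t stored;
        memcpy(&stored, buf + size - 4, sizeof stored);  // memcpy avoids the unaligned cast used in the diff
        return computed == stored ? 0 : -1;
    }
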
@@ -831,7 +831,7 @@ blocktable_note_translation (BLOCK_ALLOCATOR allocator, struct translation *t) {
// Previously this added blocks one at a time. Now we make an array and pass it in so it can be sorted and merged. See #3218.
struct block_allocator_blockpair *MALLOC_N(t->smallest_never_used_blocknum.b, pairs);
- u_int64_t n_pairs = 0;
+ uint64_t n_pairs = 0;
for (int64_t i=0; i<t->smallest_never_used_blocknum.b; i++) {
struct block_translation_pair pair = t->block_translation[i];
if (pair.size > 0) {
@@ -889,7 +889,7 @@ toku_blocktable_create_new(BLOCK_TABLE *btp) {
}
int
-toku_blocktable_iterate (BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, BOOL data_only, BOOL used_only) {
+toku_blocktable_iterate (BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only) {
struct translation *src;
int r = 0;
@@ -938,7 +938,7 @@ frag_helper(BLOCKNUM UU(b), int64_t size, int64_t address, void *extra) {
void
toku_blocktable_internal_fragmentation (BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep) {
frag_extra info = {0,0};
- int r = toku_blocktable_iterate(bt, TRANSLATION_CHECKPOINTED, frag_helper, &info, FALSE, TRUE);
+ int r = toku_blocktable_iterate(bt, TRANSLATION_CHECKPOINTED, frag_helper, &info, false, true);
assert(r==0);
if (total_sizep) *total_sizep = info.total_space;
@@ -949,7 +949,7 @@ void
toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft) {
toku_mutex_assert_locked(&bt->mutex);
BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR);
- blocknum_realloc_on_disk_internal(bt, b, size, offset, ft, FALSE);
+ blocknum_realloc_on_disk_internal(bt, b, size, offset, ft, false);
}
void
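
A guard that recurs throughout block_table.cc above: a block's old disk space may be freed only if no other translation table (in-progress or checkpointed) still maps the same block to the same offset. Compressed into a sketch with simplified types and illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    struct xpair { int64_t diskoff; int64_t size; };              // simplified block_translation_pair
    struct xlation { struct xpair *map; int64_t next_unused; };   // simplified translation table

    // True when table t still references the old disk location of blocknum
    // b: t exists, covers b, and maps b to the same offset. While true, the
    // space cannot be reused without corrupting the checkpoint that owns t.
    static bool prevents_freeing(const struct xlation *t, int64_t b,
                                 const struct xpair *old_pair) {
        return t->map != NULL
            && b < t->next_unused
            && old_pair->diskoff == t->map[b].diskoff;
    }

blocknum_realloc_on_disk_internal and free_blocknum_unlocked above free the old offset only when this returns false for both the in-progress and the checkpointed translation.
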
diff --git a/ft/block_table.h b/ft/block_table.h
index b8ff95a20ec..c7faa087ff8 100644
--- a/ft/block_table.h
+++ b/ft/block_table.h
@@ -36,7 +36,7 @@ void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd);
//Blocknums
void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
-void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, BOOL for_checkpoint);
+void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, bool for_checkpoint);
void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b);
void toku_block_verify_no_data_blocks_except_root_unlocked(BLOCK_TABLE bt, BLOCKNUM root);
void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt);
@@ -45,7 +45,7 @@ void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISK
void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size);
//Blocks and Blocknums
-void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, BOOL for_checkpoint);
+void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint);
void toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size);
//Serialization
@@ -57,8 +57,8 @@ void toku_block_table_swap_for_redirect(BLOCK_TABLE old_bt, BLOCK_TABLE new_bt);
//DEBUG ONLY (ftdump included), tests included
void toku_blocknum_dump_translation(BLOCK_TABLE bt, BLOCKNUM b);
void toku_dump_translation_table(FILE *f, BLOCK_TABLE bt);
-void toku_block_alloc(BLOCK_TABLE bt, u_int64_t size, u_int64_t *offset);
-void toku_block_free(BLOCK_TABLE bt, u_int64_t offset);
+void toku_block_alloc(BLOCK_TABLE bt, uint64_t size, uint64_t *offset);
+void toku_block_free(BLOCK_TABLE bt, uint64_t offset);
typedef int(*BLOCKTABLE_CALLBACK)(BLOCKNUM b, int64_t size, int64_t address, void *extra);
enum translation_type {TRANSLATION_NONE=0,
TRANSLATION_CURRENT,
@@ -66,7 +66,7 @@ enum translation_type {TRANSLATION_NONE=0,
TRANSLATION_CHECKPOINTED,
TRANSLATION_DEBUG};
-int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, BOOL data_only, BOOL used_only);
+int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only);
void toku_blocktable_internal_fragmentation(BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep);
void toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report);
diff --git a/ft/cachetable.cc b/ft/cachetable.cc
index 1a370f2bc4d..e6049d499ed 100644
--- a/ft/cachetable.cc
+++ b/ft/cachetable.cc
@@ -30,12 +30,12 @@
// These should be in the cachetable object, but we make them file-wide so that gdb can get them easily.
// They were left here after engine status cleanup (#2949, rather than moved into the status struct)
// so they are still easily available to the debugger and to save lots of typing.
-static u_int64_t cachetable_miss;
-static u_int64_t cachetable_misstime; // time spent waiting for disk read
-static u_int64_t cachetable_puts; // how many times has a newly created node been put into the cachetable?
-static u_int64_t cachetable_prefetches; // how many times has a block been prefetched into the cachetable?
-static u_int64_t cachetable_evictions;
-static u_int64_t cleaner_executions; // number of times the cleaner thread's loop has executed
+static uint64_t cachetable_miss;
+static uint64_t cachetable_misstime; // time spent waiting for disk read
+static uint64_t cachetable_puts; // how many times has a newly created node been put into the cachetable?
+static uint64_t cachetable_prefetches; // how many times has a block been prefetched into the cachetable?
+static uint64_t cachetable_evictions;
+static uint64_t cleaner_executions; // number of times the cleaner thread's loop has executed
static CACHETABLE_STATUS_S ct_status;
@@ -85,7 +85,7 @@ struct ctpair {
enum cachetable_dirty dirty;
- u_int32_t fullhash;
+ uint32_t fullhash;
CACHETABLE_FLUSH_CALLBACK flush_callback;
CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK pe_est_callback;
@@ -97,9 +97,9 @@ struct ctpair {
PAIR clock_next,clock_prev; // In clock.
PAIR hash_chain;
- u_int32_t count; // clock count
+ uint32_t count; // clock count
- BOOL checkpoint_pending; // If this is on, then we have got to write the pair out to disk before modifying it.
+ bool checkpoint_pending; // If this is on, then we have got to write the pair out to disk before modifying it.
PAIR pending_next;
PAIR pending_prev;
@@ -115,7 +115,7 @@ static PAIR_ATTR const zero_attr = {
.leaf_size = 0,
.rollback_size = 0,
.cache_pressure_size = 0,
- .is_valid = TRUE
+ .is_valid = true
};
static void maybe_flush_some (CACHETABLE ct, long size);
@@ -129,8 +129,8 @@ static inline void ctpair_destroy(PAIR p) {
// The cachetable is as close to an ENV as we get.
// cachetable_mutex
struct cachetable {
- u_int32_t n_in_table; // number of pairs in the hash table
- u_int32_t table_size; // number of buckets in the hash table
+ uint32_t n_in_table; // number of pairs in the hash table
+ uint32_t table_size; // number of buckets in the hash table
PAIR *table; // hash table
PAIR clock_head; // of clock . head is the next thing to be up for decrement.
PAIR cleaner_head; // for cleaner thread. head is the next thing to look at for possible cleaning.
@@ -148,15 +148,15 @@ struct cachetable {
KIBBUTZ checkpointing_kibbutz; // small pool for checkpointing cloned pairs
LSN lsn_of_checkpoint_in_progress;
- u_int32_t checkpoint_num_files; // how many cachefiles are in the checkpoint
- u_int32_t checkpoint_num_txns; // how many transactions are in the checkpoint
+ uint32_t checkpoint_num_files; // how many cachefiles are in the checkpoint
+ uint32_t checkpoint_num_txns; // how many transactions are in the checkpoint
PAIR pending_head; // list of pairs marked with checkpoint_pending
struct rwlock pending_lock; // multiple writer threads, single checkpoint thread,
// see comments in toku_cachetable_begin_checkpoint to understand
// purpose of the pending_lock
struct minicron checkpointer; // the periodic checkpointing thread
struct minicron cleaner; // the periodic cleaner thread
- u_int32_t cleaner_iterations; // how many times to run the cleaner per
+ uint32_t cleaner_iterations; // how many times to run the cleaner per
// cleaner period (minicron has a
// minimum period of 1s so if you want
// more frequent cleaner runs you must
@@ -232,7 +232,7 @@ struct cachefile {
CACHEFILE next;
CACHEFILE next_in_checkpoint;
struct toku_list pairs_for_cachefile; // list of pairs for this cachefile
- BOOL for_checkpoint; //True if part of the in-progress checkpoint
+ bool for_checkpoint; //True if part of the in-progress checkpoint
// If set and the cachefile closes, the file will be removed.
// Clients must not operate on the cachefile after setting this,
@@ -248,7 +248,7 @@ struct cachefile {
void *userdata;
int (*log_fassociate_during_checkpoint)(CACHEFILE cf, void *userdata); // When starting a checkpoint we must log all open files.
int (*log_suppress_rollback_during_checkpoint)(CACHEFILE cf, void *userdata); // When starting a checkpoint we must log which files need rollbacks suppressed
- int (*close_userdata)(CACHEFILE cf, int fd, void *userdata, char **error_string, BOOL lsnvalid, LSN); // when closing the last reference to a cachefile, first call this function.
+ int (*close_userdata)(CACHEFILE cf, int fd, void *userdata, char **error_string, bool lsnvalid, LSN); // when closing the last reference to a cachefile, first call this function.
int (*begin_checkpoint_userdata)(LSN lsn_of_checkpoint, void *userdata); // before checkpointing cachefiles call this function.
int (*checkpoint_userdata)(CACHEFILE cf, int fd, void *userdata); // when checkpointing a cachefile, call this function.
int (*end_checkpoint_userdata)(CACHEFILE cf, int fd, void *userdata); // after checkpointing cachefiles call this function.
@@ -291,45 +291,45 @@ checkpoint_thread (void *cachetable_v)
return r;
}
-int toku_set_checkpoint_period (CACHETABLE ct, u_int32_t new_period) {
+int toku_set_checkpoint_period (CACHETABLE ct, uint32_t new_period) {
return toku_minicron_change_period(&ct->checkpointer, new_period);
}
-u_int32_t toku_get_checkpoint_period (CACHETABLE ct) {
+uint32_t toku_get_checkpoint_period (CACHETABLE ct) {
return toku_minicron_get_period(&ct->checkpointer);
}
-u_int32_t toku_get_checkpoint_period_unlocked (CACHETABLE ct) {
+uint32_t toku_get_checkpoint_period_unlocked (CACHETABLE ct) {
return toku_minicron_get_period_unlocked(&ct->checkpointer);
}
-int toku_set_cleaner_period (CACHETABLE ct, u_int32_t new_period) {
+int toku_set_cleaner_period (CACHETABLE ct, uint32_t new_period) {
return toku_minicron_change_period(&ct->cleaner, new_period);
}
-u_int32_t toku_get_cleaner_period (CACHETABLE ct) {
+uint32_t toku_get_cleaner_period (CACHETABLE ct) {
return toku_minicron_get_period(&ct->cleaner);
}
-u_int32_t toku_get_cleaner_period_unlocked (CACHETABLE ct) {
+uint32_t toku_get_cleaner_period_unlocked (CACHETABLE ct) {
return toku_minicron_get_period_unlocked(&ct->cleaner);
}
-int toku_set_cleaner_iterations (CACHETABLE ct, u_int32_t new_iterations) {
+int toku_set_cleaner_iterations (CACHETABLE ct, uint32_t new_iterations) {
cachetable_lock(ct);
ct->cleaner_iterations = new_iterations;
cachetable_unlock(ct);
return 0;
}
-u_int32_t toku_get_cleaner_iterations (CACHETABLE ct) {
+uint32_t toku_get_cleaner_iterations (CACHETABLE ct) {
cachetable_lock(ct);
- u_int32_t retval = toku_get_cleaner_iterations_unlocked(ct);
+ uint32_t retval = toku_get_cleaner_iterations_unlocked(ct);
cachetable_unlock(ct);
return retval;
}
-u_int32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct) {
+uint32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct) {
return ct->cleaner_iterations;
}
@@ -373,7 +373,7 @@ int toku_create_cachetable(CACHETABLE *result, long size_limit, LSN UU(initial_l
return 0;
}
-u_int64_t toku_cachetable_reserve_memory(CACHETABLE ct, double fraction) {
+uint64_t toku_cachetable_reserve_memory(CACHETABLE ct, double fraction) {
cachetable_lock(ct);
cachetable_wait_write(ct);
uint64_t reserved_memory = fraction*(ct->size_limit-ct->size_reserved);
@@ -552,7 +552,7 @@ int toku_cachefile_set_fd (CACHEFILE cf, int fd, const char *fname_in_env) {
if (r != 0) {
r=get_error_errno(); close(fd); goto cleanup; // no change for t:2444
}
- if (cf->close_userdata && (r = cf->close_userdata(cf, cf->fd, cf->userdata, 0, FALSE, ZERO_LSN))) {
+ if (cf->close_userdata && (r = cf->close_userdata(cf, cf->fd, cf->userdata, 0, false, ZERO_LSN))) {
goto cleanup;
}
cf->close_userdata = NULL;
@@ -601,7 +601,7 @@ static void remove_cf_from_cachefiles_list (CACHEFILE cf) {
}
int
-toku_cachefile_close(CACHEFILE *cfp, char **error_string, BOOL oplsn_valid, LSN oplsn) {
+toku_cachefile_close(CACHEFILE *cfp, char **error_string, bool oplsn_valid, LSN oplsn) {
int r, close_error = 0;
CACHEFILE cf = *cfp;
CACHETABLE ct = cf->cachetable;
@@ -677,10 +677,10 @@ int toku_cachefile_flush (CACHEFILE cf) {
// The idea here is to mix the bits thoroughly so that we don't have to do modulo by a prime number.
// Instead we can use a bitmask on a table of size power of two.
// This hash function does yield improved performance on ./db-benchmark-test-tokudb and ./scanscan
-static inline u_int32_t rot(u_int32_t x, u_int32_t k) {
+static inline uint32_t rot(uint32_t x, uint32_t k) {
return (x<<k) | (x>>(32-k));
}
-static inline u_int32_t final (u_int32_t a, u_int32_t b, u_int32_t c) {
+static inline uint32_t final (uint32_t a, uint32_t b, uint32_t c) {
c ^= b; c -= rot(b,14);
a ^= c; a -= rot(c,11);
b ^= a; b -= rot(a,25);
@@ -691,24 +691,24 @@ static inline u_int32_t final (u_int32_t a, u_int32_t b, u_int32_t c) {
return c;
}
-u_int32_t toku_cachetable_hash (CACHEFILE cachefile, BLOCKNUM key)
+uint32_t toku_cachetable_hash (CACHEFILE cachefile, BLOCKNUM key)
// Effect: Return a 32-bit hash key. The hash key shall be suitable for using with bitmasking for a table of size power-of-two.
{
- return final(cachefile->filenum.fileid, (u_int32_t)(key.b>>32), (u_int32_t)key.b);
+ return final(cachefile->filenum.fileid, (uint32_t)(key.b>>32), (uint32_t)key.b);
}
// has ct locked on entry
// This function MUST NOT release and reacquire the cachetable lock
// Its callers (toku_cachetable_put_with_dep_pairs) depend on this behavior.
-static void cachetable_rehash (CACHETABLE ct, u_int32_t newtable_size) {
+static void cachetable_rehash (CACHETABLE ct, uint32_t newtable_size) {
// printf("rehash %p %d %d %d\n", t, primeindexdelta, ct->n_in_table, ct->table_size);
assert(newtable_size>=4 && ((newtable_size & (newtable_size-1))==0));
PAIR *XCALLOC_N(newtable_size, newtable);
- u_int32_t i;
+ uint32_t i;
//printf("%s:%d newtable_size=%d\n", __FILE__, __LINE__, newtable_size);
assert(newtable!=0);
- u_int32_t oldtable_size = ct->table_size;
+ uint32_t oldtable_size = ct->table_size;
ct->table_size=newtable_size;
for (i=0; i<newtable_size; i++) newtable[i]=0;
for (i=0; i<oldtable_size; i++) {
@@ -860,13 +860,13 @@ static void cachetable_free_pair(CACHETABLE ct, PAIR p) {
cachetable_evictions++;
cachetable_unlock(ct);
PAIR_ATTR new_attr = p->attr;
- // Note that flush_callback is called with write_me FALSE, so the only purpose of this
- // call is to tell the brt layer to evict the node (keep_me is FALSE).
+ // Note that flush_callback is called with write_me false, so the only purpose of this
+ // call is to tell the brt layer to evict the node (keep_me is false).
// Also, because we have already removed the PAIR from the cachetable in
// cachetable_remove_pair, we cannot pass in p->cachefile and p->cachefile->fd
// for the first two parameters, as these may be invalid (#5171), so, we
// pass in NULL and -1, dummy values
- flush_callback(NULL, -1, key, value, &disk_data, write_extraargs, old_attr, &new_attr, FALSE, FALSE, TRUE, FALSE);
+ flush_callback(NULL, -1, key, value, &disk_data, write_extraargs, old_attr, &new_attr, false, false, true, false);
cachetable_lock(ct);
@@ -899,9 +899,9 @@ static void cachetable_maybe_remove_and_free_pair (CACHETABLE ct, PAIR p) {
static void cachetable_only_write_locked_data(
CACHETABLE ct,
PAIR p,
- BOOL for_checkpoint,
+ bool for_checkpoint,
PAIR_ATTR* new_attr,
- BOOL is_clone
+ bool is_clone
)
{
CACHETABLE_FLUSH_CALLBACK flush_callback = p->flush_callback;
@@ -911,7 +911,7 @@ static void cachetable_only_write_locked_data(
void *disk_data = p->disk_data;
void *write_extraargs = p->write_extraargs;
PAIR_ATTR old_attr = p->attr;
- BOOL dowrite = TRUE;
+ bool dowrite = true;
cachetable_unlock(ct);
@@ -926,7 +926,7 @@ static void cachetable_only_write_locked_data(
old_attr,
new_attr,
dowrite,
- is_clone ? FALSE : TRUE, // keep_me (only keep if this is not cloned pointer)
+ is_clone ? false : true, // keep_me (only keep if this is not cloned pointer)
for_checkpoint,
is_clone //is_clone
);
@@ -950,8 +950,8 @@ static void cachetable_write_locked_pair(CACHETABLE ct, PAIR p) {
PAIR_ATTR old_attr = p->attr;
PAIR_ATTR new_attr = p->attr;
rwlock_read_lock(&ct->pending_lock, &ct->mutex);
- BOOL for_checkpoint = p->checkpoint_pending;
- p->checkpoint_pending = FALSE;
+ bool for_checkpoint = p->checkpoint_pending;
+ p->checkpoint_pending = false;
// grabbing the disk_nb_mutex here ensures that
// after this point, no one is writing out a cloned value
// if we grab the disk_nb_mutex inside the if clause,
@@ -963,7 +963,7 @@ static void cachetable_write_locked_pair(CACHETABLE ct, PAIR p) {
// there should be no cloned value data
assert(p->cloned_value_data == NULL);
if (p->dirty) {
- cachetable_only_write_locked_data(ct, p, for_checkpoint, &new_attr, FALSE);
+ cachetable_only_write_locked_data(ct, p, for_checkpoint, &new_attr, false);
//
// now let's update variables
//
@@ -1127,7 +1127,7 @@ static bool run_eviction_on_pair(PAIR curr_in_clock, CACHETABLE ct) {
}
}
else {
- assert(FALSE);
+ assert(false);
}
}
else {
@@ -1149,7 +1149,7 @@ static void maybe_flush_some (CACHETABLE ct, long size) {
curr_cachekey.b = INT64_MAX; // create initial value so compiler does not complain
FILENUM curr_filenum;
curr_filenum.fileid = UINT32_MAX; // create initial value so compiler does not complain
- BOOL set_val = FALSE;
+ bool set_val = false;
while ((ct->clock_head) && (size + ct->size_current > ct->size_limit + ct->size_evicting)) {
PAIR curr_in_clock = ct->clock_head;
@@ -1164,7 +1164,7 @@ static void maybe_flush_some (CACHETABLE ct, long size) {
}
if (nb_mutex_users(&curr_in_clock->value_nb_mutex) || nb_mutex_users(&curr_in_clock->disk_nb_mutex)) {
if (!set_val) {
- set_val = TRUE;
+ set_val = true;
curr_cachekey = ct->clock_head->key;
curr_filenum = ct->clock_head->cachefile->filenum;
}
@@ -1172,10 +1172,10 @@ static void maybe_flush_some (CACHETABLE ct, long size) {
else {
bool eviction_run = run_eviction_on_pair(curr_in_clock, ct);
if (eviction_run) {
- set_val = FALSE;
+ set_val = false;
}
else if (!set_val) {
- set_val = TRUE;
+ set_val = true;
curr_cachekey = ct->clock_head->key;
curr_filenum = ct->clock_head->cachefile->filenum;
}
@@ -1207,7 +1207,7 @@ void toku_cachetable_maybe_flush_some(CACHETABLE ct) {
// Its callers (toku_cachetable_put_with_dep_pairs) depend on this behavior.
static PAIR cachetable_insert_at(CACHETABLE ct,
CACHEFILE cachefile, CACHEKEY key, void *value,
- u_int32_t fullhash,
+ uint32_t fullhash,
PAIR_ATTR attr,
CACHETABLE_WRITE_CALLBACK write_callback,
enum cachetable_dirty dirty) {
@@ -1235,7 +1235,7 @@ static PAIR cachetable_insert_at(CACHETABLE ct,
nb_mutex_init(&p->disk_nb_mutex);
pair_add_to_clock(ct, p);
toku_list_push(&cachefile->pairs_for_cachefile, &p->next_for_cachefile);
- u_int32_t h = fullhash & (ct->table_size-1);
+ uint32_t h = fullhash & (ct->table_size-1);
p->hash_chain = ct->table[h];
ct->table[h] = p;
ct->n_in_table++;
@@ -1252,7 +1252,7 @@ static PAIR cachetable_insert_at(CACHETABLE ct,
static int cachetable_put_internal(
CACHEFILE cachefile,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void*value,
PAIR_ATTR attr,
CACHETABLE_WRITE_CALLBACK write_callback
@@ -1263,10 +1263,10 @@ static int cachetable_put_internal(
PAIR p;
for (p=ct->table[fullhash&(cachefile->cachetable->table_size-1)]; p; p=p->hash_chain) {
if (p->key.b==key.b && p->cachefile==cachefile) {
- // Ideally, we would like to just assert(FALSE) here
+ // Ideally, we would like to just assert(false) here
// and not return an error, but as of Dr. Noga,
// cachetable-test2 depends on this behavior.
- // To replace the following with an assert(FALSE)
+ // To replace the following with an assert(false)
// we need to change the behavior of cachetable-test2
//
// Semantically, these two asserts are not strictly right. After all, when are two functions eq?
@@ -1298,7 +1298,7 @@ static int cachetable_put_internal(
// ct is locked on entry
// gets pair if exists, and that is all.
-static int cachetable_get_pair (CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, PAIR* pv) {
+static int cachetable_get_pair (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, PAIR* pv) {
CACHETABLE ct = cachefile->cachetable;
PAIR p;
int r = -1;
@@ -1327,7 +1327,7 @@ clone_pair(CACHETABLE ct, PAIR p) {
p->value_data,
&p->cloned_value_data,
&new_attr,
- TRUE,
+ true,
p->write_extraargs
);
cachetable_lock(ct);
@@ -1339,7 +1339,7 @@ clone_pair(CACHETABLE ct, PAIR p) {
// it doesn't matter whether we clear
// the pending bit before the clone
// or after the clone
- p->checkpoint_pending = FALSE;
+ p->checkpoint_pending = false;
p->dirty = CACHETABLE_CLEAN;
if (new_attr.is_valid) {
p->attr = new_attr;
@@ -1360,9 +1360,9 @@ static void checkpoint_cloned_pair(void* extra) {
cachetable_only_write_locked_data(
ct,
p,
- TRUE, //for_checkpoint
+ true, //for_checkpoint
&new_attr,
- TRUE //is_clone
+ true //is_clone
);
nb_mutex_unlock(&p->disk_nb_mutex);
bjm_remove_background_job(ct->checkpoint_clones_bjm);
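checkpoint_cloned_pair is the second half of the copy-on-checkpoint pattern: clone_pair took a snapshot while the PAIR was locked, and a background job writes that snapshot while the live value stays usable. The shape of the pattern in miniature, with std::mutex standing in for the PAIR locks and a direct call standing in for the background-job manager:

    #include <mutex>
    #include <string>
    static std::mutex pair_lock;
    static std::string live_value;
    static void write_to_disk(const std::string &snap) { (void)snap; /* pwrite(...) */ }
    static void checkpoint_value(void) {
        std::string snapshot;
        {
            std::lock_guard<std::mutex> lk(pair_lock);
            snapshot = live_value;   // the "clone", taken under the lock
        }
        // the live value is unlocked and mutable again while the stable
        // snapshot is written out (TokuDB enqueues this on a kibbutz)
        write_to_disk(snapshot);
    }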
@@ -1419,7 +1419,7 @@ write_locked_pair_for_checkpoint(CACHETABLE ct, PAIR p)
// is that to clear the bit, we must have both the PAIR lock
// and the pending lock
//
- p->checkpoint_pending = FALSE;
+ p->checkpoint_pending = false;
}
}
@@ -1457,9 +1457,9 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
cachetable_only_write_locked_data(
ct,
p,
- TRUE, //for_checkpoint
+ true, //for_checkpoint
&attr,
- TRUE //is_clone
+ true //is_clone
);
nb_mutex_unlock(&p->disk_nb_mutex);
}
@@ -1472,7 +1472,7 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
// is that to clear the bit, we must have both the PAIR lock
// and the pending lock
//
- p->checkpoint_pending = FALSE;
+ p->checkpoint_pending = false;
nb_mutex_unlock(&p->value_nb_mutex);
}
}
@@ -1486,14 +1486,14 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
//
static void checkpoint_dependent_pairs(
CACHETABLE ct,
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
)
{
- for (u_int32_t i =0; i < num_dependent_pairs; i++) {
+ for (uint32_t i =0; i < num_dependent_pairs; i++) {
PAIR curr_dep_pair = NULL;
int r = cachetable_get_pair(
dependent_cfs[i],
@@ -1525,13 +1525,13 @@ int toku_cachetable_put_with_dep_pairs(
PAIR_ATTR attr,
CACHETABLE_WRITE_CALLBACK write_callback,
void *get_key_and_fullhash_extra,
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty, // array stating dirty/cleanness of dependent pairs
CACHEKEY* key,
- u_int32_t* fullhash
+ uint32_t* fullhash
)
{
//
@@ -1594,7 +1594,7 @@ int toku_cachetable_put_with_dep_pairs(
}
-int toku_cachetable_put(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, void*value, PAIR_ATTR attr,
+int toku_cachetable_put(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, void*value, PAIR_ATTR attr,
CACHETABLE_WRITE_CALLBACK write_callback
) {
CACHETABLE ct = cachefile->cachetable;
@@ -1631,7 +1631,7 @@ do_partial_fetch(
PAIR p,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
void *read_extraargs,
- BOOL keep_pair_locked
+ bool keep_pair_locked
)
{
PAIR_ATTR old_attr = p->attr;
@@ -1659,7 +1659,7 @@ void toku_cachetable_pf_pinned_pair(
void* read_extraargs,
CACHEFILE cf,
CACHEKEY key,
- u_int32_t fullhash
+ uint32_t fullhash
)
{
PAIR_ATTR attr;
@@ -1683,14 +1683,14 @@ void toku_cachetable_pf_pinned_pair(
int toku_cachetable_get_and_pin (
CACHEFILE cachefile,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
- BOOL may_modify_value,
+ bool may_modify_value,
void* read_extraargs // parameter for fetch_callback, pf_req_callback, and pf_callback
)
{
@@ -1727,12 +1727,12 @@ static void cachetable_fetch_pair(
PAIR p,
CACHETABLE_FETCH_CALLBACK fetch_callback,
void* read_extraargs,
- BOOL keep_pair_locked
+ bool keep_pair_locked
)
{
// helgrind
CACHEKEY key = p->key;
- u_int32_t fullhash = p->fullhash;
+ uint32_t fullhash = p->fullhash;
void *toku_value = NULL;
void *disk_data = NULL;
@@ -1766,16 +1766,16 @@ static void cachetable_fetch_pair(
if (0) printf("%s:%d %" PRId64 " complete\n", __FUNCTION__, __LINE__, key.b);
}
-static BOOL resolve_checkpointing_fast(PAIR p) {
+static bool resolve_checkpointing_fast(PAIR p) {
return !(p->checkpoint_pending && (p->dirty == CACHETABLE_DIRTY) && !p->clone_callback);
}
static void checkpoint_pair_and_dependent_pairs(
CACHETABLE ct,
PAIR p,
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
)
{
@@ -1819,19 +1819,19 @@ static void checkpoint_pair_and_dependent_pairs(
int toku_cachetable_get_and_pin_with_dep_pairs (
CACHEFILE cachefile,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
- BOOL may_modify_value,
+ bool may_modify_value,
void* read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
)
{
@@ -1858,7 +1858,7 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
}
cachetable_unlock(ct);
- BOOL partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+ bool partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
// short-circuiting the path that returns the data to the user
// helps scalability for in-memory workloads
if (!partial_fetch_required) {
@@ -1878,7 +1878,7 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
// so we do a sanity check here.
assert(!p->dirty);
- do_partial_fetch(ct, cachefile, p, pf_callback, read_extraargs, TRUE);
+ do_partial_fetch(ct, cachefile, p, pf_callback, read_extraargs, true);
}
goto got_value;
}
@@ -1915,7 +1915,7 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
// Retrieve the value of the PAIR from disk.
// The pair being fetched will be marked as pending if a checkpoint happens during the
// fetch because begin_checkpoint will mark as pending any pair that is locked even if it is clean.
- cachetable_fetch_pair(ct, cachefile, p, fetch_callback, read_extraargs, TRUE);
+ cachetable_fetch_pair(ct, cachefile, p, fetch_callback, read_extraargs, true);
cachetable_miss++;
cachetable_misstime += get_tnow() - t0;
goto got_value;
@@ -1938,7 +1938,7 @@ got_value:
// For example, if we can modify a bit in either a dirty parent or a clean child, then we should modify
// the dirty parent (which will have to do I/O eventually anyway) rather than incur a full block write to modify one bit.
// Similarly, if the checkpoint is actually pending, we don't want to block on it.
-int toku_cachetable_maybe_get_and_pin (CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, void**value) {
+int toku_cachetable_maybe_get_and_pin (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, void**value) {
CACHETABLE ct = cachefile->cachetable;
PAIR p;
int r = -1;
@@ -1965,7 +1965,7 @@ int toku_cachetable_maybe_get_and_pin (CACHEFILE cachefile, CACHEKEY key, u_int3
//Used by flusher threads to possibly pin child on client thread if pinning is cheap
//Same as toku_cachetable_maybe_get_and_pin except that we don't care if the node is clean or dirty (return the node regardless).
//All other conditions remain the same.
-int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, void**value) {
+int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, void**value) {
CACHETABLE ct = cachefile->cachetable;
PAIR p;
int r = -1;
@@ -1988,7 +1988,7 @@ int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE cachefile, CACHEKEY key,
}
static int
-cachetable_unpin_internal(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr, BOOL have_ct_lock, BOOL flush)
+cachetable_unpin_internal(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr, bool have_ct_lock, bool flush)
// size==0 means that the size didn't change.
{
CACHETABLE ct = cachefile->cachetable;
@@ -2018,20 +2018,20 @@ cachetable_unpin_internal(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash,
return r;
}
-int toku_cachetable_unpin(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+int toku_cachetable_unpin(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
// By default we don't have the lock
- return cachetable_unpin_internal(cachefile, key, fullhash, dirty, attr, FALSE, TRUE);
+ return cachetable_unpin_internal(cachefile, key, fullhash, dirty, attr, false, true);
}
-int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
+int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE cachefile, CACHEKEY key, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR attr) {
// We hold the cachetable mutex.
- return cachetable_unpin_internal(cachefile, key, fullhash, dirty, attr, TRUE, FALSE);
+ return cachetable_unpin_internal(cachefile, key, fullhash, dirty, attr, true, false);
}
static void
run_unlockers (UNLOCKERS unlockers) {
while (unlockers) {
assert(unlockers->locked);
- unlockers->locked = FALSE;
+ unlockers->locked = false;
unlockers->f(unlockers->extra);
unlockers=unlockers->next;
}
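run_unlockers is the cachetable's half of the nonblocking protocol; the caller's half is a retry loop: register unlockers, call the nonblocking variant, and on TOKUDB_TRY_AGAIN reacquire whatever was released and go around again. A caller-side sketch, where my_write_callback (a CACHETABLE_WRITE_CALLBACK), the my_* fetch callbacks, and reacquire_locks() are all assumed to be defined by the caller:

    static void release_my_locks(void *extra) { (void)extra; /* drop caller-held locks */ }
    static void pin_with_retry(CACHEFILE cf, CACHEKEY key, uint32_t fullhash) {
        struct unlockers unl = { true, release_my_locks, NULL, NULL };
        void *value;
        long size;
        while (true) {
            int r = toku_cachetable_get_and_pin_nonblocking(
                cf, key, fullhash, &value, &size,
                my_write_callback, my_fetch_cb, my_pf_req_cb, my_pf_cb,
                true /*may_modify_value*/, NULL /*read_extraargs*/, &unl);
            if (r != TOKUDB_TRY_AGAIN) break;  // 0 means the pair is pinned
            reacquire_locks();                 // undo what release_my_locks did
            unl.locked = true;                 // re-arm for the next attempt
        }
    }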
@@ -2040,14 +2040,14 @@ run_unlockers (UNLOCKERS unlockers) {
int toku_cachetable_get_and_pin_nonblocking (
CACHEFILE cf,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long* UU(sizep),
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
- BOOL may_modify_value,
+ bool may_modify_value,
void *read_extraargs,
UNLOCKERS unlockers
)
@@ -2091,7 +2091,7 @@ int toku_cachetable_get_and_pin_nonblocking (
// when calling pf_req_callback, and if possible, returns the PAIR to the user without
// reacquiring the cachetable lock
cachetable_unlock(ct);
- BOOL partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
+ bool partial_fetch_required = pf_req_callback(p->value_data,read_extraargs);
//
// Just because the PAIR exists does not necessarily mean that all the data the caller requires
// is in memory. A partial fetch may be required, which is evaluated above
@@ -2102,7 +2102,7 @@ int toku_cachetable_get_and_pin_nonblocking (
cachetable_lock(ct);
run_unlockers(unlockers); // The contract says the unlockers are run with the ct lock being held.
// Now wait for the I/O to occur.
- do_partial_fetch(ct, cf, p, pf_callback, read_extraargs, FALSE);
+ do_partial_fetch(ct, cf, p, pf_callback, read_extraargs, false);
cachetable_unlock(ct);
return TOKUDB_TRY_AGAIN;
}
@@ -2140,8 +2140,8 @@ int toku_cachetable_get_and_pin_nonblocking (
assert(p);
nb_mutex_lock(&p->value_nb_mutex, &ct->mutex);
run_unlockers(unlockers); // we hold the ct mutex.
- u_int64_t t0 = get_tnow();
- cachetable_fetch_pair(ct, cf, p, fetch_callback, read_extraargs, FALSE);
+ uint64_t t0 = get_tnow();
+ cachetable_fetch_pair(ct, cf, p, fetch_callback, read_extraargs, false);
cachetable_miss++;
cachetable_misstime += get_tnow() - t0;
cachetable_unlock(ct);
@@ -2172,7 +2172,7 @@ static void cachetable_reader(void* extra) {
cpargs->p,
cpargs->fetch_callback,
cpargs->read_extraargs,
- FALSE
+ false
);
cachetable_unlock(ct);
bjm_remove_background_job(cf->bjm);
@@ -2184,24 +2184,24 @@ static void cachetable_partial_reader(void* extra) {
CACHEFILE cf = cpargs->p->cachefile;
CACHETABLE ct = cf->cachetable;
cachetable_lock(ct);
- do_partial_fetch(ct, cpargs->p->cachefile, cpargs->p, cpargs->pf_callback, cpargs->read_extraargs, FALSE);
+ do_partial_fetch(ct, cpargs->p->cachefile, cpargs->p, cpargs->pf_callback, cpargs->read_extraargs, false);
cachetable_unlock(ct);
bjm_remove_background_job(cf->bjm);
toku_free(cpargs);
}
-int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
+int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
void *read_extraargs,
- BOOL *doing_prefetch)
+ bool *doing_prefetch)
// Effect: See the documentation for this function in cachetable.h
{
int r = 0;
if (doing_prefetch) {
- *doing_prefetch = FALSE;
+ *doing_prefetch = false;
}
CACHETABLE ct = cf->cachetable;
cachetable_lock(ct);
@@ -2237,13 +2237,13 @@ int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
cpargs->read_extraargs = read_extraargs;
toku_kibbutz_enq(ct->ct_kibbutz, cachetable_reader, cpargs);
if (doing_prefetch) {
- *doing_prefetch = TRUE;
+ *doing_prefetch = true;
}
}
else if (nb_mutex_users(&p->value_nb_mutex)==0) {
// nobody else is using the node, so we should go ahead and prefetch
nb_mutex_lock(&p->value_nb_mutex, &ct->mutex);
- BOOL partial_fetch_required = pf_req_callback(p->value_data, read_extraargs);
+ bool partial_fetch_required = pf_req_callback(p->value_data, read_extraargs);
if (partial_fetch_required) {
r = bjm_add_background_job(cf->bjm);
@@ -2254,7 +2254,7 @@ int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
cpargs->read_extraargs = read_extraargs;
toku_kibbutz_enq(ct->ct_kibbutz, cachetable_partial_reader, cpargs);
if (doing_prefetch) {
- *doing_prefetch = TRUE;
+ *doing_prefetch = true;
}
}
else {
@@ -2270,7 +2270,7 @@ int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
int toku_cachetable_rename (CACHEFILE cachefile, CACHEKEY oldkey, CACHEKEY newkey) {
CACHETABLE ct = cachefile->cachetable;
PAIR *ptr_to_p,p;
- u_int32_t fullhash = toku_cachetable_hash(cachefile, oldkey);
+ uint32_t fullhash = toku_cachetable_hash(cachefile, oldkey);
cachetable_lock(ct);
for (ptr_to_p = &ct->table[fullhash&(ct->table_size-1)], p = *ptr_to_p;
p;
@@ -2278,8 +2278,8 @@ int toku_cachetable_rename (CACHEFILE cachefile, CACHEKEY oldkey, CACHEKEY newke
if (p->key.b==oldkey.b && p->cachefile==cachefile) {
*ptr_to_p = p->hash_chain;
p->key = newkey;
- u_int32_t new_fullhash = toku_cachetable_hash(cachefile, newkey);
- u_int32_t nh = new_fullhash&(ct->table_size-1);
+ uint32_t new_fullhash = toku_cachetable_hash(cachefile, newkey);
+ uint32_t nh = new_fullhash&(ct->table_size-1);
p->fullhash = new_fullhash;
p->hash_chain = ct->table[nh];
ct->table[nh] = p;
@@ -2297,11 +2297,11 @@ void toku_cachefile_verify (CACHEFILE cf) {
void toku_cachetable_verify (CACHETABLE ct) {
cachetable_lock(ct);
- u_int32_t num_found = 0;
+ uint32_t num_found = 0;
// First clear all the verify flags by going through the hash chains
{
- u_int32_t i;
+ uint32_t i;
for (i=0; i<ct->table_size; i++) {
PAIR p;
for (p=ct->table[i]; p; p=p->hash_chain) {
@@ -2314,11 +2314,11 @@ void toku_cachetable_verify (CACHETABLE ct) {
// Now go through the clock chain, make sure everything in the LRU chain is hashed.
{
PAIR p;
- BOOL is_first = TRUE;
+ bool is_first = true;
for (p=ct->clock_head; ct->clock_head!=NULL && (p!=ct->clock_head || is_first); p=p->clock_next) {
- is_first=FALSE;
+ is_first=false;
PAIR p2;
- u_int32_t fullhash = p->fullhash;
+ uint32_t fullhash = p->fullhash;
//assert(fullhash==toku_cachetable_hash(p->cachefile, p->key));
for (p2=ct->table[fullhash&(ct->table_size-1)]; p2; p2=p2->hash_chain) {
if (p2==p) {
@@ -2351,9 +2351,9 @@ static void cachetable_flush_pair_for_close(void* extra) {
cachetable_only_write_locked_data(
ct,
p,
- FALSE, // not for a checkpoint, as we assert above
+ false, // not for a checkpoint, as we assert above
&attr,
- FALSE // not a clone
+ false // not a clone
);
p->dirty = CACHETABLE_CLEAN;
cachetable_unlock(ct);
@@ -2486,7 +2486,7 @@ toku_cachetable_close (CACHETABLE *ctp) {
}
cachetable_lock(ct);
cachetable_flush_cachefile(ct, NULL);
- u_int32_t i;
+ uint32_t i;
for (i=0; i<ct->table_size; i++) {
if (ct->table[i]) return -1;
}
@@ -2518,7 +2518,7 @@ int toku_cachetable_unpin_and_remove (
CACHETABLE ct = cachefile->cachetable;
PAIR p;
cachetable_lock(ct);
- u_int32_t fullhash = toku_cachetable_hash(cachefile, key);
+ uint32_t fullhash = toku_cachetable_hash(cachefile, key);
for (p=ct->table[fullhash&(ct->table_size-1)]; p; p=p->hash_chain) {
if (p->key.b==key.b && p->cachefile==cachefile) {
p->dirty = CACHETABLE_CLEAN; // clear the dirty bit. We're just supposed to remove it.
@@ -2531,10 +2531,10 @@ int toku_cachetable_unpin_and_remove (
//
// take care of key removal
//
- BOOL for_checkpoint = p->checkpoint_pending;
+ bool for_checkpoint = p->checkpoint_pending;
// now let's wipe out the pending bit, because we are
// removing the PAIR
- p->checkpoint_pending = FALSE;
+ p->checkpoint_pending = false;
//
// Here is a tricky thing.
// Later on in this function, we may release the
@@ -2635,7 +2635,7 @@ int toku_cachetable_unpin_and_remove (
}
static int
-set_filenum_in_array(OMTVALUE hv, u_int32_t index, void*arrayv) {
+set_filenum_in_array(OMTVALUE hv, uint32_t index, void*arrayv) {
FILENUM *array = (FILENUM *) arrayv;
FT h = (FT) hv;
array[index] = toku_cachefile_filenum(h->cf);
@@ -2739,7 +2739,7 @@ toku_cachetable_begin_checkpoint (CACHETABLE ct, TOKULOGGER logger) {
}
cf->next_in_checkpoint = ct->cachefiles_in_checkpoint;
ct->cachefiles_in_checkpoint = cf;
- cf->for_checkpoint = TRUE;
+ cf->for_checkpoint = true;
}
}
@@ -2816,7 +2816,7 @@ toku_cachetable_begin_checkpoint (CACHETABLE ct, TOKULOGGER logger) {
// belongs in the checkpoint.
// Now let's go back to the writer thread:
// - because the checkpoint pending bit was not set for the PAIR, the for_checkpoint parameter
- // passed into the flush callback is FALSE.
+ // passed into the flush callback is false.
// - as a result, the PAIR is written to disk, the current translation table is updated, but the
// inprogress translation table is NOT updated.
// - the PAIR is marked as clean because it was just written to disk
@@ -2846,7 +2846,7 @@ toku_cachetable_begin_checkpoint (CACHETABLE ct, TOKULOGGER logger) {
// we may end up clearing the pending bit before the
// current lock is ever released.
if (p->dirty || nb_mutex_writers(&p->value_nb_mutex)) {
- p->checkpoint_pending = TRUE;
+ p->checkpoint_pending = true;
if (ct->pending_head) {
ct->pending_head->pending_prev = p;
}
@@ -2968,7 +2968,7 @@ toku_cachetable_end_checkpoint(CACHETABLE ct, TOKULOGGER logger,
while ((cf = ct->cachefiles_in_checkpoint)) {
ct->cachefiles_in_checkpoint = cf->next_in_checkpoint;
cf->next_in_checkpoint = NULL;
- cf->for_checkpoint = FALSE;
+ cf->for_checkpoint = false;
// check that the callback exists, so that this function
// can be called from cachetable tests
if (cf->note_unpin_by_checkpoint) {
@@ -2996,7 +2996,7 @@ FILENUM toku_cachefile_filenum (CACHEFILE cf) {
// debug functions
int toku_cachetable_assert_all_unpinned (CACHETABLE ct) {
- u_int32_t i;
+ uint32_t i;
int some_pinned=0;
cachetable_lock(ct);
for (i=0; i<ct->table_size; i++) {
@@ -3031,7 +3031,7 @@ int toku_cachefile_count_pinned (CACHEFILE cf, int print_them) {
}
void toku_cachetable_print_state (CACHETABLE ct) {
- u_int32_t i;
+ uint32_t i;
cachetable_lock(ct);
for (i=0; i<ct->table_size; i++) {
PAIR p = ct->table[i];
@@ -3063,7 +3063,7 @@ int toku_cachetable_get_key_state (CACHETABLE ct, CACHEKEY key, CACHEFILE cf, vo
int *dirty_ptr, long long *pin_ptr, long *size_ptr) {
PAIR p;
int r = -1;
- u_int32_t fullhash = toku_cachetable_hash(cf, key);
+ uint32_t fullhash = toku_cachetable_hash(cf, key);
cachetable_lock(ct);
for (p = ct->table[fullhash&(ct->table_size-1)]; p; p = p->hash_chain) {
if (p->key.b == key.b && p->cachefile == cf) {
@@ -3088,7 +3088,7 @@ toku_cachefile_set_userdata (CACHEFILE cf,
void *userdata,
int (*log_fassociate_during_checkpoint)(CACHEFILE, void*),
int (*log_suppress_rollback_during_checkpoint)(CACHEFILE, void*),
- int (*close_userdata)(CACHEFILE, int, void*, char**, BOOL, LSN),
+ int (*close_userdata)(CACHEFILE, int, void*, char**, bool, LSN),
int (*checkpoint_userdata)(CACHEFILE, int, void*),
int (*begin_checkpoint_userdata)(LSN, void*),
int (*end_checkpoint_userdata)(CACHEFILE, int, void*),
@@ -3136,7 +3136,7 @@ toku_cachefile_is_unlink_on_close(CACHEFILE cf) {
return cf->unlink_on_close;
}
-u_int64_t toku_cachefile_size(CACHEFILE cf) {
+uint64_t toku_cachefile_size(CACHEFILE cf) {
int64_t file_size;
int fd = toku_cachefile_get_fd(cf);
int r = toku_os_get_file_size(fd, &file_size);
@@ -3205,8 +3205,8 @@ toku_cleaner_thread (void *cachetable_v)
int r;
CACHETABLE ct = (CACHETABLE) cachetable_v;
assert(ct);
- u_int32_t num_iterations = toku_get_cleaner_iterations(ct);
- for (u_int32_t i = 0; i < num_iterations; ++i) {
+ uint32_t num_iterations = toku_get_cleaner_iterations(ct);
+ for (uint32_t i = 0; i < num_iterations; ++i) {
cleaner_executions++;
cachetable_lock(ct);
PAIR best_pair = NULL;
@@ -3265,7 +3265,7 @@ toku_cleaner_thread (void *cachetable_v)
write_locked_pair_for_checkpoint(ct, best_pair);
}
- BOOL cleaner_callback_called = FALSE;
+ bool cleaner_callback_called = false;
// it's theoretically possible that after writing a PAIR for checkpoint, the
// PAIR's heuristic tells us nothing needs to be done. It is not possible
@@ -3278,7 +3278,7 @@ toku_cleaner_thread (void *cachetable_v)
best_pair->fullhash,
best_pair->write_extraargs);
assert_zero(r);
- cleaner_callback_called = TRUE;
+ cleaner_callback_called = true;
cachetable_lock(ct);
}
diff --git a/ft/cachetable.h b/ft/cachetable.h
index 67b1da371c9..663c5d5501c 100644
--- a/ft/cachetable.h
+++ b/ft/cachetable.h
@@ -28,12 +28,12 @@
typedef BLOCKNUM CACHEKEY;
-int toku_set_cleaner_period (CACHETABLE ct, u_int32_t new_period);
-u_int32_t toku_get_cleaner_period (CACHETABLE ct);
-u_int32_t toku_get_cleaner_period_unlocked (CACHETABLE ct);
-int toku_set_cleaner_iterations (CACHETABLE ct, u_int32_t new_iterations);
-u_int32_t toku_get_cleaner_iterations (CACHETABLE ct);
-u_int32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct);
+int toku_set_cleaner_period (CACHETABLE ct, uint32_t new_period);
+uint32_t toku_get_cleaner_period (CACHETABLE ct);
+uint32_t toku_get_cleaner_period_unlocked (CACHETABLE ct);
+int toku_set_cleaner_iterations (CACHETABLE ct, uint32_t new_iterations);
+uint32_t toku_get_cleaner_iterations (CACHETABLE ct);
+uint32_t toku_get_cleaner_iterations_unlocked (CACHETABLE ct);
// cachetable operations
@@ -121,14 +121,14 @@ enum cachetable_dirty {
// When for_checkpoint is true, this was a 'pending' write
// Returns: 0 if success, otherwise an error number.
// Can access fd (fd is protected by a readlock during call)
-typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL is_clone);
+typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
// The fetch callback is called when a thread is attempting to get and pin a memory
// object and it is not in the cachetable.
// Returns: 0 if success, otherwise an error number. The address and size of the object
// associated with the key are returned.
// Can access fd (fd is protected by a readlock during call)
-typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, u_int32_t fullhash, void **value_data, void **disk_data, PAIR_ATTR *sizep, int *dirtyp, void *read_extraargs);
+typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, uint32_t fullhash, void **value_data, void **disk_data, PAIR_ATTR *sizep, int *dirtyp, void *read_extraargs);
// The cachetable calls the partial eviction estimate callback to determine if
// partial eviction is a cheap operation that may be performed on the client thread
@@ -148,15 +148,15 @@ typedef void (*CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK)(void *ftnode_pv, void*
// Requires a write lock to be held on the PAIR in the cachetable while this function is called
typedef int (*CACHETABLE_PARTIAL_EVICTION_CALLBACK)(void *ftnode_pv, PAIR_ATTR old_attr, PAIR_ATTR* new_attr, void *write_extraargs);
-// The cachetable calls this function to determine if get_and_pin call requires a partial fetch. If this function returns TRUE,
+// The cachetable calls this function to determine if get_and_pin call requires a partial fetch. If this function returns true,
// then the cachetable will subsequently call CACHETABLE_PARTIAL_FETCH_CALLBACK to perform
-// a partial fetch. If this function returns FALSE, then the PAIR's value is returned to the caller as is.
+// a partial fetch. If this function returns false, then the PAIR's value is returned to the caller as is.
//
// An alternative to having this callback is to always call CACHETABLE_PARTIAL_FETCH_CALLBACK, and let
// CACHETABLE_PARTIAL_FETCH_CALLBACK decide whether to do any partial fetching or not.
// There is no particular reason why this alternative was not chosen.
// Requires: a read lock to be held on the PAIR
-typedef BOOL (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *ftnode_pv, void *read_extraargs);
+typedef bool (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *ftnode_pv, void *read_extraargs);
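A toy pf_req callback matching the typedef above; it answers cheaply, under the PAIR's read lock, whether the requested piece is missing. The node layout here is invented for illustration:

    struct toy_node { bool basement_in_memory[16]; };
    struct toy_read_extra { int wanted_basement; };
    static bool toy_pf_req_callback(void *ftnode_pv, void *read_extraargs) {
        toy_node *node = (toy_node *) ftnode_pv;
        toy_read_extra *e = (toy_read_extra *) read_extraargs;
        // true => the cachetable will call CACHETABLE_PARTIAL_FETCH_CALLBACK
        return !node->basement_in_memory[e->wanted_basement];
    }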
// The cachetable calls the partial fetch callback when a thread needs to read or decompress a subset of a PAIR into memory.
// An example is needing to read a basement node into memory. Another example is decompressing an internal node's
@@ -167,9 +167,9 @@ typedef BOOL (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *ftnode_pv, void
typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *value_data, void* disk_data, void *read_extraargs, int fd, PAIR_ATTR *sizep);
// TODO(leif) XXX TODO XXX
-typedef int (*CACHETABLE_CLEANER_CALLBACK)(void *ftnode_pv, BLOCKNUM blocknum, u_int32_t fullhash, void *write_extraargs);
+typedef int (*CACHETABLE_CLEANER_CALLBACK)(void *ftnode_pv, BLOCKNUM blocknum, uint32_t fullhash, void *write_extraargs);
-typedef void (*CACHETABLE_CLONE_CALLBACK)(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, BOOL for_checkpoint, void* write_extraargs);
+typedef void (*CACHETABLE_CLONE_CALLBACK)(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
typedef struct {
CACHETABLE_FLUSH_CALLBACK flush_callback;
@@ -180,14 +180,14 @@ typedef struct {
void* write_extraargs; // parameter for flush_callback, pe_est_callback, pe_callback, and cleaner_callback
} CACHETABLE_WRITE_CALLBACK;
-typedef void (*CACHETABLE_GET_KEY_AND_FULLHASH)(CACHEKEY* cachekey, u_int32_t* fullhash, void* extra);
+typedef void (*CACHETABLE_GET_KEY_AND_FULLHASH)(CACHEKEY* cachekey, uint32_t* fullhash, void* extra);
-typedef void (*CACHETABLE_REMOVE_KEY)(CACHEKEY* cachekey, BOOL for_checkpoint, void* extra);
+typedef void (*CACHETABLE_REMOVE_KEY)(CACHEKEY* cachekey, bool for_checkpoint, void* extra);
void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata,
int (*log_fassociate_during_checkpoint)(CACHEFILE, void*),
int (*log_suppress_rollback_during_checkpoint)(CACHEFILE, void*),
- int (*close_userdata)(CACHEFILE, int, void*, char **/*error_string*/, BOOL, LSN),
+ int (*close_userdata)(CACHEFILE, int, void*, char **/*error_string*/, bool, LSN),
int (*checkpoint_userdata)(CACHEFILE, int, void*),
int (*begin_checkpoint_userdata)(LSN, void*),
int (*end_checkpoint_userdata)(CACHEFILE, int, void*),
@@ -214,13 +214,13 @@ int toku_cachetable_put_with_dep_pairs(
PAIR_ATTR attr,
CACHETABLE_WRITE_CALLBACK write_callback,
void *get_key_and_fullhash_extra,
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty, // array stating dirty/cleanness of dependent pairs
CACHEKEY* key,
- u_int32_t* fullhash
+ uint32_t* fullhash
);
@@ -230,7 +230,7 @@ int toku_cachetable_put_with_dep_pairs(
// value pairs may be evicted from the cachetable when the cachetable gets too big.
// Returns: 0 if the memory object is placed into the cachetable, otherwise an
// error number.
-int toku_cachetable_put(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
+int toku_cachetable_put(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
void *value, PAIR_ATTR size,
CACHETABLE_WRITE_CALLBACK write_callback
);
@@ -249,19 +249,19 @@ int toku_cachetable_put(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
int toku_cachetable_get_and_pin_with_dep_pairs (
CACHEFILE cachefile,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
- BOOL may_modify_value,
+ bool may_modify_value,
void* read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
- u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
+ uint32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
CACHEKEY* dependent_keys, // array of cachekeys of dependent pairs
- u_int32_t* dependent_fullhash, //array of fullhashes of dependent pairs
+ uint32_t* dependent_fullhash, //array of fullhashes of dependent pairs
enum cachetable_dirty* dependent_dirty // array stating dirty/cleanness of dependent pairs
);
@@ -274,14 +274,14 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
int toku_cachetable_get_and_pin (
CACHEFILE cachefile,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
- BOOL may_modify_value,
+ bool may_modify_value,
void* read_extraargs // parameter for fetch_callback, pf_req_callback, and pf_callback
);
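A typical pin/mutate/unpin sequence against this interface; the my_* callbacks are assumed to be defined by the caller, and make_new_attr() is a hypothetical helper standing in for PAIR_ATTR construction:

    static void touch_pair(CACHEFILE cf, CACHEKEY key) {
        void *value;
        long size;
        uint32_t fullhash = toku_cachetable_hash(cf, key);
        int r = toku_cachetable_get_and_pin(cf, key, fullhash, &value, &size,
                                            my_write_callback, my_fetch_cb,
                                            my_pf_req_cb, my_pf_cb,
                                            true /*may_modify_value*/,
                                            NULL /*read_extraargs*/);
        if (r == 0) {
            // ... mutate *value while holding the pin ...
            PAIR_ATTR new_attr = make_new_attr();  // hypothetical helper
            toku_cachetable_unpin(cf, key, fullhash, CACHETABLE_DIRTY, new_attr);
        }
    }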
@@ -292,11 +292,11 @@ void toku_cachetable_pf_pinned_pair(
void* read_extraargs,
CACHEFILE cf,
CACHEKEY key,
- u_int32_t fullhash
+ uint32_t fullhash
);
struct unlockers {
- BOOL locked;
+ bool locked;
void (*f)(void*extra);
void *extra;
UNLOCKERS next;
@@ -308,38 +308,38 @@ struct unlockers {
int toku_cachetable_get_and_pin_nonblocking (
CACHEFILE cf,
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void**value,
long *sizep,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback __attribute__((unused)),
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback __attribute__((unused)),
- BOOL may_modify_value,
+ bool may_modify_value,
void *read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
UNLOCKERS unlockers
);
#define CAN_RELEASE_LOCK_DURING_IO
-int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/, void**);
+int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, void**);
// Effect: Maybe get and pin a memory object.
// This function is similar to the get_and_pin function except that it
// will not attempt to fetch a memory object that is not in the cachetable or requires any kind of blocking to get it.
// Returns: If the item is already in memory, then return 0 and store it in the
// void**. If the item is not in memory, then return a nonzero error number.
-int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/, void**);
+int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, void**);
// Effect: Like maybe get and pin, but may pin a clean pair.
-int toku_cachetable_unpin(CACHEFILE, CACHEKEY, u_int32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
+int toku_cachetable_unpin(CACHEFILE, CACHEKEY, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
// Effect: Unpin a memory object
// Modifies: If the memory object is in the cachetable, then OR the dirty flag,
// update the size, and release the read lock on the memory object.
// Returns: 0 if success, otherwise returns an error number.
// Requires: The ct is NOT locked.
-int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE, CACHEKEY, u_int32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
+int toku_cachetable_unpin_ct_prelocked_no_flush(CACHEFILE, CACHEKEY, uint32_t fullhash, enum cachetable_dirty dirty, PAIR_ATTR size);
// Effect: The same as toku_cachetable_unpin, except that the ct must already be locked.
// Requires: The ct is locked.
@@ -347,13 +347,13 @@ int toku_cachetable_unpin_and_remove (CACHEFILE, CACHEKEY, CACHETABLE_REMOVE_KEY
// Effect: Remove an object from the cachetable. Don't write it back.
// Requires: The object must be pinned exactly once.
-int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
+int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, uint32_t fullhash,
CACHETABLE_WRITE_CALLBACK write_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
void *read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
- BOOL *doing_prefetch);
+ bool *doing_prefetch);
// Effect: Prefetch a memory object for a given key into the cachetable
// Precondition: The cachetable mutex is NOT held.
// Postcondition: The cachetable mutex is NOT held.
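A caller-side sketch of the prefetch contract described above; doing_prefetch reports whether a background fetch was actually scheduled (it is not when the pair is already cached or already being fetched). The my_* callbacks are assumed to exist:

    static void warm_cache(CACHEFILE cf, CACHEKEY key) {
        bool doing_prefetch;
        int r = toku_cachefile_prefetch(cf, key, toku_cachetable_hash(cf, key),
                                        my_write_callback,
                                        my_fetch_cb, my_pf_req_cb, my_pf_cb,
                                        NULL /*read_extraargs*/, &doing_prefetch);
        if (r == 0 && !doing_prefetch) {
            // nothing was enqueued: the pair was already present or in flight
        }
    }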
@@ -387,9 +387,9 @@ int toku_cachetable_rename (CACHEFILE cachefile, CACHEKEY oldkey, CACHEKEY newke
// the cachetable. The flush callback is called for each of these objects. The
// close function does not return until all of the objects are evicted. The cachefile
// object is freed.
-// If oplsn_valid is TRUE then use oplsn as the LSN of the close instead of asking the logger. oplsn_valid being TRUE is only allowed during recovery, and requires that you are removing the last reference (otherwise the lsn wouldn't make it in.)
+// If oplsn_valid is true then use oplsn as the LSN of the close instead of asking the logger. oplsn_valid being true is only allowed during recovery, and requires that you are removing the last reference (otherwise the lsn wouldn't make it in.)
// Returns: 0 if success, otherwise returns an error number.
-int toku_cachefile_close (CACHEFILE*, char **error_string, BOOL oplsn_valid, LSN oplsn);
+int toku_cachefile_close (CACHEFILE*, char **error_string, bool oplsn_valid, LSN oplsn);
// Flush the cachefile.
// Effect: Flush everything owned by the cachefile from the cachetable. All dirty
@@ -429,9 +429,9 @@ TOKULOGGER toku_cachefile_logger (CACHEFILE);
FILENUM toku_cachefile_filenum (CACHEFILE);
// Effect: Return a 32-bit hash key. The hash key shall be suitable for use with bitmasking into a table whose size is a power of two.
-u_int32_t toku_cachetable_hash (CACHEFILE cachefile, CACHEKEY key);
+uint32_t toku_cachetable_hash (CACHEFILE cachefile, CACHEKEY key);
-u_int32_t toku_cachefile_fullhash_of_header (CACHEFILE cachefile);
+uint32_t toku_cachefile_fullhash_of_header (CACHEFILE cachefile);
// debug functions
@@ -462,7 +462,7 @@ void toku_cachetable_print_hash_histogram (void) __attribute__((__visibility__("
void toku_cachetable_maybe_flush_some(CACHETABLE ct);
// for stat64
-u_int64_t toku_cachefile_size(CACHEFILE cf);
+uint64_t toku_cachefile_size(CACHEFILE cf);
typedef enum {
CT_MISS = 0,
@@ -484,7 +484,7 @@ typedef enum {
} ct_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[CT_STATUS_NUM_ROWS];
} CACHETABLE_STATUS_S, *CACHETABLE_STATUS;
diff --git a/ft/checkpoint.cc b/ft/checkpoint.cc
index f68a4ffad20..a92d57202a8 100644
--- a/ft/checkpoint.cc
+++ b/ft/checkpoint.cc
@@ -110,9 +110,9 @@ static LSN last_completed_checkpoint_lsn;
static toku_pthread_rwlock_t checkpoint_safe_lock;
static toku_pthread_rwlock_t multi_operation_lock;
-static BOOL initialized = FALSE; // sanity check
-static volatile BOOL locked_mo = FALSE; // true when the multi_operation write lock is held (by checkpoint)
-static volatile BOOL locked_cs = FALSE; // true when the checkpoint_safe write lock is held (by checkpoint)
+static bool initialized = false; // sanity check
+static volatile bool locked_mo = false; // true when the multi_operation write lock is held (by checkpoint)
+static volatile bool locked_cs = false; // true when the checkpoint_safe write lock is held (by checkpoint)
// Note following static functions are called from checkpoint internal logic only,
@@ -131,7 +131,7 @@ multi_operation_lock_init(void) {
#endif
toku_pthread_rwlock_init(&multi_operation_lock, &attr);
pthread_rwlockattr_destroy(&attr);
- locked_mo = FALSE;
+ locked_mo = false;
}
static void
@@ -142,19 +142,19 @@ multi_operation_lock_destroy(void) {
static void
multi_operation_checkpoint_lock(void) {
toku_pthread_rwlock_wrlock(&multi_operation_lock);
- locked_mo = TRUE;
+ locked_mo = true;
}
static void
multi_operation_checkpoint_unlock(void) {
- locked_mo = FALSE;
+ locked_mo = false;
toku_pthread_rwlock_wrunlock(&multi_operation_lock);
}
static void
checkpoint_safe_lock_init(void) {
toku_pthread_rwlock_init(&checkpoint_safe_lock, NULL);
- locked_cs = FALSE;
+ locked_cs = false;
}
static void
@@ -165,12 +165,12 @@ checkpoint_safe_lock_destroy(void) {
static void
checkpoint_safe_checkpoint_lock(void) {
toku_pthread_rwlock_wrlock(&checkpoint_safe_lock);
- locked_cs = TRUE;
+ locked_cs = true;
}
static void
checkpoint_safe_checkpoint_unlock(void) {
- locked_cs = FALSE;
+ locked_cs = false;
toku_pthread_rwlock_wrunlock(&checkpoint_safe_lock);
}
@@ -211,14 +211,14 @@ void
toku_checkpoint_init(void) {
multi_operation_lock_init();
checkpoint_safe_lock_init();
- initialized = TRUE;
+ initialized = true;
}
void
toku_checkpoint_destroy(void) {
multi_operation_lock_destroy();
checkpoint_safe_lock_destroy();
- initialized = FALSE;
+ initialized = false;
}
#define SET_CHECKPOINT_FOOTPRINT(x) STATUS_VALUE(CP_FOOTPRINT) = footprint_offset + x
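Both locks above follow the same reader/writer pattern: client operations take the rwlock for read (many at a time) and the checkpointer takes it for write, so a checkpoint waits for in-flight operations and holds off new ones. The pattern in isolation, with a plain POSIX rwlock standing in for toku_pthread_rwlock:

    #include <pthread.h>
    static pthread_rwlock_t multi_op = PTHREAD_RWLOCK_INITIALIZER;
    static void client_operation(void) {
        pthread_rwlock_rdlock(&multi_op);   // many clients may hold this at once
        // ... perform the operation ...
        pthread_rwlock_unlock(&multi_op);
    }
    static void checkpointer(void) {
        pthread_rwlock_wrlock(&multi_op);   // excludes all client operations
        // ... take the checkpoint ...
        pthread_rwlock_unlock(&multi_op);
    }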
diff --git a/ft/checkpoint.h b/ft/checkpoint.h
index eb083bbf8cc..0b476694945 100644
--- a/ft/checkpoint.h
+++ b/ft/checkpoint.h
@@ -8,12 +8,12 @@
#ident "$Id$"
-int toku_set_checkpoint_period(CACHETABLE ct, u_int32_t new_period);
+int toku_set_checkpoint_period(CACHETABLE ct, uint32_t new_period);
//Effect: Change [end checkpoint (n) - begin checkpoint (n+1)] delay to
// new_period seconds. 0 means disable.
-u_int32_t toku_get_checkpoint_period(CACHETABLE ct);
-u_int32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
+uint32_t toku_get_checkpoint_period(CACHETABLE ct);
+uint32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
/******
@@ -109,7 +109,7 @@ typedef enum {
} cp_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[CP_STATUS_NUM_ROWS];
} CHECKPOINT_STATUS_S, *CHECKPOINT_STATUS;
diff --git a/ft/compression-ratio/cratio.cc b/ft/compression-ratio/cratio.cc
index 8dcc5c63eec..df3e84be023 100644
--- a/ft/compression-ratio/cratio.cc
+++ b/ft/compression-ratio/cratio.cc
@@ -38,7 +38,7 @@ measure_header (int fd, toku_off_t off, // read header from this offset
r=pread(fd, fbuf, 12, off);
assert(r==12);
assert(memcmp(fbuf,"tokudata",8)==0);
- int bsize = toku_dtoh32(*(u_int32_t*)(fbuf+8));
+ int bsize = toku_dtoh32(*(uint32_t*)(fbuf+8));
//printf("Bsize=%d\n", bsize);
(*usize)+=bsize;
assert(bsize<=NSIZE);
@@ -64,8 +64,8 @@ measure_node (int fd, toku_off_t off, // read header from this offset
assert(r==24);
//printf("fbuf[0..7]=%c%c%c%c%c%c%c%c\n", fbuf[0], fbuf[1], fbuf[2], fbuf[3], fbuf[4], fbuf[5], fbuf[6], fbuf[7]);
assert(memcmp(fbuf,"tokuleaf",8)==0 || memcmp(fbuf, "tokunode", 8)==0);
- assert(8==toku_dtoh32(*(u_int32_t*)(fbuf+8))); // check file version
- int bsize = toku_dtoh32(*(u_int32_t*)(fbuf+20));
+ assert(8==toku_dtoh32(*(uint32_t*)(fbuf+8))); // check file version
+ int bsize = toku_dtoh32(*(uint32_t*)(fbuf+20));
//printf("Bsize=%d\n", bsize);
(*usize)+=bsize;
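The *(uint32_t*)(fbuf+N) idiom above assumes the buffer is suitably aligned; an alignment-safe way to read one of those 32-bit on-disk fields (byte-order conversion via toku_dtoh32 left out):

    #include <cstdint>
    #include <cstring>
    static uint32_t read_u32_at(const char *buf, std::size_t off) {
        uint32_t v;
        std::memcpy(&v, buf + off, sizeof v);  // no alignment requirement
        return v;  // pass through toku_dtoh32() to get host byte order
    }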
diff --git a/ft/dbufio.cc b/ft/dbufio.cc
index d7f5410de8f..eac99d7ddf0 100644
--- a/ft/dbufio.cc
+++ b/ft/dbufio.cc
@@ -22,14 +22,14 @@ struct dbufio_file {
// need the mutex to modify these
struct dbufio_file *next;
- BOOL second_buf_ready; // if true, the i/o thread is not touching anything.
+ bool second_buf_ready; // if true, the i/o thread is not touching anything.
// consumers own [0] and the i/o thread owns [1]; they are swapped by the consumer only when the condition mutex is held and second_buf_ready is true.
char *buf[2];
size_t n_in_buf[2];
int error_code[2]; // includes errno or eof. [0] is the error code associated with buf[0], [1] is the code for buf[1]
- BOOL io_done;
+ bool io_done;
};
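The struct encodes a classic double buffer: the consumer drains buf[0] while the I/O thread fills buf[1], and the two are swapped under the mutex once second_buf_ready is set. The swap in isolation, using raw pthread primitives in place of toku_mutex/toku_cond:

    #include <pthread.h>
    struct toy_dbuf {
        pthread_mutex_t mutex;
        pthread_cond_t cond;
        char *buf[2];
        bool second_buf_ready;
    };
    static void consumer_swap(toy_dbuf *d) {
        pthread_mutex_lock(&d->mutex);
        while (!d->second_buf_ready)
            pthread_cond_wait(&d->cond, &d->mutex);  // wait for the I/O thread
        char *tmp = d->buf[0];       // consumer takes over the filled buffer
        d->buf[0] = d->buf[1];
        d->buf[1] = tmp;
        d->second_buf_ready = false; // buf[1] may now be refilled
        pthread_mutex_unlock(&d->mutex);
    }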
@@ -48,7 +48,7 @@ struct dbufio_fileset {
struct dbufio_file *head, *tail; // must have the mutex to fiddle with these.
size_t bufsize; // the bufsize is the constant (the same for all buffers).
- BOOL panic;
+ bool panic;
int panic_errno;
toku_pthread_t iothread;
};
@@ -68,11 +68,11 @@ static void panic (DBUFIO_FILESET bfs, int r) {
if (bfs->panic) return;
// may need a cilk fake mutex here to convince the race detector that it's OK.
bfs->panic_errno = r; // Don't really care about a race on this variable... Writes to it are atomic, so at least one good panic reason will be stored.
- bfs->panic = TRUE;
+ bfs->panic = true;
return;
}
-static BOOL paniced (DBUFIO_FILESET bfs) {
+static bool paniced (DBUFIO_FILESET bfs) {
// may need a cilk fake mutex here to convince the race detector that it's OK.
return bfs->panic;
}
@@ -110,7 +110,7 @@ static void* io_thread (void *v)
} else {
// Some I/O needs to be done.
//printf("%s:%d Need I/O\n", __FILE__, __LINE__);
- assert(dbf->second_buf_ready == FALSE);
+ assert(dbf->second_buf_ready == false);
assert(!dbf->io_done);
bfs->head = dbf->next;
if (bfs->head==NULL) bfs->tail=NULL;
@@ -132,7 +132,7 @@ static void* io_thread (void *v)
// End of file. Save it.
dbf->error_code[1] = EOF;
dbf->n_in_buf[1] = 0;
- dbf->io_done = TRUE;
+ dbf->io_done = true;
} else {
dbf->error_code[1] = 0;
@@ -152,7 +152,7 @@ static void* io_thread (void *v)
bfs->n_not_done--;
}
//printf("%s:%d n_not_done=%d\n", __FILE__, __LINE__, bfs->n_not_done);
- dbf->second_buf_ready = TRUE;
+ dbf->second_buf_ready = true;
toku_cond_broadcast(&bfs->cond);
//printf("%s:%d did broadcast=%d\n", __FILE__, __LINE__, bfs->n_not_done);
// Still have the lock so go around the loop
@@ -166,7 +166,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
int result = 0;
DBUFIO_FILESET MALLOC(bfs);
if (bfs==0) { result = get_error_errno(); }
- BOOL mutex_inited = FALSE, cond_inited = FALSE;
+ bool mutex_inited = false, cond_inited = false;
if (result==0) {
MALLOC_N(N, bfs->files);
if (bfs->files==NULL) { result = get_error_errno(); }
@@ -179,11 +179,11 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
//printf("%s:%d here\n", __FILE__, __LINE__);
if (result==0) {
toku_mutex_init(&bfs->mutex, NULL);
- mutex_inited = TRUE;
+ mutex_inited = true;
}
if (result==0) {
toku_cond_init(&bfs->cond, NULL);
- cond_inited = TRUE;
+ cond_inited = true;
}
if (result==0) {
bfs->N = N;
@@ -194,7 +194,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
bfs->files[i].offset_in_buf = 0;
bfs->files[i].offset_in_file = 0;
bfs->files[i].next = NULL;
- bfs->files[i].second_buf_ready = FALSE;
+ bfs->files[i].second_buf_ready = false;
for (int j=0; j<2; j++) {
if (result==0) {
MALLOC_N(bufsize, bfs->files[i].buf[j]);
@@ -203,7 +203,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
bfs->files[i].n_in_buf[j] = 0;
bfs->files[i].error_code[j] = 0;
}
- bfs->files[i].io_done = FALSE;
+ bfs->files[i].io_done = false;
{
ssize_t r = toku_os_read(bfs->files[i].fd, bfs->files[i].buf[0], bufsize);
if (r<0) {
@@ -211,7 +211,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
break;
} else if (r==0) {
// it's EOF
- bfs->files[i].io_done = TRUE;
+ bfs->files[i].io_done = true;
bfs->n_not_done--;
bfs->files[i].error_code[0] = EOF;
} else {
@@ -222,7 +222,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
}
}
bfs->bufsize = bufsize;
- bfs->panic = FALSE;
+ bfs->panic = false;
bfs->panic_errno = 0;
}
//printf("Creating IO thread\n");
@@ -331,7 +331,7 @@ int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t co
dbf->buf[1] = tmp;
}
dbf->error_code[0] = dbf->error_code[1];
- dbf->second_buf_ready = FALSE;
+ dbf->second_buf_ready = false;
dbf->offset_in_buf = 0;
if (!dbf->io_done) {
// Don't enqueue it if the I/O is all done.
diff --git a/ft/fifo.cc b/ft/fifo.cc
index ee2c4a03043..dcf0a44567b 100644
--- a/ft/fifo.cc
+++ b/ft/fifo.cc
@@ -153,7 +153,7 @@ void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo) {
*cloned_fifo = new_fifo;
}
-BOOL toku_are_fifos_same(FIFO fifo1, FIFO fifo2) {
+bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2) {
return (
fifo1->memory_used == fifo2->memory_used &&
memcmp(fifo1->memory, fifo2->memory, fifo1->memory_used) == 0
diff --git a/ft/fifo.h b/ft/fifo.h
index 17a8230079f..633630247d6 100644
--- a/ft/fifo.h
+++ b/ft/fifo.h
@@ -63,7 +63,7 @@ unsigned long toku_fifo_memory_size_in_use(FIFO fifo); // return how much memor
unsigned long toku_fifo_memory_footprint(FIFO fifo); // return how much memory the fifo occupies
//These two are problematic, since I don't want to malloc() the bytevecs, but dequeueing the fifo frees the memory.
-//int toku_fifo_peek_deq (FIFO, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen, u_int32_t *type, TXNID *xid);
+//int toku_fifo_peek_deq (FIFO, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen, uint32_t *type, TXNID *xid);
//int toku_fifo_peek_deq_cmdstruct (FIFO, FT_MSG, DBT*, DBT*); // fill in the FT_MSG, using the two DBTs for the DBT part.
void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, void*), void*);
@@ -78,7 +78,7 @@ void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,IT
MSN msnvar = e->msn; \
XIDS xidsvar = &e->xids_s; \
bytevec keyvar = xids_get_end_of_array(xidsvar); \
- bytevec datavar = (const u_int8_t*)keyvar + e->keylen; \
+ bytevec datavar = (const uint8_t*)keyvar + e->keylen; \
bool is_freshvar = e->is_fresh; \
body; \
} })
@@ -94,7 +94,7 @@ const struct fifo_entry *toku_fifo_get_entry(FIFO fifo, long off);
void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo);
-BOOL toku_are_fifos_same(FIFO fifo1, FIFO fifo2);
+bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2);
diff --git a/ft/ft-cachetable-wrappers.cc b/ft/ft-cachetable-wrappers.cc
index c59faf89908..0ed926c8c92 100644
--- a/ft/ft-cachetable-wrappers.cc
+++ b/ft/ft-cachetable-wrappers.cc
@@ -13,7 +13,7 @@
static void
ftnode_get_key_and_fullhash(
BLOCKNUM* cachekey,
- u_int32_t* fullhash,
+ uint32_t* fullhash,
void* extra)
{
FT h = (FT) extra;
@@ -26,18 +26,18 @@ ftnode_get_key_and_fullhash(
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
- u_int32_t* fullhash, //output
+ uint32_t* fullhash, //output
FTNODE* result)
{
FTNODE XMALLOC(new_node);
CACHEFILE dependent_cf[num_dependent_nodes];
BLOCKNUM dependent_keys[num_dependent_nodes];
- u_int32_t dependent_fullhash[num_dependent_nodes];
+ uint32_t dependent_fullhash[num_dependent_nodes];
enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
- for (u_int32_t i = 0; i < num_dependent_nodes; i++) {
+ for (uint32_t i = 0; i < num_dependent_nodes; i++) {
dependent_cf[i] = h->cf;
dependent_keys[i] = dependent_nodes[i]->thisnodename;
dependent_fullhash[i] = toku_cachetable_hash(h->cf, dependent_nodes[i]->thisnodename);
@@ -69,10 +69,10 @@ create_new_ftnode_with_dep_nodes(
FTNODE *result,
int height,
int n_children,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes)
{
- u_int32_t fullhash = 0;
+ uint32_t fullhash = 0;
BLOCKNUM name;
cachetable_put_empty_node_with_dep_nodes(
@@ -122,18 +122,18 @@ int
toku_pin_ftnode(
FT_HANDLE brt,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors,
const PIVOT_BOUNDS bounds,
FTNODE_FETCH_EXTRA bfe,
- BOOL may_modify_node,
- BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
+ bool may_modify_node,
+ bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
- BOOL* msgs_applied)
+ bool* msgs_applied)
{
void *node_v;
- *msgs_applied = FALSE;
+ *msgs_applied = false;
int r = toku_cachetable_get_and_pin_nonblocking(
brt->ft->cf,
blocknum,
@@ -165,19 +165,19 @@ void
toku_pin_ftnode_off_client_thread(
FT h,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
- BOOL may_modify_node,
- u_int32_t num_dependent_nodes,
+ bool may_modify_node,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
FTNODE *node_p)
{
void *node_v;
CACHEFILE dependent_cf[num_dependent_nodes];
BLOCKNUM dependent_keys[num_dependent_nodes];
- u_int32_t dependent_fullhash[num_dependent_nodes];
+ uint32_t dependent_fullhash[num_dependent_nodes];
enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
- for (u_int32_t i = 0; i < num_dependent_nodes; i++) {
+ for (uint32_t i = 0; i < num_dependent_nodes; i++) {
dependent_cf[i] = h->cf;
dependent_keys[i] = dependent_nodes[i]->thisnodename;
dependent_fullhash[i] = toku_cachetable_hash(h->cf, dependent_nodes[i]->thisnodename);
diff --git a/ft/ft-cachetable-wrappers.h b/ft/ft-cachetable-wrappers.h
index 0edda765b37..16761820bbd 100644
--- a/ft/ft-cachetable-wrappers.h
+++ b/ft/ft-cachetable-wrappers.h
@@ -18,10 +18,10 @@
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
- u_int32_t* fullhash, //output
+ uint32_t* fullhash, //output
FTNODE* result
);
@@ -36,7 +36,7 @@ create_new_ftnode_with_dep_nodes(
FTNODE *result,
int height,
int n_children,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
@@ -64,15 +64,15 @@ int
toku_pin_ftnode(
FT_HANDLE brt,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors,
const PIVOT_BOUNDS pbounds,
FTNODE_FETCH_EXTRA bfe,
- BOOL may_modify_node,
- BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
+ bool may_modify_node,
+ bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
- BOOL* msgs_applied
+ bool* msgs_applied
);
/**
@@ -86,10 +86,10 @@ void
toku_pin_ftnode_off_client_thread(
FT h,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
- BOOL may_modify_node,
- u_int32_t num_dependent_nodes,
+ bool may_modify_node,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
FTNODE *node_p
);
diff --git a/ft/ft-flusher.cc b/ft/ft-flusher.cc
index 3fb9fe54662..1b6b14034d9 100644
--- a/ft/ft-flusher.cc
+++ b/ft/ft-flusher.cc
@@ -171,7 +171,7 @@ ft_merge_child(
FT h,
FTNODE node,
int childnum_to_merge,
- BOOL *did_react,
+ bool *did_react,
struct flusher_advice *fa);
static int
@@ -237,7 +237,7 @@ default_merge_child(struct flusher_advice *fa,
//
// it is responsibility of ft_merge_child to unlock parent
//
- BOOL did_react;
+ bool did_react;
ft_merge_child(h, parent, childnum, &did_react, fa);
}
@@ -296,7 +296,7 @@ flt_flusher_advice_init(struct flusher_advice *fa, struct flush_status_update_ex
}
struct ctm_extra {
- BOOL is_last_child;
+ bool is_last_child;
DBT target_key;
};
@@ -368,11 +368,11 @@ ct_maybe_merge_child(struct flusher_advice *fa,
// to be merged
//
if (childnum == (parent->n_children - 1)) {
- ctme.is_last_child = TRUE;
+ ctme.is_last_child = true;
pivot_to_save = childnum - 1;
}
else {
- ctme.is_last_child = FALSE;
+ ctme.is_last_child = false;
pivot_to_save = childnum;
}
const DBT *pivot = &parent->childkeys[pivot_to_save];
@@ -397,12 +397,12 @@ ct_maybe_merge_child(struct flusher_advice *fa,
{
toku_ft_grab_treelock(h);
- u_int32_t fullhash;
+ uint32_t fullhash;
CACHEKEY root;
toku_calculate_root_offset_pointer(h, &root, &fullhash);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, h);
- toku_pin_ftnode_off_client_thread(h, root, fullhash, &bfe, TRUE, 0, NULL, &root_node);
+ toku_pin_ftnode_off_client_thread(h, root, fullhash, &bfe, true, 0, NULL, &root_node);
toku_assert_entire_node_in_memory(root_node);
toku_ft_release_treelock(h);
@@ -568,7 +568,7 @@ verify_all_in_mempool(FTNODE node)
}
}
-static u_int64_t
+static uint64_t
ftleaf_disk_size(FTNODE node)
// Effect: get the total disk size of the leafentries in a leaf node
{
@@ -592,7 +592,7 @@ ftleaf_disk_size(FTNODE node)
static void
ftleaf_get_split_loc(
FTNODE node,
- u_int64_t sumlesizes,
+ uint64_t sumlesizes,
int* bn_index, // which basement within leaf
int* le_index // which key within basement
)
@@ -601,7 +601,7 @@ ftleaf_get_split_loc(
// le_index is index into OMT of the last key that should be on the left side of the split.
{
assert(node->height == 0);
- u_int32_t size_so_far = 0;
+ uint32_t size_so_far = 0;
for (int i = 0; i < node->n_children; i++) {
OMT curr_buffer = BLB_BUFFER(node, i);
uint32_t n_leafentries = toku_omt_size(curr_buffer);
@@ -685,8 +685,8 @@ ftleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
- BOOL create_new_node,
- u_int32_t num_dependent_nodes,
+ bool create_new_node,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes)
// Effect: Split a leaf node.
// Argument "node" is node to be split.
@@ -712,7 +712,7 @@ ftleaf_split(
FTNODE B;
- u_int32_t fullhash;
+ uint32_t fullhash;
BLOCKNUM name;
if (create_new_node) {
@@ -755,7 +755,7 @@ ftleaf_split(
{
{
// TODO: (Zardosht) see if we can/should make this faster, we iterate over the rows twice
- u_int64_t sumlesizes=0;
+ uint64_t sumlesizes=0;
sumlesizes = ftleaf_disk_size(node);
// TODO: (Zardosht) #3537, figure out serial insertion optimization again later
// split in half
@@ -815,7 +815,7 @@ ftleaf_split(
// handle the move of a subset of data in last_bn_on_left from node to B
if (!split_on_boundary) {
BP_STATE(B,curr_dest_bn_index) = PT_AVAIL;
- u_int32_t diff_size = 0;
+ uint32_t diff_size = 0;
destroy_basement_node (BLB(B, curr_dest_bn_index)); // Destroy B's empty OMT, so I can rebuild it from an array
set_BNULL(B, curr_dest_bn_index);
set_BLB(B, curr_dest_bn_index, toku_create_empty_bn_no_buffer());
@@ -896,7 +896,7 @@ ft_nonleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes)
{
//VERIFY_NODE(t,node);
@@ -993,7 +993,7 @@ ft_split_child(
dep_nodes[0] = node;
dep_nodes[1] = child;
if (child->height==0) {
- ftleaf_split(h, child, &nodea, &nodeb, &splitk, TRUE, 2, dep_nodes);
+ ftleaf_split(h, child, &nodea, &nodeb, &splitk, true, 2, dep_nodes);
} else {
ft_nonleaf_split(h, child, &nodea, &nodeb, &splitk, 2, dep_nodes);
}
@@ -1080,7 +1080,7 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
b->dirty = 1;
OMT a_last_buffer = BLB_BUFFER(a, a->n_children-1);
- // this BOOL states if the last basement node in a has any items or not
+ // this bool states if the last basement node in a has any items or not
// If it does, then it stays in the merge. If it does not, the last basement node
// of a gets eliminated because we do not have a pivot to store for it (because it has no elements)
const bool a_has_tail = toku_omt_size(a_last_buffer) > 0;
@@ -1119,7 +1119,7 @@ merge_leaf_nodes(FTNODE a, FTNODE b)
a->totalchildkeylens += keylen;
}
- u_int32_t offset = a_has_tail ? a->n_children : a->n_children - 1;
+ uint32_t offset = a_has_tail ? a->n_children : a->n_children - 1;
for (int i = 0; i < b->n_children; i++) {
a->bp[i+offset] = b->bp[i];
memset(&b->bp[i],0,sizeof(b->bp[0]));
@@ -1151,7 +1151,7 @@ balance_leaf_nodes(
merge_leaf_nodes(a,b);
// now split them
// because we are not creating a new node, we can pass in no dependent nodes
- ftleaf_split(NULL, a, &a, &b, splitk, FALSE, 0, NULL);
+ ftleaf_split(NULL, a, &a, &b, splitk, false, 0, NULL);
return 0;
}
@@ -1161,34 +1161,34 @@ maybe_merge_pinned_leaf_nodes(
FTNODE a,
FTNODE b,
DBT *parent_splitk,
- BOOL *did_merge,
- BOOL *did_rebalance,
+ bool *did_merge,
+ bool *did_rebalance,
DBT *splitk)
-// Effect: Either merge a and b into one node (merge them into a) and set *did_merge = TRUE.
+// Effect: Either merge a and b into one node (merge them into a) and set *did_merge = true.
// (We do this if the resulting node is not fissible)
-// or distribute the leafentries evenly between a and b, and set *did_rebalance = TRUE.
+// or distribute the leafentries evenly between a and b, and set *did_rebalance = true.
// (If a and b are already evenly distributed, we may do nothing.)
{
unsigned int sizea = toku_serialize_ftnode_size(a);
unsigned int sizeb = toku_serialize_ftnode_size(b);
if ((sizea + sizeb)*4 > (a->nodesize*3)) {
// the combined size is more than 3/4 of a node, so don't merge them.
- *did_merge = FALSE;
+ *did_merge = false;
if (sizea*4 > a->nodesize && sizeb*4 > a->nodesize) {
// no need to do anything if both are more than 1/4 of a node.
- *did_rebalance = FALSE;
+ *did_rebalance = false;
toku_clone_dbt(splitk, *parent_splitk);
return;
}
// one is less than 1/4 of a node, and together they are more than 3/4 of a node.
toku_free(parent_splitk->data); // We don't need the parent_splitk any more. If we need a splitk (if we don't merge) we'll malloc a new one.
- *did_rebalance = TRUE;
+ *did_rebalance = true;
int r = balance_leaf_nodes(a, b, splitk);
assert(r==0);
} else {
// we are merging them.
- *did_merge = TRUE;
- *did_rebalance = FALSE;
+ *did_merge = true;
+ *did_rebalance = false;
toku_init_dbt(splitk);
toku_free(parent_splitk->data); // if we are merging, the splitk gets freed.
merge_leaf_nodes(a, b);
@@ -1200,8 +1200,8 @@ maybe_merge_pinned_nonleaf_nodes(
const DBT *parent_splitk,
FTNODE a,
FTNODE b,
- BOOL *did_merge,
- BOOL *did_rebalance,
+ bool *did_merge,
+ bool *did_rebalance,
DBT *splitk)
{
toku_assert_entire_node_in_memory(a);
@@ -1229,8 +1229,8 @@ maybe_merge_pinned_nonleaf_nodes(
a->dirty = 1;
b->dirty = 1;
- *did_merge = TRUE;
- *did_rebalance = FALSE;
+ *did_merge = true;
+ *did_rebalance = false;
toku_init_dbt(splitk);
STATUS_VALUE(FT_FLUSHER_MERGE_NONLEAF)++;
@@ -1242,12 +1242,12 @@ maybe_merge_pinned_nodes(
DBT *parent_splitk,
FTNODE a,
FTNODE b,
- BOOL *did_merge,
- BOOL *did_rebalance,
+ bool *did_merge,
+ bool *did_rebalance,
DBT *splitk)
-// Effect: either merge a and b into one node (merge them into a) and set *did_merge = TRUE.
+// Effect: either merge a and b into one node (merge them into a) and set *did_merge = true.
// (We do this if the resulting node is not fissible)
-// or distribute a and b evenly and set *did_merge = FALSE and *did_rebalance = TRUE
+// or distribute a and b evenly and set *did_merge = false and *did_rebalance = true
// (If a and b are already evenly distributed, we may do nothing.)
// If we distribute:
// For leaf nodes, we distribute the leafentries evenly.
@@ -1293,7 +1293,7 @@ maybe_merge_pinned_nodes(
static void merge_remove_key_callback(
BLOCKNUM *bp,
- BOOL for_checkpoint,
+ bool for_checkpoint,
void *extra)
{
FT h = (FT) extra;
@@ -1309,7 +1309,7 @@ ft_merge_child(
FT h,
FTNODE node,
int childnum_to_merge,
- BOOL *did_react,
+ bool *did_react,
struct flusher_advice *fa)
{
// this function should not be called
@@ -1337,10 +1337,10 @@ ft_merge_child(
FTNODE childa, childb;
{
- u_int32_t childfullhash = compute_child_fullhash(h->cf, node, childnuma);
+ uint32_t childfullhash = compute_child_fullhash(h->cf, node, childnuma);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, h);
- toku_pin_ftnode_off_client_thread(h, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, TRUE, 1, &node, &childa);
+ toku_pin_ftnode_off_client_thread(h, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, true, 1, &node, &childa);
}
// for test
call_flusher_thread_callback(flt_flush_before_pin_second_node_for_merge);
@@ -1348,10 +1348,10 @@ ft_merge_child(
FTNODE dep_nodes[2];
dep_nodes[0] = node;
dep_nodes[1] = childa;
- u_int32_t childfullhash = compute_child_fullhash(h->cf, node, childnumb);
+ uint32_t childfullhash = compute_child_fullhash(h->cf, node, childnumb);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, h);
- toku_pin_ftnode_off_client_thread(h, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, TRUE, 2, dep_nodes, &childb);
+ toku_pin_ftnode_off_client_thread(h, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, true, 2, dep_nodes, &childb);
}
if (toku_bnc_n_entries(BNC(node,childnuma))>0) {
@@ -1364,7 +1364,7 @@ ft_merge_child(
// now we have both children pinned in main memory, and cachetable locked,
// so no checkpoints will occur.
- BOOL did_merge, did_rebalance;
+ bool did_merge, did_rebalance;
{
DBT splitk;
toku_init_dbt(&splitk);
@@ -1378,7 +1378,7 @@ ft_merge_child(
}
//toku_verify_estimates(t,childa);
// the tree did react if a merge (did_merge) or rebalance (new split key) occurred
- *did_react = (BOOL)(did_merge || did_rebalance);
+ *did_react = (bool)(did_merge || did_rebalance);
if (did_merge) {
assert(!splitk.data);
} else {
@@ -1479,13 +1479,13 @@ flush_some_child(
int r;
BLOCKNUM targetchild = BP_BLOCKNUM(parent, childnum);
toku_verify_blocknum_allocated(h->blocktable, targetchild);
- u_int32_t childfullhash = compute_child_fullhash(h->cf, parent, childnum);
+ uint32_t childfullhash = compute_child_fullhash(h->cf, parent, childnum);
FTNODE child;
struct ftnode_fetch_extra bfe;
// Note that we don't read the entire node into memory yet.
// The idea is to try to do the minimum work before releasing the parent lock
fill_bfe_for_min_read(&bfe, h);
- toku_pin_ftnode_off_client_thread(h, targetchild, childfullhash, &bfe, TRUE, 1, &parent, &child);
+ toku_pin_ftnode_off_client_thread(h, targetchild, childfullhash, &bfe, true, 1, &parent, &child);
// for test
call_flusher_thread_callback(ft_flush_aflter_child_pin);
@@ -1609,7 +1609,7 @@ flush_some_child(
fa->maybe_merge_child(fa, h, parent, childnum, child, fa->extra);
}
else {
- assert(FALSE);
+ assert(false);
}
}
@@ -1652,7 +1652,7 @@ int
toku_ftnode_cleaner_callback(
void *ftnode_pv,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
void *extraargs)
{
FTNODE node = (FTNODE) ftnode_pv;
@@ -1784,7 +1784,7 @@ flush_node_on_background_thread(FT h, FTNODE parent)
//
void *node_v;
FTNODE child;
- u_int32_t childfullhash = compute_child_fullhash(h->cf, parent, childnum);
+ uint32_t childfullhash = compute_child_fullhash(h->cf, parent, childnum);
int r = toku_cachetable_maybe_get_and_pin_clean (
h->cf,
BP_BLOCKNUM(parent,childnum),
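// Aside: a compact restatement (sketch only; the helper name is
// hypothetical) of the decision rule maybe_merge_pinned_leaf_nodes applies
// above: merge two siblings only when their combined serialized size fits
// in 3/4 of a node, rebalance when the pair is too big but one sibling has
// shrunk to 1/4 of a node or less, and otherwise leave them alone.
#include <stdint.h>

static void merge_or_rebalance_rule(uint32_t sizea, uint32_t sizeb,
                                    uint32_t nodesize,
                                    bool *did_merge, bool *did_rebalance) {
    if ((sizea + sizeb) * 4 > nodesize * 3) {
        // combined size exceeds 3/4 of a node: keep two nodes
        *did_merge = false;
        // rebalance only when one sibling is at or below 1/4 of a node
        *did_rebalance = !(sizea * 4 > nodesize && sizeb * 4 > nodesize);
    } else {
        // small enough to fuse: merge b into a
        *did_merge = true;
        *did_rebalance = false;
    }
}
// Note the rebalance path in the hunks above is merge-then-split:
// balance_leaf_nodes merges a and b, then calls ftleaf_split with
// create_new_node = false so no new block is allocated.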
diff --git a/ft/ft-flusher.h b/ft/ft-flusher.h
index 8015821982c..36370d65a64 100644
--- a/ft/ft-flusher.h
+++ b/ft/ft-flusher.h
@@ -88,8 +88,8 @@ ftleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
- BOOL create_new_node,
- u_int32_t num_dependent_nodes,
+ bool create_new_node,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
@@ -109,7 +109,7 @@ ft_nonleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
- u_int32_t num_dependent_nodes,
+ uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
diff --git a/ft/ft-hot-flusher.cc b/ft/ft-hot-flusher.cc
index 31817ac8212..701d7a135e4 100644
--- a/ft/ft-hot-flusher.cc
+++ b/ft/ft-hot-flusher.cc
@@ -75,7 +75,7 @@ hot_set_highest_key(struct hot_flusher_extra *flusher)
// Otherwise, let's copy all the contents from one key to the other.
void *source = flusher->max_current_key.data;
void *destination = flusher->highest_pivot_key.data;
- u_int32_t size = flusher->max_current_key.size;
+ uint32_t size = flusher->max_current_key.size;
destination = toku_xrealloc(destination, size);
memcpy(destination, source, size);
@@ -95,7 +95,7 @@ hot_set_key(DBT *key, FTNODE parent, int childnum)
DBT *pivot = &parent->childkeys[childnum];
void *data = key->data;
- u_int32_t size = pivot->size;
+ uint32_t size = pivot->size;
data = toku_xrealloc(data, size);
memcpy(data, pivot->data, size);
@@ -263,7 +263,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
do {
FTNODE root;
CACHEKEY root_key;
- u_int32_t fullhash;
+ uint32_t fullhash;
{
toku_ft_grab_treelock(brt->ft);
@@ -277,7 +277,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
(BLOCKNUM) root_key,
fullhash,
&bfe,
- TRUE,
+ true,
0,
NULL,
&root);
@@ -349,7 +349,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
// More diagnostics.
{
- BOOL success = false;
+ bool success = false;
if (r == 0) { success = true; }
{
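// Aside: the two ft-hot-flusher.cc hunks above both follow one pattern for
// saving a key into a DBT: grow the destination buffer to the source size,
// then copy the bytes. A sketch with plain realloc standing in for
// toku_xrealloc (which by convention never returns NULL); the function
// name is hypothetical.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *copy_payload(void *dst, const void *src, uint32_t size) {
    void *grown = realloc(dst, size);   // toku_xrealloc would abort for us
    if (grown == NULL) abort();
    memcpy(grown, src, size);
    return grown;
}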
diff --git a/ft/ft-internal.h b/ft/ft-internal.h
index 2cd7b4c35e7..e50a7cf9139 100644
--- a/ft/ft-internal.h
+++ b/ft/ft-internal.h
@@ -79,7 +79,7 @@ struct ftnode_fetch_extra {
// parameters needed to find out which child needs to be decompressed (so it can be read)
ft_search_t* search;
DBT *range_lock_left_key, *range_lock_right_key;
- BOOL left_is_neg_infty, right_is_pos_infty;
+ bool left_is_neg_infty, right_is_pos_infty;
// states if we should try to aggressively fetch basement nodes
// that are not specifically needed for the current query,
// but may be needed for other cursor operations the user is doing
@@ -87,7 +87,7 @@ struct ftnode_fetch_extra {
// and the user is doing a dictionary wide scan, then
// even though a query may only want one basement node,
// we fetch all basement nodes in a leaf node.
- BOOL disable_prefetching;
+ bool disable_prefetching;
// this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback
// these callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it
int child_to_read;
@@ -189,8 +189,8 @@ struct ftnode_disk_data {
// The SIZE is the size of the compressed partition.
// Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be.
// However, later when we are doing aligned writes, we won't be able to store the size from the end since we want things to align.
- u_int32_t start;
- u_int32_t size;
+ uint32_t start;
+ uint32_t size;
};
#define BP_START(node_dd,i) ((node_dd)[i].start)
#define BP_SIZE(node_dd,i) ((node_dd)[i].size)
@@ -223,7 +223,7 @@ struct __attribute__((__packed__)) ftnode_partition {
// clock count used by pe_callback to determine if a node should be evicted or not
// for now, saturating the count at 1
- u_int8_t clock_count;
+ uint8_t clock_count;
// How many bytes worth of work was performed by messages in each buffer.
uint64_t workdone;
@@ -240,7 +240,7 @@ struct ftnode {
uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk
int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */
int dirty;
- u_int32_t fullhash;
+ uint32_t fullhash;
int n_children; //for internal nodes, if n_children==TREE_FANOUT+1 then the tree needs to be rebalanced.
// for leaf nodes, represents number of basement nodes
unsigned int totalchildkeylens;
@@ -333,7 +333,7 @@ enum {
FT_PIVOT_FRONT_COMPRESS = 8,
};
-u_int32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum);
+uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum);
// The brt_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata.
@@ -492,7 +492,7 @@ struct ft_handle {
on_redirect_callback redirect_callback;
void *redirect_callback_extra;
struct toku_list live_ft_handle_link;
- BOOL did_set_flags;
+ bool did_set_flags;
struct ft_options options;
};
@@ -515,22 +515,22 @@ int toku_serialize_ftnode_to_memory (FTNODE node,
FTNODE_DISK_DATA* ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
- BOOL do_rebalancing,
- BOOL in_parallel,
+ bool do_rebalancing,
+ bool in_parallel,
/*out*/ size_t *n_bytes_to_write,
/*out*/ char **bytes_to_write);
-int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, BOOL do_rebalancing, FT h, BOOL for_checkpoint);
+int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint);
int toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log,
FT h,
- BOOL for_checkpoint);
-int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash, ROLLBACK_LOG_NODE *logp, FT h);
+ bool for_checkpoint);
+int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, uint32_t fullhash, ROLLBACK_LOG_NODE *logp, FT h);
int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra* bfe);
int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, DESCRIPTOR desc, ft_compare_func cmp);
-int toku_deserialize_ftnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, struct ftnode_fetch_extra* bfe);
+int toku_deserialize_ftnode_from (int fd, BLOCKNUM off, uint32_t /*fullhash*/, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, struct ftnode_fetch_extra* bfe);
// <CER> For verifying old, non-upgraded nodes (versions 13 and 14).
int
-decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum);
+decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum);
//
//////////////// <CER> TODO: Move these function declarations
@@ -538,9 +538,9 @@ int
deserialize_ft_from_fd_into_rbuf(int fd,
toku_off_t offset_of_header,
struct rbuf *rb,
- u_int64_t *checkpoint_count,
+ uint64_t *checkpoint_count,
LSN *checkpoint_lsn,
- u_int32_t * version_p);
+ uint32_t * version_p);
int
deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
@@ -635,14 +635,14 @@ STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h);
void toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe);
-extern void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, BOOL for_checkpoint, void* write_extraargs);
-extern void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL is_clone);
-extern int toku_ftnode_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, u_int32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs);
+extern void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
+extern void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
+extern int toku_ftnode_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs);
extern void toku_ftnode_pe_est_callback(void* ftnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs);
extern int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR old_attr, PAIR_ATTR* new_attr, void *extraargs);
-extern BOOL toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs);
+extern bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs);
int toku_ftnode_pf_callback(void* ftnode_pv, void* UU(disk_data), void* read_extraargs, int fd, PAIR_ATTR* sizep);
-extern int toku_ftnode_cleaner_callback( void *ftnode_pv, BLOCKNUM blocknum, u_int32_t fullhash, void *extraargs);
+extern int toku_ftnode_cleaner_callback( void *ftnode_pv, BLOCKNUM blocknum, uint32_t fullhash, void *extraargs);
static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(FT h) {
CACHETABLE_WRITE_CALLBACK wc;
@@ -659,7 +659,7 @@ static const FTNODE null_ftnode=0;
// Values to be used to update ftcursor if a search is successful.
struct ft_cursor_leaf_info_to_be {
- u_int32_t index;
+ uint32_t index;
OMT omt;
};
@@ -672,14 +672,14 @@ struct ft_cursor_leaf_info {
struct ft_cursor {
struct toku_list cursors_link;
FT_HANDLE ft_handle;
- BOOL prefetching;
+ bool prefetching;
DBT key, val; // The key-value pair that the cursor currently points to
DBT range_lock_left_key, range_lock_right_key;
- BOOL left_is_neg_infty, right_is_pos_infty;
- BOOL is_snapshot_read; // true if query is read_committed, false otherwise
- BOOL is_leaf_mode;
- BOOL disable_prefetching;
- BOOL is_temporary;
+ bool left_is_neg_infty, right_is_pos_infty;
+ bool is_snapshot_read; // true if query is read_committed, false otherwise
+ bool is_leaf_mode;
+ bool disable_prefetching;
+ bool is_temporary;
TOKUTXN ttxn;
struct ft_cursor_leaf_info leaf_info;
};
@@ -696,10 +696,10 @@ static inline void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h)
bfe->search = NULL;
bfe->range_lock_left_key = NULL;
bfe->range_lock_right_key = NULL;
- bfe->left_is_neg_infty = FALSE;
- bfe->right_is_pos_infty = FALSE;
+ bfe->left_is_neg_infty = false;
+ bfe->right_is_pos_infty = false;
bfe->child_to_read = -1;
- bfe->disable_prefetching = FALSE;
+ bfe->disable_prefetching = false;
}
//
@@ -714,9 +714,9 @@ static inline void fill_bfe_for_subset_read(
ft_search_t* search,
DBT *left,
DBT *right,
- BOOL left_is_neg_infty,
- BOOL right_is_pos_infty,
- BOOL disable_prefetching
+ bool left_is_neg_infty,
+ bool right_is_pos_infty,
+ bool disable_prefetching
)
{
invariant(h->h->type == FT_CURRENT);
@@ -744,10 +744,10 @@ static inline void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT h) {
bfe->search = NULL;
bfe->range_lock_left_key = NULL;
bfe->range_lock_right_key = NULL;
- bfe->left_is_neg_infty = FALSE;
- bfe->right_is_pos_infty = FALSE;
+ bfe->left_is_neg_infty = false;
+ bfe->right_is_pos_infty = false;
bfe->child_to_read = -1;
- bfe->disable_prefetching = FALSE;
+ bfe->disable_prefetching = false;
}
static inline void destroy_bfe_for_prefetch(struct ftnode_fetch_extra *bfe) {
@@ -807,7 +807,7 @@ struct pivot_bounds {
};
// FIXME needs toku prefix
-void maybe_apply_ancestors_messages_to_node (FT_HANDLE t, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, BOOL* msgs_applied);
+void maybe_apply_ancestors_messages_to_node (FT_HANDLE t, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool* msgs_applied);
int
toku_ft_search_which_child(
@@ -907,7 +907,7 @@ typedef enum {
} ft_upgrade_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[FT_UPGRADE_STATUS_NUM_ROWS];
} FT_UPGRADE_STATUS_S, *FT_UPGRADE_STATUS;
@@ -922,7 +922,7 @@ typedef enum {
} le_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[LE_STATUS_NUM_ROWS];
} LE_STATUS_S, *LE_STATUS;
@@ -987,7 +987,7 @@ void
toku_ft_bn_apply_cmd_once (
BASEMENTNODE bn,
const FT_MSG cmd,
- u_int32_t idx,
+ uint32_t idx,
LEAFENTRY le,
uint64_t *workdonep,
STAT64INFO stats_to_update
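// Aside: the ft-internal.h hunks above turn ftnode_fetch_extra's flags
// into plain bool. A sketch (struct trimmed to just the converted fields,
// names hypothetical) of the defaults that fill_bfe_for_full_read and
// fill_bfe_for_min_read install, per the hunks at @@ -696 and @@ -744.
struct fetch_extra_sketch {
    bool left_is_neg_infty, right_is_pos_infty;
    bool disable_prefetching;
    int  child_to_read;
};

static void fill_defaults_sketch(struct fetch_extra_sketch *bfe) {
    bfe->left_is_neg_infty   = false;  // no infinite range-lock endpoints
    bfe->right_is_pos_infty  = false;  //   (no range keys are set at all here)
    bfe->disable_prefetching = false;  // prefetching stays enabled
    bfe->child_to_read       = -1;     // filled in later by the fetch callbacks
}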
diff --git a/ft/ft-node-deserialize.cc b/ft/ft-node-deserialize.cc
index 321dea9889d..05127e3c7be 100644
--- a/ft/ft-node-deserialize.cc
+++ b/ft/ft-node-deserialize.cc
@@ -108,8 +108,8 @@ check_node_info_checksum(struct rbuf *rb)
{
int r = 0;
// Verify checksum of header stored.
- u_int32_t checksum = x1764_memory(rb->buf, rb->ndone);
- u_int32_t stored_checksum = rbuf_int(rb);
+ uint32_t checksum = x1764_memory(rb->buf, rb->ndone);
+ uint32_t stored_checksum = rbuf_int(rb);
if (stored_checksum != checksum) {
// TODO: dump_bad_block(rb->buf, rb->size);
@@ -143,8 +143,8 @@ int
check_legacy_end_checksum(struct rbuf *rb)
{
int r = 0;
- u_int32_t expected_xsum = rbuf_int(rb);
- u_int32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
+ uint32_t expected_xsum = rbuf_int(rb);
+ uint32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
if (expected_xsum != actual_xsum) {
r = TOKUDB_BAD_CHECKSUM;
}
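// Aside: both ft-node-deserialize.cc hunks above share one verification
// shape: checksum every byte consumed so far, read the stored checksum,
// and report TOKUDB_BAD_CHECKSUM on mismatch. A self-contained sketch of
// that shape; the toy hash merely stands in for x1764_memory and the
// names are hypothetical.
#include <stddef.h>
#include <stdint.h>

static uint32_t toy_checksum(const uint8_t *buf, size_t len) {
    uint64_t h = 0;
    for (size_t i = 0; i < len; i++) h = h * 17 + buf[i];  // stand-in only
    return (uint32_t)(h ^ (h >> 32));
}

// Returns 0 when the region checks out, nonzero (think TOKUDB_BAD_CHECKSUM)
// when the stored and computed checksums disagree.
static int verify_region(const uint8_t *buf, size_t ndone, uint32_t stored) {
    return toy_checksum(buf, ndone) == stored ? 0 : -1;
}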
diff --git a/ft/ft-ops.cc b/ft/ft-ops.cc
index 6a524ff225d..d9f39fa12ea 100644
--- a/ft/ft-ops.cc
+++ b/ft/ft-ops.cc
@@ -231,9 +231,9 @@ toku_assert_entire_node_in_memory(FTNODE node) {
assert(is_entire_node_in_memory(node));
}
-static u_int32_t
+static uint32_t
get_leaf_num_entries(FTNODE node) {
- u_int32_t result = 0;
+ uint32_t result = 0;
int i;
toku_assert_entire_node_in_memory(node);
for ( i = 0; i < node->n_children; i++) {
@@ -281,12 +281,12 @@ toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc)
return toku_fifo_buffer_size_in_use(bnc->buffer);
}
-// return TRUE if the size of the buffers plus the amount of work done is large enough. (But return false if there is nothing to be flushed (the buffers are empty)).
+// return true if the size of the buffers plus the amount of work done is large enough. (But return false if there is nothing to be flushed (the buffers are empty)).
bool
toku_ft_nonleaf_is_gorged (FTNODE node) {
- u_int64_t size = toku_serialize_ftnode_size(node);
+ uint64_t size = toku_serialize_ftnode_size(node);
- bool buffers_are_empty = TRUE;
+ bool buffers_are_empty = true;
toku_assert_entire_node_in_memory(node);
//
// the nonleaf node is gorged if the following holds true:
@@ -301,7 +301,7 @@ toku_ft_nonleaf_is_gorged (FTNODE node) {
}
for (int child = 0; child < node->n_children; ++child) {
if (toku_bnc_nbytesinbuf(BNC(node, child)) > 0) {
- buffers_are_empty = FALSE;
+ buffers_are_empty = false;
break;
}
}
@@ -316,7 +316,7 @@ static void ft_verify_flags(FT ft, FTNODE node) {
int toku_ft_debug_mode = 0;
-u_int32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum) {
+uint32_t compute_child_fullhash (CACHEFILE cf, FTNODE node, int childnum) {
assert(node->height>0 && childnum<node->n_children);
return toku_cachetable_hash(cf, BP_BLOCKNUM(node, childnum));
}
@@ -326,7 +326,7 @@ static void __attribute__((__unused__))
ft_leaf_check_leaf_stats (FTNODE node)
{
assert(node);
- assert(FALSE);
+ assert(false);
// static int count=0; count++;
// if (node->height>0) return;
// struct subtree_estimates e = calc_leaf_stats(node);
@@ -418,7 +418,7 @@ ftnode_cachepressure_size(FTNODE node)
retval += BP_WORKDONE(node, i);
}
else {
- assert(FALSE);
+ assert(false);
}
}
}
@@ -468,7 +468,7 @@ ftnode_memory_size (FTNODE node)
}
}
else {
- assert(FALSE);
+ assert(false);
}
}
return retval;
@@ -483,7 +483,7 @@ PAIR_ATTR make_ftnode_pair_attr(FTNODE node) {
.leaf_size = (node->height > 0) ? 0 : size,
.rollback_size = 0,
.cache_pressure_size = cachepressure_size,
- .is_valid = TRUE
+ .is_valid = true
};
return result;
}
@@ -495,7 +495,7 @@ PAIR_ATTR make_invalid_pair_attr(void) {
.leaf_size = 0,
.rollback_size = 0,
.cache_pressure_size = 0,
- .is_valid = FALSE
+ .is_valid = false
};
return result;
}
@@ -581,7 +581,7 @@ toku_get_and_clear_basement_stats(FTNODE leafnode) {
return deltas;
}
-static void ft_status_update_flush_reason(FTNODE node, BOOL for_checkpoint) {
+static void ft_status_update_flush_reason(FTNODE node, bool for_checkpoint) {
if (node->height == 0) {
if (for_checkpoint) {
__sync_fetch_and_add(&STATUS_VALUE(FT_DISK_FLUSH_LEAF_FOR_CHECKPOINT), 1);
@@ -603,7 +603,7 @@ static void ft_status_update_flush_reason(FTNODE node, BOOL for_checkpoint) {
static void ftnode_update_disk_stats(
FTNODE ftnode,
FT ft,
- BOOL for_checkpoint
+ bool for_checkpoint
)
{
STAT64INFO_S deltas = ZEROSTATS;
@@ -643,7 +643,7 @@ void toku_ftnode_clone_callback(
void* value_data,
void** cloned_value_data,
PAIR_ATTR* new_attr,
- BOOL for_checkpoint,
+ bool for_checkpoint,
void* write_extraargs
)
{
@@ -692,7 +692,7 @@ void toku_ftnode_clone_callback(
*new_attr = make_ftnode_pair_attr(node);
}
else {
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
*cloned_value_data = cloned_node;
}
@@ -707,10 +707,10 @@ void toku_ftnode_flush_callback (
void *extraargs,
PAIR_ATTR size __attribute__((unused)),
PAIR_ATTR* new_size,
- BOOL write_me,
- BOOL keep_me,
- BOOL for_checkpoint,
- BOOL is_clone
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint,
+ bool is_clone
)
{
FT h = (FT) extraargs;
@@ -763,7 +763,7 @@ toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe)
}
}
-int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM nodename, u_int32_t fullhash,
+int toku_ftnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM nodename, uint32_t fullhash,
void **ftnode_pv, void** disk_data, PAIR_ATTR *sizep, int *dirtyp, void *extraargs) {
assert(extraargs);
assert(*ftnode_pv == NULL);
@@ -828,11 +828,11 @@ void toku_ftnode_pe_est_callback(
// after compression, it is simply the size of compressed
// data on disk plus the size of the struct that holds it
FTNODE_DISK_DATA ndd = (FTNODE_DISK_DATA) disk_data;
- u_int32_t compressed_data_size = BP_SIZE(ndd, i);
+ uint32_t compressed_data_size = BP_SIZE(ndd, i);
compressed_data_size += sizeof(struct sub_block);
// now get the space taken now
- u_int32_t decompressed_data_size = get_avail_internal_node_partition_size(node,i);
+ uint32_t decompressed_data_size = get_avail_internal_node_partition_size(node,i);
bytes_to_free += (decompressed_data_size - compressed_data_size);
}
}
@@ -933,7 +933,7 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
continue;
}
else {
- assert(FALSE);
+ assert(false);
}
}
}
@@ -953,9 +953,9 @@ exit:
// - touch the necessary partition's clock. The reason we do it here is so that there is one central place it is done, and not done
// by all the various callers
//
-BOOL toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
+bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
// placeholder for now
- BOOL retval = FALSE;
+ bool retval = false;
FTNODE node = (FTNODE) ftnode_pv;
struct ftnode_fetch_extra *bfe = (struct ftnode_fetch_extra *) read_extraargs;
//
@@ -964,20 +964,20 @@ BOOL toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
// - ftnode_fetch_subset: some subset is necessary (example use: toku_ft_search)
// - ftnode_fetch_all: entire node is necessary (example use: flush, split, merge)
// The code below checks if the necessary partitions are already in memory,
- // and if they are, return FALSE, and if not, return TRUE
+ // and if they are, return false, and if not, return true
//
if (bfe->type == ftnode_fetch_none) {
- retval = FALSE;
+ retval = false;
}
else if (bfe->type == ftnode_fetch_all) {
- retval = FALSE;
+ retval = false;
for (int i = 0; i < node->n_children; i++) {
BP_TOUCH_CLOCK(node,i);
// if we find a partition that is not available,
// then a partial fetch is required because
// the entire node must be made available
if (BP_STATE(node,i) != PT_AVAIL) {
- retval = TRUE;
+ retval = true;
}
}
}
@@ -996,7 +996,7 @@ BOOL toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
bfe->search
);
BP_TOUCH_CLOCK(node,bfe->child_to_read);
- // child we want to read is not available, must set retval to TRUE
+ // child we want to read is not available, must set retval to true
retval = (BP_STATE(node, bfe->child_to_read) != PT_AVAIL);
}
else if (bfe->type == ftnode_fetch_prefetch) {
@@ -1007,13 +1007,13 @@ BOOL toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
int rc = toku_bfe_rightmost_child_wanted(bfe, node);
for (int i = lc; i <= rc; ++i) {
if (BP_STATE(node, i) != PT_AVAIL) {
- retval = TRUE;
+ retval = true;
}
}
}
else {
// we have a bug. The type should be known
- assert(FALSE);
+ assert(false);
}
return retval;
}
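// Aside: a condensed sketch of the rule the pf_req hunk above implements
// (names are hypothetical stand-ins): a partial fetch is required exactly
// when some wanted partition is not PT_AVAIL; a full fetch wants every
// child, while subset and prefetch fetches scan only [lc, rc].
enum pt_state_sketch { PT_SKETCH_AVAIL, PT_SKETCH_COMPRESSED, PT_SKETCH_ON_DISK };

static bool pf_req_sketch(const enum pt_state_sketch *states, int lc, int rc) {
    bool required = false;
    for (int i = lc; i <= rc; i++) {
        if (states[i] != PT_SKETCH_AVAIL) {
            required = true;   // this wanted partition must be fetched
        }
    }
    return required;
}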
@@ -1023,7 +1023,7 @@ ft_status_update_partial_fetch_reason(
struct ftnode_fetch_extra* UU(bfe),
int UU(i),
int UU(state),
- BOOL UU(is_leaf)
+ bool UU(is_leaf)
)
{
invariant(state == PT_COMPRESSED || state == PT_ON_DISK);
@@ -1120,7 +1120,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar
r = toku_deserialize_bp_from_disk(node, ndd, i, fd, bfe);
}
else {
- assert(FALSE);
+ assert(false);
}
}
@@ -1144,7 +1144,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar
}
static int
-leafval_heaviside_le (u_int32_t klen, void *kval,
+leafval_heaviside_le (uint32_t klen, void *kval,
struct cmd_leafval_heaviside_extra *be) {
DBT dbt;
DBT const * const key = be->key;
@@ -1159,7 +1159,7 @@ int
toku_cmd_leafval_heaviside (OMTVALUE lev, void *extra) {
LEAFENTRY CAST_FROM_VOIDP(le, lev);
struct cmd_leafval_heaviside_extra *CAST_FROM_VOIDP(be, extra);
- u_int32_t keylen;
+ uint32_t keylen;
void* key = le_key_and_len(le, &keylen);
return leafval_heaviside_le(keylen, key,
be);
@@ -1306,7 +1306,7 @@ ft_init_new_root(FT ft, FTNODE nodea, FTNODE nodeb, DBT splitk, CACHEKEY *rootp,
BP_STATE(newroot,1) = PT_AVAIL;
newroot->dirty = 1;
//printf("%s:%d put %lld\n", __FILE__, __LINE__, newroot_diskoff);
- u_int32_t fullhash = toku_cachetable_hash(ft->cf, newroot_diskoff);
+ uint32_t fullhash = toku_cachetable_hash(ft->cf, newroot_diskoff);
newroot->fullhash = fullhash;
toku_cachetable_put(ft->cf, newroot_diskoff, fullhash, newroot, make_ftnode_pair_attr(newroot), get_write_callbacks_for_node(ft));
@@ -1353,7 +1353,7 @@ toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey) {
static void
ft_leaf_delete_leafentry (
BASEMENTNODE bn,
- u_int32_t idx,
+ uint32_t idx,
LEAFENTRY le
)
// Effect: Delete leafentry
@@ -1376,7 +1376,7 @@ void
toku_ft_bn_apply_cmd_once (
BASEMENTNODE bn,
const FT_MSG cmd,
- u_int32_t idx,
+ uint32_t idx,
LEAFENTRY le,
uint64_t *workdone,
STAT64INFO stats_to_update
@@ -1462,15 +1462,15 @@ toku_ft_bn_apply_cmd_once (
static const uint32_t setval_tag = 0xee0ccb99; // this was gotten by doing "cat /dev/random|head -c4|od -x" to get a random number. We want to make sure that the user actually passes us the setval_extra_s that we passed in.
struct setval_extra_s {
- u_int32_t tag;
- BOOL did_set_val;
+ uint32_t tag;
+ bool did_set_val;
int setval_r; // any error code that setval_fun wants to return goes here.
// need arguments for toku_ft_bn_apply_cmd_once
BASEMENTNODE bn;
MSN msn; // captured from original message, not currently used
XIDS xids;
const DBT *key;
- u_int32_t idx;
+ uint32_t idx;
LEAFENTRY le;
uint64_t * workdone; // set by toku_ft_bn_apply_cmd_once()
STAT64INFO stats_to_update;
@@ -1486,7 +1486,7 @@ static void setval_fun (const DBT *new_val, void *svextra_v) {
struct setval_extra_s *CAST_FROM_VOIDP(svextra, svextra_v);
assert(svextra->tag==setval_tag);
assert(!svextra->did_set_val);
- svextra->did_set_val = TRUE;
+ svextra->did_set_val = true;
{
// can't leave scope until toku_ft_bn_apply_cmd_once if
@@ -1542,12 +1542,12 @@ static int do_update(ft_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE bn
keyp = toku_fill_dbt(&key, le_key(le), le_keylen(le));
update_function_extra = cmd->u.id.val;
} else {
- assert(FALSE);
+ assert(false);
}
if (le && !le_latest_is_del(le)) {
// if the latest val exists, use it, and we'll use the leafentry later
- u_int32_t vallen;
+ uint32_t vallen;
void *valp = le_latest_val_and_len(le, &vallen);
vdbtp = toku_fill_dbt(&vdbt, valp, vallen);
le_for_update = le;
@@ -1557,7 +1557,7 @@ static int do_update(ft_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE bn
le_for_update = NULL;
}
- struct setval_extra_s setval_extra = {setval_tag, FALSE, 0, bn, cmd->msn, cmd->xids,
+ struct setval_extra_s setval_extra = {setval_tag, false, 0, bn, cmd->msn, cmd->xids,
keyp, idx, le_for_update, workdone, stats_to_update};
// call handlerton's brt->update_fun(), which passes setval_extra to setval_fun()
FAKE_DB(db, desc);
@@ -1592,7 +1592,7 @@ toku_ft_bn_apply_cmd (
LEAFENTRY storeddata;
OMTVALUE storeddatav=NULL;
- u_int32_t omt_size;
+ uint32_t omt_size;
int r;
struct cmd_leafval_heaviside_extra be = {compare_fun, desc, cmd->u.id.key};
@@ -1602,7 +1602,7 @@ toku_ft_bn_apply_cmd (
switch (cmd->type) {
case FT_INSERT_NO_OVERWRITE:
case FT_INSERT: {
- u_int32_t idx;
+ uint32_t idx;
if (doing_seqinsert) {
idx = toku_omt_size(bn->buffer);
r = toku_omt_fetch(bn->buffer, idx-1, &storeddatav);
@@ -1628,8 +1628,8 @@ toku_ft_bn_apply_cmd (
// the leaf then it is sequential
// window = min(32, number of leaf entries/16)
{
- u_int32_t s = toku_omt_size(bn->buffer);
- u_int32_t w = s / 16;
+ uint32_t s = toku_omt_size(bn->buffer);
+ uint32_t w = s / 16;
if (w == 0) w = 1;
if (w > 32) w = 32;
@@ -1642,7 +1642,7 @@ toku_ft_bn_apply_cmd (
case FT_DELETE_ANY:
case FT_ABORT_ANY:
case FT_COMMIT_ANY: {
- u_int32_t idx;
+ uint32_t idx;
// Apply to all the matches
r = toku_omt_find_zero(bn->buffer, toku_cmd_leafval_heaviside, &be,
@@ -1652,13 +1652,13 @@ toku_ft_bn_apply_cmd (
CAST_FROM_VOIDP(storeddata, storeddatav);
while (1) {
- u_int32_t num_leafentries_before = toku_omt_size(bn->buffer);
+ uint32_t num_leafentries_before = toku_omt_size(bn->buffer);
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, workdone, stats_to_update);
{
// Now we must find the next leafentry.
- u_int32_t num_leafentries_after = toku_omt_size(bn->buffer);
+ uint32_t num_leafentries_after = toku_omt_size(bn->buffer);
//idx is the index of the leafentry we just modified.
//If the leafentry was deleted, we will have one less leafentry in
//the omt than we started with and the next leafentry will be at the
@@ -1676,7 +1676,7 @@ toku_ft_bn_apply_cmd (
CAST_FROM_VOIDP(storeddata, storeddatav);
{ // Continue only if the next record that we found has the same key.
DBT adbt;
- u_int32_t keylen;
+ uint32_t keylen;
void *keyp = le_key_and_len(storeddata, &keylen);
FAKE_DB(db, desc);
if (compare_fun(&db,
@@ -1694,14 +1694,14 @@ toku_ft_bn_apply_cmd (
case FT_OPTIMIZE:
// Apply to all leafentries
omt_size = toku_omt_size(bn->buffer);
- for (u_int32_t idx = 0; idx < omt_size; ) {
+ for (uint32_t idx = 0; idx < omt_size; ) {
r = toku_omt_fetch(bn->buffer, idx, &storeddatav);
assert_zero(r);
CAST_FROM_VOIDP(storeddata, storeddatav);
int deleted = 0;
if (!le_is_clean(storeddata)) { //If already clean, nothing to do.
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, workdone, stats_to_update);
- u_int32_t new_omt_size = toku_omt_size(bn->buffer);
+ uint32_t new_omt_size = toku_omt_size(bn->buffer);
if (new_omt_size != omt_size) {
assert(new_omt_size+1 == omt_size);
//Item was deleted.
@@ -1720,14 +1720,14 @@ toku_ft_bn_apply_cmd (
case FT_ABORT_BROADCAST_TXN:
// Apply to all leafentries if txn is represented
omt_size = toku_omt_size(bn->buffer);
- for (u_int32_t idx = 0; idx < omt_size; ) {
+ for (uint32_t idx = 0; idx < omt_size; ) {
r = toku_omt_fetch(bn->buffer, idx, &storeddatav);
assert_zero(r);
CAST_FROM_VOIDP(storeddata, storeddatav);
int deleted = 0;
if (le_has_xids(storeddata, cmd->xids)) {
toku_ft_bn_apply_cmd_once(bn, cmd, idx, storeddata, workdone, stats_to_update);
- u_int32_t new_omt_size = toku_omt_size(bn->buffer);
+ uint32_t new_omt_size = toku_omt_size(bn->buffer);
if (new_omt_size != omt_size) {
assert(new_omt_size+1 == omt_size);
//Item was deleted.
@@ -1743,7 +1743,7 @@ toku_ft_bn_apply_cmd (
break;
case FT_UPDATE: {
- u_int32_t idx;
+ uint32_t idx;
r = toku_omt_find_zero(bn->buffer, toku_cmd_leafval_heaviside, &be,
&storeddatav, &idx);
if (r==DB_NOTFOUND) {
@@ -1756,8 +1756,8 @@ toku_ft_bn_apply_cmd (
}
case FT_UPDATE_BROADCAST_ALL: {
// apply to all leafentries.
- u_int32_t idx = 0;
- u_int32_t num_leafentries_before;
+ uint32_t idx = 0;
+ uint32_t num_leafentries_before;
while (idx < (num_leafentries_before = toku_omt_size(bn->buffer))) {
r = toku_omt_fetch(bn->buffer, idx, &storeddatav);
assert(r==0);
@@ -1991,19 +1991,19 @@ ft_nonleaf_cmd_all (ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, F
}
}
-static BOOL
+static bool
ft_msg_applies_once(FT_MSG cmd)
{
return ft_msg_type_applies_once(cmd->type);
}
-static BOOL
+static bool
ft_msg_applies_all(FT_MSG cmd)
{
return ft_msg_type_applies_all(cmd->type);
}
-static BOOL
+static bool
ft_msg_does_nothing(FT_MSG cmd)
{
return ft_msg_type_does_nothing(cmd->type);
@@ -2052,15 +2052,15 @@ ft_nonleaf_put_cmd (ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, F
}
-// return TRUE if root changed, FALSE otherwise
-static BOOL
+// return true if root changed, false otherwise
+static bool
ft_process_maybe_reactive_root (FT ft, CACHEKEY *rootp, FTNODE *nodep) {
FTNODE node = *nodep;
toku_assert_entire_node_in_memory(node);
enum reactivity re = get_node_reactivity(node);
switch (re) {
case RE_STABLE:
- return FALSE;
+ return false;
case RE_FISSIBLE:
{
// The root node should split, so make a new root.
@@ -2073,15 +2073,15 @@ ft_process_maybe_reactive_root (FT ft, CACHEKEY *rootp, FTNODE *nodep) {
// in just node. That would be correct.
//
if (node->height==0) {
- ftleaf_split(ft, node, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(ft, node, &nodea, &nodeb, &splitk, true, 0, NULL);
} else {
ft_nonleaf_split(ft, node, &nodea, &nodeb, &splitk, 0, NULL);
}
ft_init_new_root(ft, nodea, nodeb, splitk, rootp, nodep);
- return TRUE;
+ return true;
}
case RE_FUSIBLE:
- return FALSE; // Cannot merge anything at the root, so return happy.
+ return false; // Cannot merge anything at the root, so return happy.
}
abort(); // cannot happen
}
@@ -2090,7 +2090,7 @@ ft_process_maybe_reactive_root (FT ft, CACHEKEY *rootp, FTNODE *nodep) {
// Garbage collect one leaf entry.
static void
ft_basement_node_gc_once(BASEMENTNODE bn,
- u_int32_t index,
+ uint32_t index,
LEAFENTRY leaf_entry,
const xid_omt_t &snapshot_xids,
const rx_omt_t &referenced_xids,
@@ -2176,8 +2176,8 @@ basement_node_gc_all_les(BASEMENTNODE bn,
STAT64INFO_S * delta)
{
int r = 0;
- u_int32_t index = 0;
- u_int32_t num_leafentries_before;
+ uint32_t index = 0;
+ uint32_t num_leafentries_before;
while (index < (num_leafentries_before = toku_omt_size(bn->buffer))) {
OMTVALUE storedatav = NULL;
LEAFENTRY leaf_entry;
@@ -2403,7 +2403,7 @@ void toku_ft_leaf_apply_cmd(
}
}
else if (!ft_msg_does_nothing(cmd)) {
- assert(FALSE);
+ assert(false);
}
VERIFY_NODE(t, node);
}
@@ -2493,7 +2493,7 @@ toku_ft_root_put_cmd (FT h, FT_MSG_S * cmd)
{
toku_ft_grab_treelock(h);
- u_int32_t fullhash;
+ uint32_t fullhash;
toku_calculate_root_offset_pointer(h, &root_key, &fullhash);
// get the root node
@@ -2504,7 +2504,7 @@ toku_ft_root_put_cmd (FT h, FT_MSG_S * cmd)
root_key,
fullhash,
&bfe,
- TRUE, // may_modify_node
+ true, // may_modify_node
0,
NULL,
&node
@@ -2523,7 +2523,7 @@ toku_ft_root_put_cmd (FT h, FT_MSG_S * cmd)
// first handle a reactive root, then put in the message
CACHEKEY new_root_key;
- BOOL root_changed = ft_process_maybe_reactive_root(h, &new_root_key, &node);
+ bool root_changed = ft_process_maybe_reactive_root(h, &new_root_key, &node);
if (root_changed) {
toku_ft_set_new_root_blocknum(h, new_root_key);
}
@@ -2548,7 +2548,7 @@ toku_ft_root_put_cmd (FT h, FT_MSG_S * cmd)
// Effect: Insert the key-val pair into brt.
int toku_ft_insert (FT_HANDLE brt, DBT *key, DBT *val, TOKUTXN txn) {
- return toku_ft_maybe_insert(brt, key, val, txn, FALSE, ZERO_LSN, TRUE, FT_INSERT);
+ return toku_ft_maybe_insert(brt, key, val, txn, false, ZERO_LSN, true, FT_INSERT);
}
int
@@ -2683,7 +2683,7 @@ toku_ft_log_put_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, int nu
}
int
-toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging, enum ft_msg_type type) {
+toku_ft_maybe_insert (FT_HANDLE ft_h, DBT *key, DBT *val, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) {
assert(type==FT_INSERT || type==FT_INSERT_NO_OVERWRITE);
int r = 0;
XIDS message_xids = xids_get_root_xids(); //By default use committed messages
@@ -2736,8 +2736,8 @@ ft_send_update_msg(FT_HANDLE brt, FT_MSG_S *msg, TOKUTXN txn) {
int
toku_ft_maybe_update(FT_HANDLE ft_h, const DBT *key, const DBT *update_function_extra,
- TOKUTXN txn, BOOL oplsn_valid, LSN oplsn,
- BOOL do_logging) {
+ TOKUTXN txn, bool oplsn_valid, LSN oplsn,
+ bool do_logging) {
int r = 0;
TXNID xid = toku_txn_get_txnid(txn);
@@ -2778,12 +2778,12 @@ cleanup:
int
toku_ft_maybe_update_broadcast(FT_HANDLE ft_h, const DBT *update_function_extra,
- TOKUTXN txn, BOOL oplsn_valid, LSN oplsn,
- BOOL do_logging, BOOL is_resetting_op) {
+ TOKUTXN txn, bool oplsn_valid, LSN oplsn,
+ bool do_logging, bool is_resetting_op) {
int r = 0;
TXNID xid = toku_txn_get_txnid(txn);
- u_int8_t resetting = is_resetting_op ? 1 : 0;
+ uint8_t resetting = is_resetting_op ? 1 : 0;
if (txn) {
r = toku_logger_save_rollback_cmdupdatebroadcast(txn, toku_cachefile_filenum(ft_h->ft->cf), resetting);
if (r != 0) { goto cleanup; }
@@ -2835,7 +2835,7 @@ toku_ft_send_commit_any(FT_HANDLE brt, DBT *key, XIDS xids) {
}
int toku_ft_delete(FT_HANDLE brt, DBT *key, TOKUTXN txn) {
- return toku_ft_maybe_delete(brt, key, txn, FALSE, ZERO_LSN, TRUE);
+ return toku_ft_maybe_delete(brt, key, txn, false, ZERO_LSN, true);
}
int
@@ -2879,7 +2879,7 @@ toku_ft_log_del_multiple (TOKUTXN txn, FT_HANDLE src_ft, FT_HANDLE *brts, int nu
}
int
-toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging) {
+toku_ft_maybe_delete(FT_HANDLE ft_h, DBT *key, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging) {
int r;
XIDS message_xids = xids_get_root_xids(); //By default use committed messages
TXNID xid = toku_txn_get_txnid(txn);
@@ -2929,10 +2929,10 @@ struct omt_compressor_state {
OMT omt;
};
-static int move_it (OMTVALUE lev, u_int32_t idx, void *v) {
+static int move_it (OMTVALUE lev, uint32_t idx, void *v) {
LEAFENTRY CAST_FROM_VOIDP(le, lev);
struct omt_compressor_state *CAST_FROM_VOIDP(oc, v);
- u_int32_t size = leafentry_memsize(le);
+ uint32_t size = leafentry_memsize(le);
LEAFENTRY CAST_FROM_VOIDP(newdata, toku_mempool_malloc(oc->new_kvspace, size, 1));
lazy_assert(newdata); // we do this on a fresh mempool, so nothing bad should happen
memcpy(newdata, le, size);
@@ -2943,7 +2943,7 @@ static int move_it (OMTVALUE lev, u_int32_t idx, void *v) {
// Compress things, and grow the mempool if needed.
// TODO 4092 should copy data to new memory, then call toku_mempool_destroy() followed by toku_mempool_init()
static int omt_compress_kvspace (OMT omt, struct mempool *memp, size_t added_size, void **maybe_free) {
- u_int32_t total_size_needed = memp->free_offset-memp->frag_size + added_size;
+ uint32_t total_size_needed = memp->free_offset-memp->frag_size + added_size;
if (total_size_needed+total_size_needed/4 >= memp->size) {
memp->size = total_size_needed+total_size_needed/4;
}
@@ -3063,7 +3063,7 @@ toku_ft_handle_get_compression_method(FT_HANDLE t, enum toku_compression_method
}
static int
-verify_builtin_comparisons_consistent(FT_HANDLE t, u_int32_t flags) {
+verify_builtin_comparisons_consistent(FT_HANDLE t, uint32_t flags) {
if ((flags & TOKU_DB_KEYCMP_BUILTIN) && (t->options.compare_fun != toku_builtin_compare_fun))
return EINVAL;
return 0;
@@ -3078,9 +3078,9 @@ toku_ft_change_descriptor(
FT_HANDLE ft_h,
const DBT* old_descriptor,
const DBT* new_descriptor,
- BOOL do_log,
+ bool do_log,
TOKUTXN txn,
- BOOL update_cmp_descriptor
+ bool update_cmp_descriptor
)
{
int r = 0;
@@ -3142,7 +3142,7 @@ toku_ft_handle_inherit_options(FT_HANDLE t, FT ft) {
.update_fun = ft->update_fun
};
t->options = options;
- t->did_set_flags = TRUE;
+ t->did_set_flags = true;
}
// This is the actual open, used for various purposes, such as normal use, recovery, and redirect.
@@ -3152,11 +3152,11 @@ toku_ft_handle_inherit_options(FT_HANDLE t, FT ft) {
static int
ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, FILENUM use_filenum, DICTIONARY_ID use_dictionary_id, LSN max_acceptable_lsn) {
int r;
- BOOL txn_created = FALSE;
+ bool txn_created = false;
char *fname_in_cwd = NULL;
CACHEFILE cf = NULL;
FT ft = NULL;
- BOOL did_create = FALSE;
+ bool did_create = false;
toku_ft_open_close_lock();
if (ft_h->did_set_flags) {
@@ -3179,14 +3179,14 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only
reserved_filenum = toku_cachetable_reserve_filenum(cachetable);
}
if (r==ENOENT && is_create) {
- did_create = TRUE;
+ did_create = true;
mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO;
if (txn) {
BYTESTRING bs = { .len=(uint32_t) strlen(fname_in_env), .data = (char*)fname_in_env };
r = toku_logger_save_rollback_fcreate(txn, reserved_filenum, &bs); // bs is a copy of the fname relative to the environment
assert_zero(r);
}
- txn_created = (BOOL)(txn!=NULL);
+ txn_created = (bool)(txn!=NULL);
r = toku_logger_log_fcreate(txn, fname_in_env, reserved_filenum, mode, ft_h->options.flags, ft_h->options.nodesize, ft_h->options.basementnodesize, ft_h->options.compression_method);
assert_zero(r); // only possible failure is panic, which we check above
r = ft_create_file(ft_h, fname_in_cwd, &fd);
@@ -3197,7 +3197,7 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only
if (r) { goto exit; }
}
assert(ft_h->options.nodesize>0);
- BOOL was_already_open;
+ bool was_already_open;
if (is_create) {
r = toku_read_ft_and_store_in_cachefile(ft_h, cf, max_acceptable_lsn, &ft, &was_already_open);
if (r==TOKUDB_DICTIONARY_NO_HEADER) {
@@ -3288,7 +3288,7 @@ exit:
// we can simply try to remove the header.
// We don't need to unlink this brt from the header
toku_ft_grab_reflock(ft);
- BOOL needed = toku_ft_needed_unlocked(ft);
+ bool needed = toku_ft_needed_unlocked(ft);
toku_ft_release_reflock(ft);
if (!needed) {
//Close immediately.
@@ -3298,7 +3298,7 @@ exit:
}
}
else {
- toku_cachefile_close(&cf, 0, FALSE, ZERO_LSN);
+ toku_cachefile_close(&cf, 0, false, ZERO_LSN);
}
}
toku_ft_open_close_unlock();
@@ -3390,7 +3390,7 @@ toku_ft_get_dictionary_id(FT_HANDLE brt) {
int toku_ft_set_flags(FT_HANDLE brt, unsigned int flags) {
assert(flags==(flags&TOKU_DB_KEYCMP_BUILTIN)); // make sure there are no extraneous flags
- brt->did_set_flags = TRUE;
+ brt->did_set_flags = true;
brt->options.flags = flags;
return 0;
}
@@ -3510,7 +3510,7 @@ int toku_ft_handle_create(FT_HANDLE *ft_handle_ptr) {
memset(brt, 0, sizeof *brt);
toku_list_init(&brt->live_ft_handle_link);
brt->options.flags = 0;
- brt->did_set_flags = FALSE;
+ brt->did_set_flags = false;
brt->options.nodesize = FT_DEFAULT_NODE_SIZE;
brt->options.basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
brt->options.compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
@@ -3560,9 +3560,9 @@ does_txn_read_entry(TXNID id, TOKUTXN context) {
static inline void
ft_cursor_extract_key_and_val(LEAFENTRY le,
FT_CURSOR cursor,
- u_int32_t *keylen,
+ uint32_t *keylen,
void **key,
- u_int32_t *vallen,
+ uint32_t *vallen,
void **val) {
if (toku_ft_cursor_is_leaf_mode(cursor)) {
*key = le_key_and_len(le, keylen);
@@ -3587,8 +3587,8 @@ int toku_ft_cursor (
FT_HANDLE brt,
FT_CURSOR *cursorptr,
TOKUTXN ttxn,
- BOOL is_snapshot_read,
- BOOL disable_prefetching
+ bool is_snapshot_read,
+ bool disable_prefetching
)
{
if (is_snapshot_read) {
@@ -3605,28 +3605,28 @@ int toku_ft_cursor (
return ENOMEM;
memset(cursor, 0, sizeof(*cursor));
cursor->ft_handle = brt;
- cursor->prefetching = FALSE;
+ cursor->prefetching = false;
toku_init_dbt(&cursor->range_lock_left_key);
toku_init_dbt(&cursor->range_lock_right_key);
- cursor->left_is_neg_infty = FALSE;
- cursor->right_is_pos_infty = FALSE;
+ cursor->left_is_neg_infty = false;
+ cursor->right_is_pos_infty = false;
cursor->is_snapshot_read = is_snapshot_read;
- cursor->is_leaf_mode = FALSE;
+ cursor->is_leaf_mode = false;
cursor->ttxn = ttxn;
cursor->disable_prefetching = disable_prefetching;
- cursor->is_temporary = FALSE;
+ cursor->is_temporary = false;
*cursorptr = cursor;
return 0;
}
void
toku_ft_cursor_set_temporary(FT_CURSOR ftcursor) {
- ftcursor->is_temporary = TRUE;
+ ftcursor->is_temporary = true;
}
void
toku_ft_cursor_set_leaf_mode(FT_CURSOR ftcursor) {
- ftcursor->is_leaf_mode = TRUE;
+ ftcursor->is_leaf_mode = true;
}
int
@@ -3636,7 +3636,7 @@ toku_ft_cursor_is_leaf_mode(FT_CURSOR ftcursor) {
void
toku_ft_cursor_set_range_lock(FT_CURSOR cursor, const DBT *left, const DBT *right,
- BOOL left_is_neg_infty, BOOL right_is_pos_infty)
+ bool left_is_neg_infty, bool right_is_pos_infty)
{
if (cursor->range_lock_left_key.data) {
toku_free(cursor->range_lock_left_key.data);
@@ -3648,13 +3648,13 @@ toku_ft_cursor_set_range_lock(FT_CURSOR cursor, const DBT *left, const DBT *righ
}
if (left_is_neg_infty) {
- cursor->left_is_neg_infty = TRUE;
+ cursor->left_is_neg_infty = true;
} else {
toku_fill_dbt(&cursor->range_lock_left_key,
toku_xmemdup(left->data, left->size), left->size);
}
if (right_is_pos_infty) {
- cursor->right_is_pos_infty = TRUE;
+ cursor->right_is_pos_infty = true;
} else {
toku_fill_dbt(&cursor->range_lock_right_key,
toku_xmemdup(right->data, right->size), right->size);
@@ -3676,22 +3676,22 @@ int toku_ft_cursor_close(FT_CURSOR cursor) {
}
static inline void ft_cursor_set_prefetching(FT_CURSOR cursor) {
- cursor->prefetching = TRUE;
+ cursor->prefetching = true;
}
-static inline BOOL ft_cursor_prefetching(FT_CURSOR cursor) {
+static inline bool ft_cursor_prefetching(FT_CURSOR cursor) {
return cursor->prefetching;
}
-//Return TRUE if cursor is uninitialized. FALSE otherwise.
-static BOOL
+//Return true if cursor is uninitialized. false otherwise.
+static bool
ft_cursor_not_set(FT_CURSOR cursor) {
assert((cursor->key.data==NULL) == (cursor->val.data==NULL));
- return (BOOL)(cursor->key.data == NULL);
+ return (bool)(cursor->key.data == NULL);
}
static int
-pair_leafval_heaviside_le (u_int32_t klen, void *kval,
+pair_leafval_heaviside_le (uint32_t klen, void *kval,
ft_search_t *search) {
DBT x;
int cmp = search->compare(search,
@@ -3709,7 +3709,7 @@ static int
heaviside_from_search_t (OMTVALUE lev, void *extra) {
LEAFENTRY CAST_FROM_VOIDP(le, lev);
ft_search_t *CAST_FROM_VOIDP(search, extra);
- u_int32_t keylen;
+ uint32_t keylen;
void* key = le_key_and_len(le, &keylen);
return pair_leafval_heaviside_le (keylen, key,
@@ -3724,7 +3724,7 @@ static inline int
is_le_val_del(LEAFENTRY le, FT_CURSOR ftcursor) {
int rval;
if (ftcursor->is_snapshot_read) {
- BOOL is_del;
+ bool is_del;
le_iterate_is_del(
le,
does_txn_read_entry,
@@ -3749,16 +3749,16 @@ static void search_save_bound (ft_search_t *search, DBT *pivot) {
search->pivot_bound.data = toku_malloc(pivot->size);
search->pivot_bound.size = pivot->size;
memcpy(search->pivot_bound.data, pivot->data, pivot->size);
- search->have_pivot_bound = TRUE;
+ search->have_pivot_bound = true;
}
-static BOOL search_pivot_is_bounded (ft_search_t *search, DESCRIPTOR desc, ft_compare_func cmp, DBT *pivot) __attribute__((unused));
-static BOOL search_pivot_is_bounded (ft_search_t *search, DESCRIPTOR desc, ft_compare_func cmp, DBT *pivot)
-// Effect: Return TRUE iff the pivot has already been searched (for fixing #3522.)
+static bool search_pivot_is_bounded (ft_search_t *search, DESCRIPTOR desc, ft_compare_func cmp, DBT *pivot) __attribute__((unused));
+static bool search_pivot_is_bounded (ft_search_t *search, DESCRIPTOR desc, ft_compare_func cmp, DBT *pivot)
+// Effect: Return true iff the pivot has already been searched (for fixing #3522.)
// If searching from left to right, if we have already searched all the values less than pivot, we don't want to search again.
// If searching from right to left, if we have already searched all the values greater than pivot, we don't want to search again.
{
- if (!search->have_pivot_bound) return TRUE; // isn't bounded.
+ if (!search->have_pivot_bound) return true; // isn't bounded.
FAKE_DB(db, desc);
int comp = cmp(&db, pivot, &search->pivot_bound);
if (search->direction == FT_SEARCH_LEFT) {
@@ -3844,7 +3844,7 @@ do_bn_apply_cmd(FT_HANDLE t, BASEMENTNODE bn, FTNODE ancestor, int childnum, con
MSN msn = entry->msn;
const XIDS xids = (XIDS) &entry->xids_s;
bytevec key = xids_get_end_of_array(xids);
- bytevec val = (u_int8_t*)key + entry->keylen;
+ bytevec val = (uint8_t*)key + entry->keylen;
DBT hk;
toku_fill_dbt(&hk, key, keylen);
@@ -3996,7 +3996,7 @@ bnc_apply_messages_to_basement_node(
FTNODE ancestor, // the ancestor node where we can find messages to apply
int childnum, // which child buffer of ancestor contains messages we want
struct pivot_bounds const * const bounds, // contains pivot key bounds of this basement node
- BOOL* msgs_applied
+ bool* msgs_applied
)
{
int r;
@@ -4012,7 +4012,7 @@ bnc_apply_messages_to_basement_node(
stale_lbi = 0;
stale_ube = 0;
}
- u_int32_t fresh_lbi, fresh_ube;
+ uint32_t fresh_lbi, fresh_ube;
find_bounds_within_message_tree(&t->ft->cmp_descriptor, t->ft->compare_fun, bnc->fresh_message_tree, bnc->buffer, bounds, &fresh_lbi, &fresh_ube);
// We now know where all the messages we must apply are, so one of the
@@ -4050,7 +4050,7 @@ bnc_apply_messages_to_basement_node(
// Apply the messages in MSN order.
for (int i = 0; i < buffer_size; ++i) {
- *msgs_applied = TRUE;
+ *msgs_applied = true;
const struct fifo_entry *entry = toku_fifo_get_entry(bnc->buffer, offsets[i]);
do_bn_apply_cmd(t, bn, ancestor, childnum, entry, &stats_delta);
}
@@ -4059,13 +4059,13 @@ bnc_apply_messages_to_basement_node(
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages.
struct iterate_do_bn_apply_cmd_extra iter_extra = { .t = t, .bn = bn, .ancestor = ancestor, .childnum = childnum, .stats_to_update = &stats_delta};
- if (fresh_ube - fresh_lbi > 0) *msgs_applied = TRUE;
+ if (fresh_ube - fresh_lbi > 0) *msgs_applied = true;
r = bnc->fresh_message_tree.iterate_on_range<struct iterate_do_bn_apply_cmd_extra, iterate_do_bn_apply_cmd>(fresh_lbi, fresh_ube, &iter_extra);
assert_zero(r);
} else if (fresh_lbi == fresh_ube) {
// No fresh messages to apply, we just apply stale messages.
- if (stale_ube - stale_lbi > 0) *msgs_applied = TRUE;
+ if (stale_ube - stale_lbi > 0) *msgs_applied = true;
struct iterate_do_bn_apply_cmd_extra iter_extra = { .t = t, .bn = bn, .ancestor = ancestor, .childnum = childnum , .stats_to_update = &stats_delta};
r = bnc->stale_message_tree.iterate_on_range<struct iterate_do_bn_apply_cmd_extra, iterate_do_bn_apply_cmd>(stale_lbi, stale_ube, &iter_extra);
@@ -4089,7 +4089,7 @@ bnc_apply_messages_to_basement_node(
// Iterate over both lists, applying the smaller (in (key, msn)
// order) message at each step
while (stale_i < stale_ube && fresh_i < fresh_ube) {
- *msgs_applied = TRUE;
+ *msgs_applied = true;
int c = toku_fifo_entry_key_msn_cmp(extra, stale_offset, fresh_offset);
if (c < 0) {
// The stale message we're pointing to either has a
@@ -4169,7 +4169,7 @@ bnc_apply_messages_to_basement_node(
}
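// A self-contained sketch of the (key, msn) merge above, with illustrative
// types standing in for the real FIFO entries and message trees: at each
// step the smaller of the stale/fresh candidates is applied, so each key's
// messages reach the basement node in MSN order.  (do_bn_apply_cmd and
// toku_fifo_entry_key_msn_cmp play the roles of sketch_apply and the
// comparison below.)
#include <cstddef>
#include <vector>

struct sketch_msg { int key; unsigned long msn; };

static void sketch_apply(const sketch_msg &) { /* apply to basement node */ }

static void sketch_merge(const std::vector<sketch_msg> &stale,
                         const std::vector<sketch_msg> &fresh) {
    size_t si = 0, fi = 0;
    while (si < stale.size() && fi < fresh.size()) {
        const sketch_msg &s = stale[si], &f = fresh[fi];
        // compare by key first, then by msn
        if (s.key < f.key || (s.key == f.key && s.msn < f.msn)) {
            sketch_apply(s); ++si;
        } else {
            sketch_apply(f); ++fi;
        }
    }
    while (si < stale.size()) sketch_apply(stale[si++]); // drain leftovers
    while (fi < fresh.size()) sketch_apply(fresh[fi++]);
}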
void
-maybe_apply_ancestors_messages_to_node (FT_HANDLE t, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, BOOL* msgs_applied)
+maybe_apply_ancestors_messages_to_node (FT_HANDLE t, FTNODE node, ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool* msgs_applied)
// Effect:
// Bring a leaf node up-to-date according to all the messages in the ancestors.
// If the leaf node is already up-to-date then do nothing.
@@ -4224,9 +4224,9 @@ ft_cursor_shortcut (
int direction,
FT_GET_CALLBACK_FUNCTION getf,
void *getf_v,
- u_int32_t *keylen,
+ uint32_t *keylen,
void **key,
- u_int32_t *vallen,
+ uint32_t *vallen,
void **val
);
@@ -4237,9 +4237,9 @@ ft_search_basement_node(
ft_search_t *search,
FT_GET_CALLBACK_FUNCTION getf,
void *getf_v,
- BOOL *doprefetch,
+ bool *doprefetch,
FT_CURSOR ftcursor,
- BOOL can_bulk_fetch
+ bool can_bulk_fetch
)
{
// Now we have to convert from ft_search_t to the heaviside function with a direction. What a pain...
@@ -4252,7 +4252,7 @@ ft_search_basement_node(
return EINVAL; // This return and the goto are a hack to get both compile-time and run-time checking on enum
ok: ;
OMTVALUE datav;
- u_int32_t idx = 0;
+ uint32_t idx = 0;
int r = toku_omt_find(bn->buffer,
heaviside_from_search_t,
search,
@@ -4279,7 +4279,7 @@ ok: ;
idx--;
break;
default:
- assert(FALSE);
+ assert(false);
}
r = toku_omt_fetch(bn->buffer, idx, &datav);
assert_zero(r); // we just validated the index
@@ -4289,9 +4289,9 @@ ok: ;
}
got_a_good_value:
{
- u_int32_t keylen;
+ uint32_t keylen;
void *key;
- u_int32_t vallen;
+ uint32_t vallen;
void *val;
ft_cursor_extract_key_and_val(le,
@@ -4328,7 +4328,7 @@ got_a_good_value:
ftcursor->val.size = vallen;
}
//The search was successful. Prefetching can continue.
- *doprefetch = TRUE;
+ *doprefetch = true;
}
}
if (r == TOKUDB_CURSOR_CONTINUE) r = 0;
@@ -4343,12 +4343,12 @@ ft_search_node (
int child_to_search,
FT_GET_CALLBACK_FUNCTION getf,
void *getf_v,
- BOOL *doprefetch,
+ bool *doprefetch,
FT_CURSOR ftcursor,
UNLOCKERS unlockers,
ANCESTORS,
struct pivot_bounds const * const bounds,
- BOOL can_bulk_fetch
+ bool can_bulk_fetch
);
// the number of nodes to prefetch
@@ -4356,7 +4356,7 @@ ft_search_node (
#if TOKU_DO_PREFETCH
static int
-ftnode_fetch_callback_and_free_bfe(CACHEFILE cf, int fd, BLOCKNUM nodename, u_int32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int *dirtyp, void *extraargs)
+ftnode_fetch_callback_and_free_bfe(CACHEFILE cf, int fd, BLOCKNUM nodename, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int *dirtyp, void *extraargs)
{
int r = toku_ftnode_fetch_callback(cf, fd, nodename, fullhash, ftnode_pv, disk_data, sizep, dirtyp, extraargs);
struct ftnode_fetch_extra *CAST_FROM_VOIDP(ffe, extraargs);
@@ -4376,7 +4376,7 @@ ftnode_pf_callback_and_free_bfe(void *ftnode_pv, void* disk_data, void *read_ext
}
static void
-ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcursor, BOOL *doprefetch) {
+ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcursor, bool *doprefetch) {
// if we want to prefetch in the tree
// then prefetch the next children if there are any
@@ -4384,10 +4384,10 @@ ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcur
int rc = ft_cursor_rightmost_child_wanted(ftcursor, brt, node);
for (int i = childnum + 1; (i <= childnum + TOKU_DO_PREFETCH) && (i <= rc); i++) {
BLOCKNUM nextchildblocknum = BP_BLOCKNUM(node, i);
- u_int32_t nextfullhash = compute_child_fullhash(brt->ft->cf, node, i);
+ uint32_t nextfullhash = compute_child_fullhash(brt->ft->cf, node, i);
struct ftnode_fetch_extra *MALLOC(bfe);
fill_bfe_for_prefetch(bfe, brt->ft, ftcursor);
- BOOL doing_prefetch = FALSE;
+ bool doing_prefetch = false;
toku_cachefile_prefetch(
brt->ft->cf,
nextchildblocknum,
@@ -4403,7 +4403,7 @@ ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcur
destroy_bfe_for_prefetch(bfe);
toku_free(bfe);
}
- *doprefetch = FALSE;
+ *doprefetch = false;
}
}
}
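// Note on the loop above: at most TOKU_DO_PREFETCH sibling children are
// prefetched, bounded by the rightmost child the cursor can want.  Each
// prefetch owns a heap-allocated ftnode_fetch_extra, which the
// *_and_free_bfe callbacks release when the fetch completes; if the
// cachetable reports that no prefetch was started (doing_prefetch stays
// false), the extra is destroyed here and further prefetching is switched
// off for this search.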
@@ -4413,7 +4413,7 @@ ft_node_maybe_prefetch(FT_HANDLE brt, FTNODE node, int childnum, FT_CURSOR ftcur
struct unlock_ftnode_extra {
FT_HANDLE ft_handle;
FTNODE node;
- BOOL msgs_applied;
+ bool msgs_applied;
};
// When this is called, the cachetable lock is held
static void
@@ -4434,14 +4434,14 @@ unlock_ftnode_fun (void *v) {
/* search in a node's child */
static int
-ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, BOOL *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers,
- ANCESTORS ancestors, struct pivot_bounds const * const bounds, BOOL can_bulk_fetch)
+ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool *doprefetch, FT_CURSOR ftcursor, UNLOCKERS unlockers,
+ ANCESTORS ancestors, struct pivot_bounds const * const bounds, bool can_bulk_fetch)
// Effect: Search in a node's child. Searches are read-only now (at least as far as the hardcopy is concerned).
{
struct ancestors next_ancestors = {node, childnum, ancestors};
BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
- u_int32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum);
+ uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum);
FTNODE childnode;
struct ftnode_fetch_extra bfe;
@@ -4455,14 +4455,14 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
ftcursor->right_is_pos_infty,
ftcursor->disable_prefetching
);
- BOOL msgs_applied = FALSE;
+ bool msgs_applied = false;
{
int rr = toku_pin_ftnode(brt, childblocknum, fullhash,
unlockers,
&next_ancestors, bounds,
&bfe,
- (node->height == 1), // may_modify_node TRUE iff child is leaf
- TRUE,
+ (node->height == 1), // may_modify_node true iff child is leaf
+ true,
&childnode,
&msgs_applied);
if (rr==TOKUDB_TRY_AGAIN) return rr;
@@ -4470,7 +4470,7 @@ ft_search_child(FT_HANDLE brt, FTNODE node, int childnum, ft_search_t *search, F
}
struct unlock_ftnode_extra unlock_extra = {brt,childnode,msgs_applied};
- struct unlockers next_unlockers = {TRUE, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
+ struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
int r = ft_search_node(brt, childnode, search, bfe.child_to_read, getf, getf_v, doprefetch, ftcursor, &next_unlockers, &next_ancestors, bounds, can_bulk_fetch);
if (r!=TOKUDB_TRY_AGAIN) {
@@ -4627,12 +4627,12 @@ ft_search_node(
int child_to_search,
FT_GET_CALLBACK_FUNCTION getf,
void *getf_v,
- BOOL *doprefetch,
+ bool *doprefetch,
FT_CURSOR ftcursor,
UNLOCKERS unlockers,
ANCESTORS ancestors,
struct pivot_bounds const * const bounds,
- BOOL can_bulk_fetch
+ bool can_bulk_fetch
)
{ int r = 0;
// assert that we got a valid child_to_search
@@ -4726,7 +4726,7 @@ ft_search_node(
}
static int
-toku_ft_search (FT_HANDLE brt, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, BOOL can_bulk_fetch)
+toku_ft_search (FT_HANDLE brt, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, FT_CURSOR ftcursor, bool can_bulk_fetch)
// Effect: Perform a search. Associate cursor with a leaf if possible.
// All searches are performed through this function.
{
@@ -4778,7 +4778,7 @@ try_again:
FTNODE node = NULL;
{
toku_ft_grab_treelock(brt->ft);
- u_int32_t fullhash;
+ uint32_t fullhash;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash);
toku_pin_ftnode_off_client_thread(
@@ -4786,7 +4786,7 @@ try_again:
root_key,
fullhash,
&bfe,
- FALSE, // may_modify_node set to FALSE, because root cannot change during search
+ false, // may_modify_node set to false, because root cannot change during search
0,
NULL,
&node
@@ -4797,11 +4797,11 @@ try_again:
uint tree_height = node->height + 1; // How high is the tree? This is the height of the root node plus one (leaf is at height 0).
- struct unlock_ftnode_extra unlock_extra = {brt,node,FALSE};
- struct unlockers unlockers = {TRUE, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
+ struct unlock_ftnode_extra unlock_extra = {brt,node,false};
+ struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
{
- BOOL doprefetch = FALSE;
+ bool doprefetch = false;
//static int counter = 0; counter++;
r = ft_search_node(brt, node, search, bfe.child_to_read, getf, getf_v, &doprefetch, ftcursor, &unlockers, (ANCESTORS)NULL, &infinite_bounds, can_bulk_fetch);
if (r==TOKUDB_TRY_AGAIN) {
@@ -4867,7 +4867,7 @@ struct ft_cursor_search_struct {
/* search for the first kv pair that matches the search object */
static int
-ft_cursor_search(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, BOOL can_bulk_fetch)
+ft_cursor_search(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, bool can_bulk_fetch)
{
int r = toku_ft_search(cursor->ft_handle, search, getf, getf_v, cursor, can_bulk_fetch);
return r;
@@ -4919,7 +4919,7 @@ toku_ft_cursor_current(FT_CURSOR cursor, int op, FT_GET_CALLBACK_FUNCTION getf,
if (op == DB_CURRENT) {
struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, 0};
ft_search_t search; ft_search_init(&search, ft_cursor_compare_set, FT_SEARCH_LEFT, &cursor->key, cursor->ft_handle);
- int r = toku_ft_search(cursor->ft_handle, &search, ft_cursor_current_getf, &bcss, cursor, FALSE);
+ int r = toku_ft_search(cursor->ft_handle, &search, ft_cursor_current_getf, &bcss, cursor, false);
ft_search_finish(&search);
return r;
}
@@ -4930,7 +4930,7 @@ int
toku_ft_cursor_first(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_one, FT_SEARCH_LEFT, 0, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, FALSE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
ft_search_finish(&search);
return r;
}
@@ -4939,7 +4939,7 @@ int
toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_one, FT_SEARCH_RIGHT, 0, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, FALSE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
ft_search_finish(&search);
return r;
}
@@ -4956,18 +4956,18 @@ ft_cursor_shortcut (
int direction,
FT_GET_CALLBACK_FUNCTION getf,
void *getf_v,
- u_int32_t *keylen,
+ uint32_t *keylen,
void **key,
- u_int32_t *vallen,
+ uint32_t *vallen,
void **val
)
{
int r = 0;
- u_int32_t index = cursor->leaf_info.to_be.index;
+ uint32_t index = cursor->leaf_info.to_be.index;
OMT omt = cursor->leaf_info.to_be.omt;
// if we are searching towards the end, limit is last element
// if we are searching towards the beginning, limit is the first element
- u_int32_t limit = (direction > 0) ? (toku_omt_size(omt) - 1) : 0;
+ uint32_t limit = (direction > 0) ? (toku_omt_size(omt) - 1) : 0;
//Starting with the prev, find the first real (non-provdel, i.e. not provisionally deleted) leafentry.
OMTVALUE lev = NULL;
@@ -5009,7 +5009,7 @@ int
toku_ft_cursor_next(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_next, FT_SEARCH_LEFT, &cursor->key, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, TRUE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, true);
ft_search_finish(&search);
if (r == 0) ft_cursor_set_prefetching(cursor);
return r;
@@ -5042,7 +5042,7 @@ static int
ft_cursor_search_eq_k_x(FT_CURSOR cursor, ft_search_t *search, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
struct ft_cursor_search_struct bcss = {getf, getf_v, cursor, search};
- int r = toku_ft_search(cursor->ft_handle, search, ft_cursor_search_eq_k_x_getf, &bcss, cursor, FALSE);
+ int r = toku_ft_search(cursor->ft_handle, search, ft_cursor_search_eq_k_x_getf, &bcss, cursor, false);
return r;
}
@@ -5055,7 +5055,7 @@ int
toku_ft_cursor_prev(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_prev, FT_SEARCH_RIGHT, &cursor->key, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, TRUE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, true);
ft_search_finish(&search);
return r;
}
@@ -5078,7 +5078,7 @@ int
toku_ft_cursor_set_range(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_set_range, FT_SEARCH_LEFT, key, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, FALSE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
ft_search_finish(&search);
return r;
}
@@ -5092,7 +5092,7 @@ int
toku_ft_cursor_set_range_reverse(FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v)
{
ft_search_t search; ft_search_init(&search, ft_cursor_compare_set_range_reverse, FT_SEARCH_RIGHT, key, cursor->ft_handle);
- int r = ft_cursor_search(cursor, &search, getf, getf_v, FALSE);
+ int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
ft_search_finish(&search);
return r;
}
@@ -5155,7 +5155,7 @@ toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval)
static const DBT __toku_dbt_fake = {};
static const DBT* const toku_dbt_fake = &__toku_dbt_fake;
-BOOL toku_ft_cursor_uninitialized(FT_CURSOR c) {
+bool toku_ft_cursor_uninitialized(FT_CURSOR c) {
return ft_cursor_not_set(c);
}
@@ -5168,7 +5168,7 @@ toku_ft_lookup (FT_HANDLE brt, DBT *k, FT_GET_CALLBACK_FUNCTION getf, void *getf
int r, rr;
FT_CURSOR cursor;
- rr = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ rr = toku_ft_cursor(brt, &cursor, NULL, false, false);
if (rr != 0) return rr;
int op = DB_SET;
@@ -5190,7 +5190,7 @@ toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN txn) {
int r;
int unchecked_flags = flags;
- BOOL error_if_missing = (BOOL) !(flags&DB_DELETE_ANY);
+ bool error_if_missing = (bool) !(flags&DB_DELETE_ANY);
unchecked_flags &= ~DB_DELETE_ANY;
if (unchecked_flags!=0) r = EINVAL;
else if (ft_cursor_not_set(cursor)) r = EINVAL;
@@ -5217,7 +5217,7 @@ struct keyrange_compare_s {
static int
keyrange_compare (OMTVALUE lev, void *extra) {
LEAFENTRY CAST_FROM_VOIDP(le, lev);
- u_int32_t keylen;
+ uint32_t keylen;
void* key = le_key_and_len(le, &keylen);
DBT omt_dbt;
toku_fill_dbt(&omt_dbt, key, keylen);
@@ -5228,8 +5228,8 @@ keyrange_compare (OMTVALUE lev, void *extra) {
}
static void
-keyrange_in_leaf_partition (FT_HANDLE brt, FTNODE node, DBT *key, int child_number, u_int64_t estimated_num_rows,
- u_int64_t *less, u_int64_t *equal, u_int64_t *greater)
+keyrange_in_leaf_partition (FT_HANDLE brt, FTNODE node, DBT *key, int child_number, uint64_t estimated_num_rows,
+ uint64_t *less, uint64_t *equal, uint64_t *greater)
// If the partition is in main memory then estimate the number of rows less than, equal to, and greater than the key
// If KEY==NULL then use an arbitrary key (leftmost or zero)
{
@@ -5239,7 +5239,7 @@ keyrange_in_leaf_partition (FT_HANDLE brt, FTNODE node, DBT *key, int child_numb
struct keyrange_compare_s s = {brt,key};
BASEMENTNODE bn = BLB(node, child_number);
OMTVALUE datav;
- u_int32_t idx = 0;
+ uint32_t idx = 0;
// if key is NULL then set r==-1 and idx==0.
int r = key ? toku_omt_find_zero(bn->buffer, keyrange_compare, &s, &datav, &idx) : -1;
if (r==0) {
@@ -5261,8 +5261,8 @@ keyrange_in_leaf_partition (FT_HANDLE brt, FTNODE node, DBT *key, int child_numb
static int
toku_ft_keyrange_internal (FT_HANDLE brt, FTNODE node,
- DBT *key, u_int64_t *less, u_int64_t *equal, u_int64_t *greater,
- u_int64_t estimated_num_rows,
+ DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater,
+ uint64_t estimated_num_rows,
struct ftnode_fetch_extra *bfe, // set up to read a minimal read.
struct unlockers *unlockers, ANCESTORS ancestors, struct pivot_bounds const * const bounds)
// Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in.
@@ -5282,9 +5282,9 @@ toku_ft_keyrange_internal (FT_HANDLE brt, FTNODE node,
// do the child.
struct ancestors next_ancestors = {node, child_number, ancestors};
BLOCKNUM childblocknum = BP_BLOCKNUM(node, child_number);
- u_int32_t fullhash = compute_child_fullhash(brt->ft->cf, node, child_number);
+ uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, child_number);
FTNODE childnode;
- BOOL msgs_applied = FALSE;
+ bool msgs_applied = false;
r = toku_pin_ftnode(
brt,
childblocknum,
@@ -5293,8 +5293,8 @@ toku_ft_keyrange_internal (FT_HANDLE brt, FTNODE node,
&next_ancestors,
bounds,
bfe,
- FALSE, // may_modify_node is FALSE, because node guaranteed to not change
- FALSE,
+ false, // may_modify_node is false, because node guaranteed to not change
+ false,
&childnode,
&msgs_applied
);
@@ -5302,8 +5302,8 @@ toku_ft_keyrange_internal (FT_HANDLE brt, FTNODE node,
if (r != TOKUDB_TRY_AGAIN) {
assert(r == 0);
- struct unlock_ftnode_extra unlock_extra = {brt,childnode,FALSE};
- struct unlockers next_unlockers = {TRUE, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
+ struct unlock_ftnode_extra unlock_extra = {brt,childnode,false};
+ struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
const struct pivot_bounds next_bounds = next_pivot_keys(node, child_number, bounds);
r = toku_ft_keyrange_internal(brt, childnode, key, less, equal, greater, rows_per_child,
@@ -5323,7 +5323,7 @@ toku_ft_keyrange_internal (FT_HANDLE brt, FTNODE node,
}
int
-toku_ft_keyrange (FT_HANDLE brt, DBT *key, u_int64_t *less_p, u_int64_t *equal_p, u_int64_t *greater_p)
+toku_ft_keyrange (FT_HANDLE brt, DBT *key, uint64_t *less_p, uint64_t *equal_p, uint64_t *greater_p)
// Effect: Return an estimate of the number of keys to the left, the number equal, and the number to the right of the key.
// The values are an estimate.
// If you perform a keyrange on two keys that are in the same in-memory and uncompressed basement,
@@ -5337,12 +5337,12 @@ toku_ft_keyrange (FT_HANDLE brt, DBT *key, u_int64_t *less_p, u_int64_t *equal_p
fill_bfe_for_min_read(&bfe, brt->ft); // read pivot keys but not message buffers
try_again:
{
- u_int64_t less = 0, equal = 0, greater = 0;
+ uint64_t less = 0, equal = 0, greater = 0;
FTNODE node = NULL;
{
toku_ft_grab_treelock(brt->ft);
- u_int32_t fullhash;
+ uint32_t fullhash;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash);
toku_pin_ftnode_off_client_thread(
@@ -5350,7 +5350,7 @@ try_again:
root_key,
fullhash,
&bfe,
- FALSE, // may_modify_node, cannot change root during keyrange
+ false, // may_modify_node, cannot change root during keyrange
0,
NULL,
&node
@@ -5358,8 +5358,8 @@ try_again:
toku_ft_release_treelock(brt->ft);
}
- struct unlock_ftnode_extra unlock_extra = {brt,node,FALSE};
- struct unlockers unlockers = {TRUE, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
+ struct unlock_ftnode_extra unlock_extra = {brt,node,false};
+ struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
{
int64_t numrows = brt->ft->in_memory_stats.numrows;
@@ -5399,7 +5399,7 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const
void* node_v;
toku_get_node_for_verify(blocknum, brt, &node);
result=toku_verify_ftnode(brt, ZERO_MSN, ZERO_MSN, node, -1, lorange, hirange, NULL, NULL, 0, 1, 0);
- u_int32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum);
+ uint32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft);
int r = toku_cachetable_get_and_pin(
@@ -5412,7 +5412,7 @@ toku_dump_ftnode (FILE *file, FT_HANDLE brt, BLOCKNUM blocknum, int depth, const
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
- TRUE, // may_modify_value, just safe to set to TRUE, I think it could theoretically be FALSE
+ true, // may_modify_value, just safe to set to true, I think it could theoretically be false
&bfe
);
assert_zero(r);
@@ -5482,7 +5482,7 @@ int toku_dump_ft (FILE *f, FT_HANDLE brt) {
{
toku_ft_grab_treelock(brt->ft);
- u_int32_t fullhash = 0;
+ uint32_t fullhash = 0;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &fullhash);
r = toku_dump_ftnode(f, brt, root_key, 0, 0, 0);
@@ -5532,7 +5532,7 @@ toku_ft_suppress_recovery_logs (FT_HANDLE brt, TOKUTXN txn) {
assert(brt->ft->txnid_that_created_or_locked_when_empty == toku_txn_get_txnid(txn));
assert(brt->ft->txnid_that_suppressed_recovery_logs == TXNID_NONE);
brt->ft->txnid_that_suppressed_recovery_logs = toku_txn_get_txnid(txn);
- txn->checkpoint_needed_before_commit = TRUE;
+ txn->checkpoint_needed_before_commit = true;
}
int toku_ft_handle_set_panic(FT_HANDLE brt, int panic, const char *panic_string) {
@@ -5607,7 +5607,7 @@ toku_ft_get_fragmentation(FT_HANDLE brt, TOKU_DB_FRAGMENTATION report) {
return r;
}
-static BOOL is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
+static bool is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
if (node->height > 0) {
for (int childnum=0; childnum<node->n_children; childnum++) {
if (toku_bnc_nbytesinbuf(BNC(node, childnum)) != 0) {
@@ -5616,7 +5616,7 @@ static BOOL is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
FTNODE childnode;
{
BLOCKNUM childblocknum = BP_BLOCKNUM(node,childnum);
- u_int32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum);
+ uint32_t fullhash = compute_child_fullhash(brt->ft->cf, node, childnum);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft);
// don't need to pass in dependent nodes as we are not
@@ -5626,7 +5626,7 @@ static BOOL is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
childblocknum,
fullhash,
&bfe,
- FALSE, // may_modify_node set to FALSE, as nodes not modified
+ false, // may_modify_node set to false, as nodes not modified
0,
NULL,
&childnode
@@ -5641,18 +5641,18 @@ static BOOL is_empty_fast_iter (FT_HANDLE brt, FTNODE node) {
// leaf: If the omt is empty, we are happy.
for (int i = 0; i < node->n_children; i++) {
if (toku_omt_size(BLB_BUFFER(node, i))) {
- return FALSE;
+ return false;
}
}
- return TRUE;
+ return true;
}
}
-BOOL toku_ft_is_empty_fast (FT_HANDLE brt)
+bool toku_ft_is_empty_fast (FT_HANDLE brt)
// A fast check to see if the tree is empty. If there are any messages or leafentries, we consider the tree to be nonempty. It's possible that those
// messages and leafentries would all optimize away and that the tree is empty, but we'll say it is nonempty.
{
- u_int32_t fullhash;
+ uint32_t fullhash;
FTNODE node;
//assert(fullhash == toku_cachetable_hash(brt->ft->cf, *rootp));
{
@@ -5667,7 +5667,7 @@ BOOL toku_ft_is_empty_fast (FT_HANDLE brt)
root_key,
fullhash,
&bfe,
- FALSE, // may_modify_node set to FALSE, node does not change
+ false, // may_modify_node set to false, node does not change
0,
NULL,
&node
@@ -5675,7 +5675,7 @@ BOOL toku_ft_is_empty_fast (FT_HANDLE brt)
toku_ft_release_treelock(brt->ft);
}
- BOOL r = is_empty_fast_iter(brt, node);
+ bool r = is_empty_fast_iter(brt, node);
toku_unpin_ftnode(brt->ft, node);
return r;
}
diff --git a/ft/ft-ops.h b/ft/ft-ops.h
index dbeaf6ff56e..f3952f2511c 100644
--- a/ft/ft-ops.h
+++ b/ft/ft-ops.h
@@ -37,8 +37,8 @@ int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int node
// - can only update cmp descriptor immediately after opening the FIRST ft handle for this ft and before
// ANY operations. To update the cmp descriptor after any operations have already happened, all handles
// and transactions must close and reopen before the change, then you can update the cmp descriptor
-int toku_ft_change_descriptor(FT_HANDLE t, const DBT* old_descriptor, const DBT* new_descriptor, BOOL do_log, TOKUTXN txn, BOOL update_cmp_descriptor);
-u_int32_t toku_serialize_descriptor_size(const DESCRIPTOR desc);
+int toku_ft_change_descriptor(FT_HANDLE t, const DBT* old_descriptor, const DBT* new_descriptor, bool do_log, TOKUTXN txn, bool update_cmp_descriptor);
+uint32_t toku_serialize_descriptor_size(const DESCRIPTOR desc);
int toku_ft_handle_create(FT_HANDLE *) __attribute__ ((warn_unused_result));
int toku_ft_set_flags(FT_HANDLE, unsigned int flags) __attribute__ ((warn_unused_result));
@@ -129,17 +129,17 @@ int toku_ft_optimize (FT_HANDLE brt) __attribute__ ((warn_unused_result));
// Effect: Insert a key and data pair into a brt if the oplsn is newer than the brt lsn. This function is called during recovery.
// Returns 0 if successful
-int toku_ft_maybe_insert (FT_HANDLE brt, DBT *k, DBT *v, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging, enum ft_msg_type type) __attribute__ ((warn_unused_result));
+int toku_ft_maybe_insert (FT_HANDLE brt, DBT *k, DBT *v, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, enum ft_msg_type type) __attribute__ ((warn_unused_result));
// Effect: Send an update message into a brt. This function is called
// during recovery.
// Returns 0 if successful
-int toku_ft_maybe_update(FT_HANDLE brt, const DBT *key, const DBT *update_function_extra, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging) __attribute__ ((warn_unused_result));
+int toku_ft_maybe_update(FT_HANDLE brt, const DBT *key, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging) __attribute__ ((warn_unused_result));
// Effect: Send a broadcasting update message into a brt. This function
// is called during recovery.
// Returns 0 if successful
-int toku_ft_maybe_update_broadcast(FT_HANDLE brt, const DBT *update_function_extra, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging, BOOL is_resetting_op) __attribute__ ((warn_unused_result));
+int toku_ft_maybe_update_broadcast(FT_HANDLE brt, const DBT *update_function_extra, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging, bool is_resetting_op) __attribute__ ((warn_unused_result));
int toku_ft_load_recovery(TOKUTXN txn, FILENUM old_filenum, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn) __attribute__ ((warn_unused_result));
int toku_ft_load(FT_HANDLE brt, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *get_lsn) __attribute__ ((warn_unused_result));
@@ -158,7 +158,7 @@ int toku_ft_delete (FT_HANDLE brt, DBT *k, TOKUTXN txn) __attribute__ ((warn_un
// Effect: Delete a key from a brt if the oplsn is newer than the brt lsn. This function is called during recovery.
// Returns 0 if successful
-int toku_ft_maybe_delete (FT_HANDLE brt, DBT *k, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, BOOL do_logging) __attribute__ ((warn_unused_result));
+int toku_ft_maybe_delete (FT_HANDLE brt, DBT *k, TOKUTXN txn, bool oplsn_valid, LSN oplsn, bool do_logging) __attribute__ ((warn_unused_result));
int toku_ft_send_insert(FT_HANDLE brt, DBT *key, DBT *val, XIDS xids, enum ft_msg_type type) __attribute__ ((warn_unused_result));
int toku_ft_send_delete(FT_HANDLE brt, DBT *key, XIDS xids) __attribute__ ((warn_unused_result));
@@ -175,13 +175,13 @@ int toku_verify_ft (FT_HANDLE brt) __attribute__ ((warn_unused_result));
int toku_verify_ft_with_progress (FT_HANDLE brt, int (*progress_callback)(void *extra, float progress), void *extra, int verbose, int keep_going) __attribute__ ((warn_unused_result));
typedef struct ft_cursor *FT_CURSOR;
-int toku_ft_cursor (FT_HANDLE, FT_CURSOR*, TOKUTXN, BOOL, BOOL) __attribute__ ((warn_unused_result));
+int toku_ft_cursor (FT_HANDLE, FT_CURSOR*, TOKUTXN, bool, bool) __attribute__ ((warn_unused_result));
void toku_ft_cursor_set_leaf_mode(FT_CURSOR);
// Sets a boolean on the brt cursor that prevents unnecessary copying of
// the cursor during a single query.
void toku_ft_cursor_set_temporary(FT_CURSOR);
int toku_ft_cursor_is_leaf_mode(FT_CURSOR);
-void toku_ft_cursor_set_range_lock(FT_CURSOR, const DBT *, const DBT *, BOOL, BOOL);
+void toku_ft_cursor_set_range_lock(FT_CURSOR, const DBT *, const DBT *, bool, bool);
// get is deprecated in favor of the individual functions below
int toku_ft_cursor_get (FT_CURSOR cursor, DBT *key, FT_GET_CALLBACK_FUNCTION getf, void *getf_v, int get_flags) __attribute__ ((warn_unused_result));
@@ -201,7 +201,7 @@ int toku_ft_cursor_get_both_range_reverse(FT_CURSOR cursor, DBT *key, DBT *val,
int toku_ft_cursor_delete(FT_CURSOR cursor, int flags, TOKUTXN) __attribute__ ((warn_unused_result));
int toku_ft_cursor_close (FT_CURSOR curs) __attribute__ ((warn_unused_result));
-BOOL toku_ft_cursor_uninitialized(FT_CURSOR c) __attribute__ ((warn_unused_result));
+bool toku_ft_cursor_uninitialized(FT_CURSOR c) __attribute__ ((warn_unused_result));
void toku_ft_cursor_peek(FT_CURSOR cursor, const DBT **pkey, const DBT **pval);
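// Hedged usage sketch of the cursor lifecycle declared above (the two bool
// flags are passed as false at the toku_ft_lookup call site; the getf
// callback body is elided):
//
//     FT_CURSOR c;
//     int r = toku_ft_cursor(brt, &c, NULL /*txn*/, false, false);
//     if (r == 0) {
//         r = toku_ft_cursor_first(c, getf, getf_extra);    // leftmost pair
//         while (r == 0)
//             r = toku_ft_cursor_next(c, getf, getf_extra); // walk forward
//         (void) toku_ft_cursor_close(c); // the walk stops once a step returns nonzero
//     }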
@@ -215,17 +215,17 @@ enum ft_flags {
};
int
-toku_ft_keyrange (FT_HANDLE brt, DBT *key, u_int64_t *less, u_int64_t *equal, u_int64_t *greater) __attribute__ ((warn_unused_result));
+toku_ft_keyrange (FT_HANDLE brt, DBT *key, uint64_t *less, uint64_t *equal, uint64_t *greater) __attribute__ ((warn_unused_result));
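// Hedged usage sketch of the estimator declared above (error handling
// elided; toku_fill_dbt is used for DBT setup as elsewhere in this tree):
//
//     DBT k;
//     toku_fill_dbt(&k, "hello", 5);
//     uint64_t less, equal, greater;
//     int r = toku_ft_keyrange(brt, &k, &less, &equal, &greater);
//     // on success, less/equal/greater estimate how many keys fall on
//     // each side of (and exactly on) "hello"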
struct ftstat64_s {
- u_int64_t nkeys; /* estimate how many unique keys (even when flattened this may be an estimate) */
- u_int64_t ndata; /* estimate the number of pairs (exact when flattened and committed) */
- u_int64_t dsize; /* estimate the sum of the sizes of the pairs (exact when flattened and committed) */
- u_int64_t fsize; /* the size of the underlying file */
- u_int64_t ffree; /* Number of free bytes in the underlying file */
- u_int64_t create_time_sec; /* creation time in seconds. */
- u_int64_t modify_time_sec; /* time of last serialization, in seconds. */
- u_int64_t verify_time_sec; /* time of last verification, in seconds */
+ uint64_t nkeys; /* estimate how many unique keys (even when flattened this may be an estimate) */
+ uint64_t ndata; /* estimate the number of pairs (exact when flattened and committed) */
+ uint64_t dsize; /* estimate the sum of the sizes of the pairs (exact when flattened and committed) */
+ uint64_t fsize; /* the size of the underlying file */
+ uint64_t ffree; /* Number of free bytes in the underlying file */
+ uint64_t create_time_sec; /* creation time in seconds. */
+ uint64_t modify_time_sec; /* time of last serialization, in seconds. */
+ uint64_t verify_time_sec; /* time of last verification, in seconds */
};
int
@@ -253,8 +253,8 @@ void toku_ft_suppress_recovery_logs (FT_HANDLE brt, TOKUTXN txn);
int toku_ft_get_fragmentation(FT_HANDLE brt, TOKU_DB_FRAGMENTATION report) __attribute__ ((warn_unused_result));
-BOOL toku_ft_is_empty_fast (FT_HANDLE brt) __attribute__ ((warn_unused_result));
-// Effect: Return TRUE if there are no messages or leaf entries in the tree. If so, it's empty. If there are messages or leaf entries, we say it's not empty
+bool toku_ft_is_empty_fast (FT_HANDLE brt) __attribute__ ((warn_unused_result));
+// Effect: Return true if there are no messages or leaf entries in the tree. If so, it's empty. If there are messages or leaf entries, we say it's not empty
// even though, if we were to optimize the tree, it might turn out to be empty.
int toku_ft_strerror_r(int error, char *buf, size_t buflen);
@@ -262,6 +262,6 @@ int toku_ft_strerror_r(int error, char *buf, size_t buflen);
// If error>=0 then the result is to do strerror_r(error, buf, buflen), that is fill buf with a descriptive error message.
// If error<0 then return a TokuDB-specific error code. For unknown cases, we return -1 and set errno=EINVAL, even for cases that *should* be known. (Not all DB errors are known by this function which is a bug.)
-extern BOOL garbage_collection_debug;
+extern bool garbage_collection_debug;
#endif
diff --git a/ft/ft-search.h b/ft/ft-search.h
index bcccb70011f..f7d44c561e0 100644
--- a/ft/ft-search.h
+++ b/ft/ft-search.h
@@ -50,7 +50,7 @@ typedef struct ft_search {
// There also remains a potential thrashing problem. When we get a TOKUDB_TRY_AGAIN, we unpin everything. There's
// no guarantee that we will get everything pinned again. We ought to keep nodes pinned when we retry, except that on the
// way out with a DB_NOTFOUND we ought to unpin those nodes. See #3528.
- BOOL have_pivot_bound;
+ bool have_pivot_bound;
DBT pivot_bound;
} ft_search_t;
@@ -60,7 +60,7 @@ static inline ft_search_t *ft_search_init(ft_search_t *so, ft_search_compare_fun
so->direction = direction;
so->k = k;
so->context = context;
- so->have_pivot_bound = FALSE;
+ so->have_pivot_bound = false;
return so;
}
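// The usual lifecycle, copied in shape from the call sites in ft-ops.cc:
// a search object is stack-allocated, initialized, handed to the search,
// and finished when done:
//
//     ft_search_t search;
//     ft_search_init(&search, ft_cursor_compare_one, FT_SEARCH_LEFT,
//                    0 /*k*/, cursor->ft_handle);
//     int r = ft_cursor_search(cursor, &search, getf, getf_v, false);
//     ft_search_finish(&search);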
diff --git a/ft/ft-serialize.cc b/ft/ft-serialize.cc
index eeba99f79fe..96b0d87b65e 100644
--- a/ft/ft-serialize.cc
+++ b/ft/ft-serialize.cc
@@ -20,18 +20,18 @@
#endif
// not version-sensitive because we only serialize a descriptor using the current layout_version
-u_int32_t
+uint32_t
toku_serialize_descriptor_size(const DESCRIPTOR desc) {
//Checksum NOT included in this. Checksum only exists in header's version.
- u_int32_t size = 4; // four bytes for size of descriptor
+ uint32_t size = 4; // four bytes for size of descriptor
size += desc->dbt.size;
return size;
}
-static u_int32_t
+static uint32_t
deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) {
//Checksum NOT included in this. Checksum only exists in header's version.
- u_int32_t size = 4; // four bytes for size of descriptor
+ uint32_t size = 4; // four bytes for size of descriptor
if (layout_version == FT_LAYOUT_VERSION_13)
size += 4; // for version 13, include four bytes of "version"
size += desc->dbt.size;
@@ -55,7 +55,7 @@ toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF
toku_serialize_descriptor_contents_to_wbuf(&w, desc);
{
//Add checksum
- u_int32_t checksum = x1764_finish(&w.checksum);
+ uint32_t checksum = x1764_finish(&w.checksum);
wbuf_int(&w, checksum);
}
lazy_assert(w.ndone==w.size);
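    // Resulting on-disk descriptor layout (current layout_version; version 13
    // additionally carried a 4-byte version field, handled in the deserializer):
    //
    //     | dbt size (4 bytes) | dbt bytes | x1764 checksum (4 bytes) |
    //
    // toku_serialize_descriptor_size() counts only the first two fields; the
    // checksum is appended here and verified on the read side in
    // deserialize_descriptor_from().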
@@ -74,7 +74,7 @@ deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, int layout_ve
(void) rbuf_int(rb);
}
- u_int32_t size;
+ uint32_t size;
bytevec data;
rbuf_bytes(rb, &data, &size);
bytevec data_copy = data;
@@ -106,9 +106,9 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_
}
{
// check the checksum
- u_int32_t x1764 = x1764_memory(dbuf, size-4);
+ uint32_t x1764 = x1764_memory(dbuf, size-4);
//printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk);
- u_int32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4));
+ uint32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4));
if (x1764 != stored_x1764) {
fprintf(stderr, "Descriptor checksum failure: calc=0x%08x read=0x%08x\n", x1764, stored_x1764);
r = TOKUDB_BAD_CHECKSUM;
@@ -163,7 +163,7 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
build_id = rbuf_network_int(rb);
//Size MUST be in network order regardless of disk order.
- u_int32_t size;
+ uint32_t size;
size = rbuf_network_int(rb);
lazy_assert(size == rb->size);
@@ -365,9 +365,9 @@ exit:
return r;
}
-static u_int32_t
-serialize_ft_min_size (u_int32_t version) {
- u_int32_t size = 0;
+static uint32_t
+serialize_ft_min_size (uint32_t version) {
+ uint32_t size = 0;
switch(version) {
@@ -417,7 +417,7 @@ serialize_ft_min_size (u_int32_t version) {
);
break;
default:
- lazy_assert(FALSE);
+ lazy_assert(false);
}
lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
return size;
@@ -432,9 +432,9 @@ int
deserialize_ft_from_fd_into_rbuf(int fd,
toku_off_t offset_of_header,
struct rbuf *rb,
- u_int64_t *checkpoint_count,
+ uint64_t *checkpoint_count,
LSN *checkpoint_lsn,
- u_int32_t * version_p)
+ uint32_t * version_p)
{
int r = 0;
const int64_t prefix_size = 8 + // magic ("tokudata")
@@ -461,7 +461,7 @@ deserialize_ft_from_fd_into_rbuf(int fd,
bytevec magic;
rbuf_literal_bytes(rb, &magic, 8);
if (memcmp(magic,"tokudata",8)!=0) {
- if ((*(u_int64_t*)magic) == 0) {
+ if ((*(uint64_t*)magic) == 0) {
r = TOKUDB_DICTIONARY_NO_HEADER;
} else {
r = EINVAL; //Not a tokudb file! Do not use.
@@ -470,7 +470,7 @@ deserialize_ft_from_fd_into_rbuf(int fd,
}
//Version MUST be in network order regardless of disk order.
- u_int32_t version;
+ uint32_t version;
version = rbuf_network_int(rb);
*version_p = version;
if (version < FT_LAYOUT_MIN_SUPPORTED_VERSION) {
@@ -482,13 +482,13 @@ deserialize_ft_from_fd_into_rbuf(int fd,
}
//build_id MUST be in network order regardless of disk order.
- u_int32_t build_id __attribute__((__unused__));
+ uint32_t build_id __attribute__((__unused__));
build_id = rbuf_network_int(rb);
int64_t min_header_size;
min_header_size = serialize_ft_min_size(version);
//Size MUST be in network order regardless of disk order.
- u_int32_t size;
+ uint32_t size;
size = rbuf_network_int(rb);
//If too big, it is corrupt. We would probably notice during checksum
//but may have to do a multi-gigabyte malloc+read to find out.
@@ -516,9 +516,9 @@ deserialize_ft_from_fd_into_rbuf(int fd,
//Size is within acceptable bounds.
//Verify checksum (FT_LAYOUT_VERSION_13 or later, when checksum function changed)
- u_int32_t calculated_x1764;
+ uint32_t calculated_x1764;
calculated_x1764 = x1764_memory(rb->buf, rb->size-4);
- u_int32_t stored_x1764;
+ uint32_t stored_x1764;
stored_x1764 = toku_dtoh32(*(int*)(rb->buf+rb->size-4));
if (calculated_x1764 != stored_x1764) {
r = TOKUDB_BAD_CHECKSUM; //Header useless
@@ -563,26 +563,26 @@ toku_deserialize_ft_from(int fd,
{
struct rbuf rb_0;
struct rbuf rb_1;
- u_int64_t checkpoint_count_0;
- u_int64_t checkpoint_count_1;
+ uint64_t checkpoint_count_0;
+ uint64_t checkpoint_count_1;
LSN checkpoint_lsn_0;
LSN checkpoint_lsn_1;
- u_int32_t version_0, version_1, version = 0;
- BOOL h0_acceptable = FALSE;
- BOOL h1_acceptable = FALSE;
+ uint32_t version_0, version_1, version = 0;
+ bool h0_acceptable = false;
+ bool h1_acceptable = false;
struct rbuf *rb = NULL;
int r0, r1, r;
toku_off_t header_0_off = 0;
r0 = deserialize_ft_from_fd_into_rbuf(fd, header_0_off, &rb_0, &checkpoint_count_0, &checkpoint_lsn_0, &version_0);
if (r0 == 0 && checkpoint_lsn_0.lsn <= max_acceptable_lsn.lsn) {
- h0_acceptable = TRUE;
+ h0_acceptable = true;
}
toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE;
r1 = deserialize_ft_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1);
if (r1 == 0 && checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) {
- h1_acceptable = TRUE;
+ h1_acceptable = true;
}
// if either header is too new, the dictionary is unreadable
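// Sketch of the selection rule applied below (the code itself falls outside
// this hunk): with header copies at offset 0 and at
// BLOCK_ALLOCATOR_HEADER_RESERVE,
//
//   - neither copy acceptable  -> the dictionary cannot be opened
//   - exactly one acceptable   -> use that copy
//   - both acceptable          -> use the newer one (the copy whose
//                                 checkpoint count/LSN is larger)
//
// Keeping two checkpointed copies means a torn write to one header still
// leaves a consistent older header to fall back on.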
@@ -656,7 +656,7 @@ exit:
int toku_serialize_ft_size (FT_HEADER h) {
- u_int32_t size = serialize_ft_min_size(h->layout_version);
+ uint32_t size = serialize_ft_min_size(h->layout_version);
//There is no dynamic data.
lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
return size;
@@ -698,7 +698,7 @@ int toku_serialize_ft_to_wbuf (
wbuf_MSN(wbuf, h->msn_at_start_of_last_completed_optimize);
wbuf_char(wbuf, (unsigned char) h->compression_method);
wbuf_MSN(wbuf, h->highest_unused_msn_for_upgrade);
- u_int32_t checksum = x1764_finish(&wbuf->checksum);
+ uint32_t checksum = x1764_finish(&wbuf->checksum);
wbuf_int(wbuf, checksum);
lazy_assert(wbuf->ndone == wbuf->size);
return 0;
diff --git a/ft/ft-test-helpers.cc b/ft/ft-test-helpers.cc
index bbbf1595e65..3e2e85ef51e 100644
--- a/ft/ft-test-helpers.cc
+++ b/ft/ft-test-helpers.cc
@@ -32,7 +32,7 @@ next_dummymsn(void) {
}
-BOOL ignore_if_was_already_open;
+bool ignore_if_was_already_open;
int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) {
FTNODE node;
assert(testsetup_initialized);
@@ -93,7 +93,7 @@ int toku_testsetup_get_sersize(FT_HANDLE brt, BLOCKNUM diskoff) // Return the si
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
- TRUE,
+ true,
&bfe
);
assert(r==0);
@@ -121,7 +121,7 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
- TRUE,
+ true,
&bfe
);
if (r!=0) return r;
@@ -169,7 +169,7 @@ toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t)
b,
toku_cachetable_hash(t->ft->cf, b),
&bfe,
- TRUE,
+ true,
0,
NULL,
node
@@ -194,7 +194,7 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM blocknum, enum ft_
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
- TRUE,
+ true,
&bfe
);
if (r!=0) return r;
diff --git a/ft/ft-verify.cc b/ft/ft-verify.cc
index 6519694a92c..ec1a9dbe81c 100644
--- a/ft/ft-verify.cc
+++ b/ft/ft-verify.cc
@@ -159,7 +159,7 @@ static int
verify_sorted_by_key_msn(FT_HANDLE brt, FIFO fifo, const off_omt_t &mt) {
int result = 0;
size_t last_offset = 0;
- for (u_int32_t i = 0; i < mt.size(); i++) {
+ for (uint32_t i = 0; i < mt.size(); i++) {
long offset;
int r = mt.fetch(i, &offset);
assert_zero(r);
@@ -198,7 +198,7 @@ toku_get_node_for_verify(
FTNODE* nodep
)
{
- u_int32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum);
+ uint32_t fullhash = toku_cachetable_hash(brt->ft->cf, blocknum);
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt->ft);
toku_pin_ftnode_off_client_thread(
@@ -206,7 +206,7 @@ toku_get_node_for_verify(
blocknum,
fullhash,
&bfe,
- TRUE, // may_modify_node, safe to set to TRUE
+ true, // may_modify_node, safe to set to true
0,
NULL,
nodep
@@ -313,7 +313,7 @@ toku_verify_ftnode (FT_HANDLE brt,
}
else {
BASEMENTNODE bn = BLB(node, i);
- for (u_int32_t j = 0; j < toku_omt_size(bn->buffer); j++) {
+ for (uint32_t j = 0; j < toku_omt_size(bn->buffer); j++) {
VERIFY_ASSERTION((rootmsn.msn >= this_msn.msn), 0, "leaf may have latest msn, but cannot be greater than root msn");
LEAFENTRY le = get_ith_leafentry(bn, j);
if (curr_less_pivot) {
@@ -375,7 +375,7 @@ toku_verify_ft_with_progress (FT_HANDLE brt, int (*progress_callback)(void *extr
{
toku_ft_grab_treelock(brt->ft);
- u_int32_t root_hash;
+ uint32_t root_hash;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(brt->ft, &root_key, &root_hash);
toku_get_node_for_verify(root_key, brt, &root_node);
diff --git a/ft/ft.cc b/ft/ft.cc
index 7e92378883c..0a743473886 100644
--- a/ft/ft.cc
+++ b/ft/ft.cc
@@ -265,7 +265,7 @@ ft_end_checkpoint (CACHEFILE UU(cachefile), int fd, void *header_v) {
// maps to cf->close_userdata
// Has access to fd (it is protected).
static int
-ft_close (CACHEFILE cachefile, int fd, void *header_v, char **malloced_error_string, BOOL oplsn_valid, LSN oplsn) {
+ft_close (CACHEFILE cachefile, int fd, void *header_v, char **malloced_error_string, bool oplsn_valid, LSN oplsn) {
FT ft = (FT) header_v;
assert(ft->h->type == FT_CURRENT);
// We already have exclusive access to this field already, so skip the locking.
@@ -369,7 +369,7 @@ static int setup_initial_ft_root_node (FT ft, BLOCKNUM blocknum) {
toku_initialize_empty_ftnode(node, blocknum, 0, 1, ft->h->layout_version, ft->h->nodesize, ft->h->flags);
BP_STATE(node,0) = PT_AVAIL;
- u_int32_t fullhash = toku_cachetable_hash(ft->cf, blocknum);
+ uint32_t fullhash = toku_cachetable_hash(ft->cf, blocknum);
node->fullhash = fullhash;
int r = toku_cachetable_put(ft->cf, blocknum, fullhash,
node, make_ftnode_pair_attr(node),
@@ -493,7 +493,7 @@ exit:
}
// TODO: (Zardosht) get rid of brt parameter
-int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header, BOOL* was_open)
+int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header, bool* was_open)
// If the cachefile already has the header, then just get it.
// If the cachefile has not been initialized, then don't modify anything.
// max_acceptable_lsn is the latest acceptable checkpointed version of the file.
@@ -502,13 +502,13 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_ac
FT h;
if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) {
*header = h;
- *was_open = TRUE;
+ *was_open = true;
assert(brt->options.update_fun == h->update_fun);
assert(brt->options.compare_fun == h->compare_fun);
return 0;
}
}
- *was_open = FALSE;
+ *was_open = false;
FT h;
int r;
{
@@ -550,7 +550,7 @@ toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live) {
// pinned by a checkpoint.
static int
ft_get_reference_count(FT ft) {
- u_int32_t pinned_by_checkpoint = ft->pinned_by_checkpoint ? 1 : 0;
+ uint32_t pinned_by_checkpoint = ft->pinned_by_checkpoint ? 1 : 0;
int num_handles = toku_list_num_elements_est(&ft->live_ft_handles);
return pinned_by_checkpoint + ft->num_txns + num_handles;
}
@@ -569,7 +569,7 @@ toku_ft_has_one_reference_unlocked(FT ft) {
// evict a ft from memory by closing its cachefile. any future work
// will have to read in the ft in a new cachefile and new FT object.
-int toku_ft_evict_from_memory(FT ft, char **error_string, BOOL oplsn_valid, LSN oplsn) {
+int toku_ft_evict_from_memory(FT ft, char **error_string, bool oplsn_valid, LSN oplsn) {
int r = 0;
assert(ft->cf);
if (error_string) {
@@ -614,7 +614,7 @@ toku_ft_note_hot_begin(FT_HANDLE brt) {
// Purpose: set fields in brt_header to capture accountability info for end of HOT optimize.
// Note: See note for toku_ft_note_hot_begin().
void
-toku_ft_note_hot_complete(FT_HANDLE brt, BOOL success, MSN msn_at_start_of_hot) {
+toku_ft_note_hot_complete(FT_HANDLE brt, bool success, MSN msn_at_start_of_hot) {
FT ft = brt->ft;
time_t now = time(NULL);
@@ -863,7 +863,7 @@ toku_ft_remove_txn_ref(FT ft) {
void toku_calculate_root_offset_pointer (
FT ft,
CACHEKEY* root_key,
- u_int32_t *roothash
+ uint32_t *roothash
)
{
*roothash = toku_cachetable_hash(ft->cf, ft->h->root_blocknum);
diff --git a/ft/ft.h b/ft/ft.h
index 2c831fe0550..e123c4c90a4 100644
--- a/ft/ft.h
+++ b/ft/ft.h
@@ -36,7 +36,7 @@ void toku_ft_release_reflock(FT ft);
int toku_create_new_ft(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn);
void toku_ft_free (FT h);
-int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header, BOOL* was_open);
+int toku_read_ft_and_store_in_cachefile (FT_HANDLE brt, CACHEFILE cf, LSN max_acceptable_lsn, FT *header, bool* was_open);
void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live);
bool toku_ft_needed_unlocked(FT ft);
@@ -44,12 +44,12 @@ bool toku_ft_has_one_reference_unlocked(FT ft);
// evict a ft from memory by closing its cachefile. any future work
// will have to read in the ft in a new cachefile and new FT object.
-int toku_ft_evict_from_memory(FT ft, char **error_string, BOOL oplsn_valid, LSN oplsn) __attribute__ ((warn_unused_result));
+int toku_ft_evict_from_memory(FT ft, char **error_string, bool oplsn_valid, LSN oplsn) __attribute__ ((warn_unused_result));
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h);
void toku_ft_note_hot_begin(FT_HANDLE brt);
-void toku_ft_note_hot_complete(FT_HANDLE brt, BOOL success, MSN msn_at_start_of_hot);
+void toku_ft_note_hot_complete(FT_HANDLE brt, bool success, MSN msn_at_start_of_hot);
void
toku_ft_init(
@@ -71,7 +71,7 @@ void toku_reset_root_xid_that_created(FT h, TXNID new_root_xid_that_created);
void toku_ft_add_txn_ref(FT h);
void toku_ft_remove_txn_ref(FT h);
-void toku_calculate_root_offset_pointer ( FT h, CACHEKEY* root_key, u_int32_t *roothash);
+void toku_calculate_root_offset_pointer ( FT h, CACHEKEY* root_key, uint32_t *roothash);
void toku_ft_set_new_root_blocknum(FT h, CACHEKEY new_root_key);
LSN toku_ft_checkpoint_lsn(FT h) __attribute__ ((warn_unused_result));
int toku_ft_set_panic(FT h, int panic, const char *panic_string) __attribute__ ((warn_unused_result));
diff --git a/ft/ft_msg.cc b/ft/ft_msg.cc
index 976a08d4a8b..8589bc38e71 100644
--- a/ft/ft_msg.cc
+++ b/ft/ft_msg.cc
@@ -10,15 +10,15 @@
#include "ft_msg.h"
-u_int32_t
+uint32_t
ft_msg_get_keylen(FT_MSG ft_msg) {
- u_int32_t rval = ft_msg->u.id.key->size;
+ uint32_t rval = ft_msg->u.id.key->size;
return rval;
}
-u_int32_t
+uint32_t
ft_msg_get_vallen(FT_MSG ft_msg) {
- u_int32_t rval = ft_msg->u.id.val->size;
+ uint32_t rval = ft_msg->u.id.val->size;
return rval;
}
diff --git a/ft/ft_msg.h b/ft/ft_msg.h
index 39f4e924245..dfa937688bc 100644
--- a/ft/ft_msg.h
+++ b/ft/ft_msg.h
@@ -13,9 +13,9 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
-u_int32_t ft_msg_get_keylen(FT_MSG ft_msg);
+uint32_t ft_msg_get_keylen(FT_MSG ft_msg);
-u_int32_t ft_msg_get_vallen(FT_MSG ft_msg);
+uint32_t ft_msg_get_vallen(FT_MSG ft_msg);
XIDS ft_msg_get_xids(FT_MSG ft_msg);
diff --git a/ft/ft_node-serialize.cc b/ft/ft_node-serialize.cc
index a3729efc793..e7096e2ae9b 100644
--- a/ft/ft_node-serialize.cc
+++ b/ft/ft_node-serialize.cc
@@ -178,9 +178,9 @@ enum {
uncompressed_version_offset = 8,
};
-static u_int32_t
+static uint32_t
serialize_node_header_size(FTNODE node) {
- u_int32_t retval = 0;
+ uint32_t retval = 0;
retval += 8; // magic
retval += sizeof(node->layout_version);
retval += sizeof(node->layout_version_original);
@@ -208,7 +208,7 @@ serialize_node_header(FTNODE node, FTNODE_DISK_DATA ndd, struct wbuf *wbuf) {
wbuf_nocrc_int(wbuf, BP_SIZE (ndd, i)); // and the size
}
// checksum the header
- u_int32_t end_to_end_checksum = x1764_memory(wbuf->buf, wbuf_get_woffset(wbuf));
+ uint32_t end_to_end_checksum = x1764_memory(wbuf->buf, wbuf_get_woffset(wbuf));
wbuf_nocrc_int(wbuf, end_to_end_checksum);
invariant(wbuf->ndone == wbuf->size);
}
@@ -221,10 +221,10 @@ wbufwriteleafentry(OMTVALUE lev, const uint32_t UU(idx), void *wbv) {
return 0;
}
-static u_int32_t
+static uint32_t
serialize_ftnode_partition_size (FTNODE node, int i)
{
- u_int32_t result = 0;
+ uint32_t result = 0;
assert(node->bp[i].state == PT_AVAIL);
result++; // Byte that states what the partition is
if (node->height > 0) {
@@ -293,7 +293,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
//
toku_omt_iterate(buffer, wbufwriteleafentry, &wb);
}
- u_int32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
+ uint32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
wbuf_nocrc_int(&wb, end_to_end_checksum);
invariant(wb.ndone == wb.size);
invariant(sb->uncompressed_size==wb.ndone);
@@ -331,7 +331,7 @@ compress_ftnode_sub_block(struct sub_block *sb, enum toku_compression_method met
method
);
- u_int32_t* extra = (u_int32_t *)(sb->compressed_ptr);
+ uint32_t* extra = (uint32_t *)(sb->compressed_ptr);
// store the compressed and uncompressed size at the beginning
extra[0] = toku_htod32(sb->compressed_size);
extra[1] = toku_htod32(sb->uncompressed_size);
@@ -357,10 +357,10 @@ compress_ftnode_sub_block(struct sub_block *sb, enum toku_compression_method met
// such as the magic, layout_version, and build_id
// Includes only node specific info such as pivot information, n_children, and so on
//
-static u_int32_t
+static uint32_t
serialize_ftnode_info_size(FTNODE node)
{
- u_int32_t retval = 0;
+ uint32_t retval = 0;
retval += 8; // max_msn_applied_to_node_on_disk
retval += 4; // nodesize
retval += 4; // flags
@@ -400,7 +400,7 @@ static void serialize_ftnode_info(FTNODE node,
}
}
- u_int32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
+ uint32_t end_to_end_checksum = x1764_memory(sb->uncompressed_ptr, wbuf_get_woffset(&wb));
wbuf_nocrc_int(&wb, end_to_end_checksum);
invariant(wb.ndone == wb.size);
invariant(sb->uncompressed_size==wb.ndone);
@@ -425,7 +425,7 @@ toku_serialize_ftnode_size (FTNODE node) {
}
struct array_info {
- u_int32_t offset;
+ uint32_t offset;
OMTVALUE* array;
};
@@ -442,7 +442,7 @@ struct sum_info {
};
static int
-sum_item (OMTVALUE lev, u_int32_t UU(idx), void *vsi) {
+sum_item (OMTVALUE lev, uint32_t UU(idx), void *vsi) {
LEAFENTRY le = (LEAFENTRY) lev;
struct sum_info *si = (struct sum_info *) vsi;
si->count++;
@@ -463,7 +463,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
uint32_t num_orig_basements = node->n_children;
// Count number of leaf entries in this leaf (num_le).
- u_int32_t num_le = 0;
+ uint32_t num_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
num_le += toku_omt_size(BLB_BUFFER(node, i));
}
@@ -478,7 +478,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Capture pointers to old mempools' buffers (so they can be destroyed)
void **XMALLOC_N(num_orig_basements, old_mempool_bases);
- u_int32_t curr_le = 0;
+ uint32_t curr_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
OMT curr_omt = BLB_BUFFER(node, i);
struct array_info ai;
@@ -493,7 +493,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Create an array that will store indexes of new pivots.
// Each element in new_pivots is the index of a pivot key.
// (Allocating num_le of them is overkill, but num_le is an upper bound.)
- u_int32_t *XMALLOC_N(num_alloc, new_pivots);
+ uint32_t *XMALLOC_N(num_alloc, new_pivots);
new_pivots[0] = 0;
// Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
@@ -514,11 +514,11 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Figure out the new pivots.
// We need the index of each pivot, and for each basement we need
// the number of leaves and the sum of the sizes of the leaves (memory requirement for basement).
- u_int32_t curr_pivot = 0;
- u_int32_t num_le_in_curr_bn = 0;
- u_int32_t bn_size_so_far = 0;
- for (u_int32_t i = 0; i < num_le; i++) {
- u_int32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]);
+ uint32_t curr_pivot = 0;
+ uint32_t num_le_in_curr_bn = 0;
+ uint32_t bn_size_so_far = 0;
+ for (uint32_t i = 0; i < num_le; i++) {
+ uint32_t curr_le_size = leafentry_disksize((LEAFENTRY) leafpointers[i]);
le_sizes[i] = curr_le_size;
if ((bn_size_so_far + curr_le_size > basementnodesize) && (num_le_in_curr_bn != 0)) {
// cap off the current basement node to end with the element before i
@@ -542,7 +542,7 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Need to figure out how to properly deal with seqinsert.
// I am not happy with how this is being
// handled with basement nodes
- u_int32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1);
+ uint32_t tmp_seqinsert = BLB_SEQINSERT(node, num_orig_basements - 1);
// choose the max msn applied to any basement as the max msn applied to all new basements
MSN max_msn = ZERO_MSN;
@@ -586,9 +586,9 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
BLB_SEQINSERT(node, i) = tmp_seqinsert;
// create start (inclusive) and end (exclusive) boundaries for data of basement node
- u_int32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement
- u_int32_t curr_end = (i==num_pivots) ? num_le : new_pivots[i]+1; // index of first leaf in next basement
- u_int32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement
+ uint32_t curr_start = (i==0) ? 0 : new_pivots[i-1]+1; // index of first leaf in basement
+ uint32_t curr_end = (i==num_pivots) ? num_le : new_pivots[i]+1; // index of first leaf in next basement
+ uint32_t num_in_bn = curr_end - curr_start; // number of leaves in this basement
// create indexes for new basement
invariant(baseindex_this_bn == curr_start);
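        // Self-contained sketch of the greedy pivot selection above: pack
        // leaf entries into a basement until adding the next entry would
        // overflow basementnodesize, then cut a pivot at the previous entry.
        // A basement is never left empty, so a single oversized entry still
        // gets its own node.
        //
        //     static std::vector<uint32_t>
        //     sketch_pick_pivots(const std::vector<uint32_t> &le_sizes,
        //                        uint32_t basementnodesize) {
        //         std::vector<uint32_t> pivots; // last entry of each capped basement
        //         uint32_t bn_size_so_far = 0, num_in_bn = 0;
        //         for (size_t i = 0; i < le_sizes.size(); i++) {
        //             if (bn_size_so_far + le_sizes[i] > basementnodesize
        //                     && num_in_bn != 0) {
        //                 pivots.push_back((uint32_t)(i - 1)); // cap off before i
        //                 bn_size_so_far = 0;
        //                 num_in_bn = 0;
        //             }
        //             bn_size_so_far += le_sizes[i];
        //             num_in_bn++;
        //         }
        //         return pivots;
        //     }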
@@ -669,7 +669,7 @@ toku_create_compressed_partition_from_available(
// to just the amount of the actual compressed data. So, we create a new buffer and copy
// just the compressed data.
//
- u_int32_t compressed_size = toku_dtoh32(*(u_int32_t *)sb->compressed_ptr);
+ uint32_t compressed_size = toku_dtoh32(*(uint32_t *)sb->compressed_ptr);
void* compressed_data = toku_xmalloc(compressed_size);
memcpy(compressed_data, (char *)sb->compressed_ptr + 8, compressed_size);
toku_free(sb->compressed_ptr);
@@ -760,8 +760,8 @@ toku_serialize_ftnode_to_memory (FTNODE node,
FTNODE_DISK_DATA* ndd,
unsigned int basementnodesize,
enum toku_compression_method compression_method,
- BOOL do_rebalancing,
- BOOL in_parallel, // for loader is TRUE, for toku_ftnode_flush_callback, is false
+ bool do_rebalancing,
+ bool in_parallel, // for loader is true, for toku_ftnode_flush_callback, is false
/*out*/ size_t *n_bytes_to_write,
/*out*/ char **bytes_to_write)
{
@@ -805,12 +805,12 @@ toku_serialize_ftnode_to_memory (FTNODE node,
// The total size of the node is:
// size of header + disk size of the n+1 sub_block's created above
-    u_int32_t total_node_size = (serialize_node_header_size(node) // uncompressed header
+    uint32_t total_node_size = (serialize_node_header_size(node) // uncompressed header
    + sb_node_info.compressed_size // compressed nodeinfo (without its checksum)
    + 4); // nodeinfo's checksum
// store the BP_SIZESs
for (int i = 0; i < node->n_children; i++) {
- u_int32_t len = sb[i].compressed_size + 4; // data and checksum
+ uint32_t len = sb[i].compressed_size + 4; // data and checksum
BP_SIZE (*ndd,i) = len;
BP_START(*ndd,i) = total_node_size;
total_node_size += sb[i].compressed_size + 4;
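Spelled out, the serialized node layout being sized here is: uncompressed header, compressed nodeinfo plus its 4-byte checksum, then each partition's compressed bytes plus a 4-byte checksum, with BP_START/BP_SIZE recording where each partition lands. A small sketch of the same running-offset arithmetic over hypothetical inputs:

    #include <stdint.h>

    // Running-offset layout: header, nodeinfo+xsum, then partitions+xsums.
    static uint32_t layout_partitions(uint32_t header_size,
                                      uint32_t nodeinfo_compressed_size,
                                      const uint32_t *part_compressed_size,
                                      int n_children,
                                      uint32_t *bp_start, uint32_t *bp_size) {
        uint32_t total = header_size + nodeinfo_compressed_size + 4;
        for (int i = 0; i < n_children; i++) {
            bp_size[i]  = part_compressed_size[i] + 4;  // data + checksum
            bp_start[i] = total;
            total += bp_size[i];
        }
        return total;  // total serialized node size
    }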
@@ -831,14 +831,14 @@ toku_serialize_ftnode_to_memory (FTNODE node,
memcpy(curr_ptr, sb_node_info.compressed_ptr, sb_node_info.compressed_size);
curr_ptr += sb_node_info.compressed_size;
// write the checksum
- *(u_int32_t *)curr_ptr = toku_htod32(sb_node_info.xsum);
+ *(uint32_t *)curr_ptr = toku_htod32(sb_node_info.xsum);
curr_ptr += sizeof(sb_node_info.xsum);
for (int i = 0; i < npartitions; i++) {
memcpy(curr_ptr, sb[i].compressed_ptr, sb[i].compressed_size);
curr_ptr += sb[i].compressed_size;
// write the checksum
- *(u_int32_t *)curr_ptr = toku_htod32(sb[i].xsum);
+ *(uint32_t *)curr_ptr = toku_htod32(sb[i].xsum);
curr_ptr += sizeof(sb[i].xsum);
}
assert(curr_ptr - data == total_node_size);
@@ -861,13 +861,13 @@ toku_serialize_ftnode_to_memory (FTNODE node,
}
int
-toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, BOOL do_rebalancing, FT h, BOOL for_checkpoint) {
+toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint) {
size_t n_to_write;
char *compressed_buf = NULL;
{
// because toku_serialize_ftnode_to is only called
- // in toku_ftnode_flush_callback, we pass FALSE
+ // in toku_ftnode_flush_callback, we pass false
// for in_parallel. The reasoning is that when we write
// nodes to disk via toku_ftnode_flush_callback, we
// assume that it is being done on a non-critical
@@ -875,7 +875,7 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
// should not hog CPU,
//
// Should the above facts change, we may want to revisit
- // passing FALSE for in_parallel here
+ // passing false for in_parallel here
//
// alternatively, we could have made in_parallel a parameter
// for toku_serialize_ftnode_to, but instead we did this.
@@ -885,7 +885,7 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
h->h->basementnodesize,
h->h->compression_method,
do_rebalancing,
- FALSE, // in_parallel
+ false, // in_parallel
&n_to_write,
&compressed_buf
);
@@ -954,7 +954,7 @@ deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
dest = &broadcast_offsets[nbroadcast_offsets];
nbroadcast_offsets++;
} else {
- assert(FALSE);
+ assert(false);
}
} else {
dest = NULL;
@@ -984,12 +984,12 @@ deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
// dump a buffer to stderr
// no locking around this for now
static void
-dump_bad_block(unsigned char *vp, u_int64_t size) {
- const u_int64_t linesize = 64;
- u_int64_t n = size / linesize;
- for (u_int64_t i = 0; i < n; i++) {
+dump_bad_block(unsigned char *vp, uint64_t size) {
+ const uint64_t linesize = 64;
+ uint64_t n = size / linesize;
+ for (uint64_t i = 0; i < n; i++) {
fprintf(stderr, "%p: ", vp);
- for (u_int64_t j = 0; j < linesize; j++) {
+ for (uint64_t j = 0; j < linesize; j++) {
unsigned char c = vp[j];
fprintf(stderr, "%2.2X", c);
}
@@ -997,7 +997,7 @@ dump_bad_block(unsigned char *vp, u_int64_t size) {
vp += linesize;
}
size = size % linesize;
- for (u_int64_t i=0; i<size; i++) {
+ for (uint64_t i=0; i<size; i++) {
if ((i % linesize) == 0)
fprintf(stderr, "%p: ", vp+i);
fprintf(stderr, "%2.2X", vp[i]);
@@ -1030,7 +1030,7 @@ struct mp_pair {
OMT omt;
};
-static int fix_mp_offset(OMTVALUE v, u_int32_t i, void* extra) {
+static int fix_mp_offset(OMTVALUE v, uint32_t i, void* extra) {
struct mp_pair *CAST_FROM_VOIDP(p, extra);
char* old_value = (char *) v;
char *new_value = old_value - (char *)p->orig_base + (char *)p->new_base;
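fix_mp_offset's pointer arithmetic is the standard rebase-after-move idiom: preserve each value's offset from the old mempool base and apply it to the new base. As a one-liner sketch:

    // Rebase a pointer into a moved memory pool: same offset, new base.
    static inline void *rebase_ptr(void *old_value, void *orig_base, void *new_base) {
        return (char *) new_base + ((char *) old_value - (char *) orig_base);
    }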
@@ -1125,7 +1125,7 @@ read_block_from_fd_into_rbuf(
// get the file offset and block size for the block
DISKOFF offset, size;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
- u_int8_t *XMALLOC_N(size, raw_block);
+ uint8_t *XMALLOC_N(size, raw_block);
rbuf_init(rb, raw_block, size);
{
// read the block
@@ -1147,7 +1147,7 @@ static void read_ftnode_header_from_fd_into_rbuf_if_small_enough (int fd, BLOCKN
DISKOFF offset, size;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
DISKOFF read_size = MIN(read_header_heuristic_max, size);
- u_int8_t *XMALLOC_N(size, raw_block);
+ uint8_t *XMALLOC_N(size, raw_block);
rbuf_init(rb, raw_block, read_size);
{
// read the block
@@ -1172,7 +1172,7 @@ read_compressed_sub_block(struct rbuf *rb, struct sub_block *sb)
rbuf_literal_bytes(rb, cp, sb->compressed_size);
sb->xsum = rbuf_int(rb);
// let's check the checksum
- u_int32_t actual_xsum = x1764_memory((char *)sb->compressed_ptr-8, 8+sb->compressed_size);
+ uint32_t actual_xsum = x1764_memory((char *)sb->compressed_ptr-8, 8+sb->compressed_size);
if (sb->xsum != actual_xsum) {
r = TOKUDB_BAD_CHECKSUM;
}
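Note the checksum window: the stored xsum covers the two 4-byte size words as well as the payload, which is why the call above backs up 8 bytes from compressed_ptr. A distilled sketch, assuming the x1764_memory signature used above:

    // Recompute the checksum over the size words plus payload and compare.
    static int check_sub_block_xsum(const struct sub_block *sb) {
        uint32_t actual = x1764_memory((char *) sb->compressed_ptr - 8,
                                       8 + sb->compressed_size);
        return sb->xsum == actual ? 0 : TOKUDB_BAD_CHECKSUM;
    }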
@@ -1224,9 +1224,9 @@ verify_ftnode_sub_block (struct sub_block *sb)
{
int r = 0;
// first verify the checksum
- u_int32_t data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
- u_int32_t stored_xsum = toku_dtoh32(*((u_int32_t *)((char *)sb->uncompressed_ptr + data_size)));
- u_int32_t actual_xsum = x1764_memory(sb->uncompressed_ptr, data_size);
+ uint32_t data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
+ uint32_t stored_xsum = toku_dtoh32(*((uint32_t *)((char *)sb->uncompressed_ptr + data_size)));
+ uint32_t actual_xsum = x1764_memory(sb->uncompressed_ptr, data_size);
if (stored_xsum != actual_xsum) {
dump_bad_block((Bytef *) sb->uncompressed_ptr, sb->uncompressed_size);
r = TOKUDB_BAD_CHECKSUM;
@@ -1251,7 +1251,7 @@ deserialize_ftnode_info(
goto exit;
}
- u_int32_t data_size;
+ uint32_t data_size;
data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
// now with the data verified, we can read the information into the node
@@ -1305,7 +1305,7 @@ deserialize_ftnode_info(
// make sure that all the data was read
if (data_size != rb.ndone) {
dump_bad_block(rb.buf, rb.size);
- assert(FALSE);
+ assert(false);
}
exit:
return r;
@@ -1390,7 +1390,7 @@ setup_partitions_using_bfe(FTNODE node,
case PT_INVALID:
break;
}
- assert(FALSE);
+ assert(false);
}
}
@@ -1428,7 +1428,7 @@ deserialize_ftnode_partition(
if (r != 0) {
goto exit;
}
- u_int32_t data_size;
+ uint32_t data_size;
data_size = sb->uncompressed_size - 4; // checksum is 4 bytes at end
// now with the data verified, we can read the information into the node
@@ -1457,14 +1457,14 @@ deserialize_ftnode_partition(
BASEMENTNODE bn = BLB(node, childnum);
toku_mempool_copy_construct(&bn->buffer_mempool, &rb.buf[rb.ndone], data_size);
uint8_t *CAST_FROM_VOIDP(le_base, toku_mempool_get_base(&bn->buffer_mempool)); // point to first le in mempool
- for (u_int32_t i = 0; i < num_entries; i++) { // now set up the pointers in the omt
+ for (uint32_t i = 0; i < num_entries; i++) { // now set up the pointers in the omt
LEAFENTRY le = reinterpret_cast<LEAFENTRY>(&le_base[rb.ndone - start_of_data]); // point to durable mempool, not to transient rbuf
uint32_t disksize = leafentry_disksize(le);
rb.ndone += disksize;
invariant(rb.ndone<=rb.size);
array[i] = le;
}
- u_int32_t end_of_data = rb.ndone;
+ uint32_t end_of_data = rb.ndone;
BLB_NBYTESINBUF(node, childnum) += end_of_data-start_of_data;
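The pointer fixup above walks the packed leafentries once, recording a pointer to each and advancing by its serialized size. A generic sketch of that single-pass indexing, with entry_size standing in for leafentry_disksize (hypothetical names):

    #include <stdint.h>

    // Walk a packed array of variable-length entries, recording a pointer
    // to each; off should equal data_size when the buffer is well formed.
    static void index_entries(uint8_t *base, uint32_t num_entries,
                              uint8_t **array,
                              uint32_t (*entry_size)(const uint8_t *)) {
        uint32_t off = 0;
        for (uint32_t i = 0; i < num_entries; i++) {
            array[i] = &base[off];
            off += entry_size(array[i]);
        }
    }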
@@ -1516,7 +1516,7 @@ static int
deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode,
FTNODE_DISK_DATA* ndd,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
struct ftnode_fetch_extra *bfe,
struct rbuf *rb,
int fd)
@@ -1587,9 +1587,9 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode,
BP_SIZE (*ndd,i) = rbuf_int(rb);
}
- u_int32_t checksum;
+ uint32_t checksum;
checksum = x1764_memory(rb->buf, rb->ndone);
- u_int32_t stored_checksum;
+ uint32_t stored_checksum;
stored_checksum = rbuf_int(rb);
if (stored_checksum != checksum) {
dump_bad_block(rb->buf, rb->size);
@@ -1615,7 +1615,7 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode,
rbuf_literal_bytes(rb, cp, sb_node_info.compressed_size);
sb_node_info.xsum = rbuf_int(rb);
// let's check the checksum
- u_int32_t actual_xsum;
+ uint32_t actual_xsum;
actual_xsum = x1764_memory((char *)sb_node_info.compressed_ptr-8, 8+sb_node_info.compressed_size);
if (sb_node_info.xsum != actual_xsum) {
r = TOKUDB_BAD_CHECKSUM;
@@ -1804,7 +1804,7 @@ deserialize_and_upgrade_internal_node(FTNODE node,
// Atomically decrement the header's MSN count by the number
// of messages in the buffer.
MSN lowest;
- u_int64_t amount = n_in_this_buffer;
+ uint64_t amount = n_in_this_buffer;
lowest.msn = __sync_sub_and_fetch(&bfe->h->h->highest_unused_msn_for_upgrade.msn, amount);
if (highest_msn.msn == 0) {
highest_msn.msn = lowest.msn + n_in_this_buffer;
@@ -1876,8 +1876,8 @@ deserialize_and_upgrade_internal_node(FTNODE node,
// Must compute the checksum now (rather than at the end, while we
// still have the pointer to the buffer).
if (version >= FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM) {
- u_int32_t expected_xsum = toku_dtoh32(*(u_int32_t*)(rb->buf+rb->size-4));
- u_int32_t actual_xsum = x1764_memory(rb->buf, rb->size-4);
+ uint32_t expected_xsum = toku_dtoh32(*(uint32_t*)(rb->buf+rb->size-4));
+ uint32_t actual_xsum = x1764_memory(rb->buf, rb->size-4);
if (expected_xsum != actual_xsum) {
fprintf(stderr, "%s:%d: Bad checksum: expected = %" PRIx32 ", actual= %" PRIx32 "\n",
__FUNCTION__,
@@ -2046,8 +2046,8 @@ deserialize_and_upgrade_leaf_node(FTNODE node,
// 14. Checksum (end to end) is only on version 14
if (version >= FT_FIRST_LAYOUT_VERSION_WITH_END_TO_END_CHECKSUM) {
- u_int32_t expected_xsum = rbuf_int(rb);
- u_int32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
+ uint32_t expected_xsum = rbuf_int(rb);
+ uint32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
if (expected_xsum != actual_xsum) {
fprintf(stderr, "%s:%d: Bad checksum: expected = %" PRIx32 ", actual= %" PRIx32 "\n",
__FUNCTION__,
@@ -2167,7 +2167,7 @@ deserialize_ftnode_from_rbuf(
FTNODE *ftnode,
FTNODE_DISK_DATA* ndd,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
struct ftnode_fetch_extra* bfe,
STAT64INFO info,
struct rbuf *rb,
@@ -2233,9 +2233,9 @@ deserialize_ftnode_from_rbuf(
BP_SIZE (*ndd,i) = rbuf_int(rb);
}
// verify checksum of header stored
- u_int32_t checksum;
+ uint32_t checksum;
checksum = x1764_memory(rb->buf, rb->ndone);
- u_int32_t stored_checksum;
+ uint32_t stored_checksum;
stored_checksum = rbuf_int(rb);
if (stored_checksum != checksum) {
dump_bad_block(rb->buf, rb->size);
@@ -2268,8 +2268,8 @@ deserialize_ftnode_from_rbuf(
// Previously, this code was a for loop with spawns inside and a sync at the end.
// But now the loop is parallelizable since we don't have a dependency on the work done so far.
cilk_for (int i = 0; i < node->n_children; i++) {
- u_int32_t curr_offset = BP_START(*ndd,i);
- u_int32_t curr_size = BP_SIZE(*ndd,i);
+ uint32_t curr_offset = BP_START(*ndd,i);
+ uint32_t curr_size = BP_SIZE(*ndd,i);
// the compressed, serialized partitions start at where rb is currently pointing,
// which would be rb->buf + rb->ndone
// we need to initialize curr_rbuf to point to this place
@@ -2358,11 +2358,11 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
&total_node_disk_size
);
- u_int32_t curr_offset = BP_START(ndd, childnum);
- u_int32_t curr_size = BP_SIZE (ndd, childnum);
+ uint32_t curr_offset = BP_START(ndd, childnum);
+ uint32_t curr_size = BP_SIZE (ndd, childnum);
struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};
- u_int8_t *XMALLOC_N(curr_size, raw_block);
+ uint8_t *XMALLOC_N(curr_size, raw_block);
rbuf_init(&rb, raw_block, curr_size);
{
// read the block
@@ -2416,7 +2416,7 @@ toku_deserialize_bp_from_compressed(FTNODE node, int childnum,
static int
deserialize_ftnode_from_fd(int fd,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
FTNODE *ftnode,
FTNODE_DISK_DATA *ndd,
struct ftnode_fetch_extra *bfe,
@@ -2442,7 +2442,7 @@ cleanup:
int
toku_deserialize_ftnode_from (int fd,
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
FTNODE *ftnode,
FTNODE_DISK_DATA* ndd,
struct ftnode_fetch_extra* bfe
@@ -2620,7 +2620,7 @@ toku_serialize_rollback_log_to_memory (ROLLBACK_LOG_NODE log,
int
toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log,
FT h,
- BOOL for_checkpoint) {
+ bool for_checkpoint) {
size_t n_to_write;
char *compressed_buf;
{
@@ -2641,7 +2641,7 @@ toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log
}
static int
-deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLBACK_LOG_NODE *log_p,
+deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, uint32_t fullhash, ROLLBACK_LOG_NODE *log_p,
FT h, struct rbuf *rb) {
ROLLBACK_LOG_NODE MALLOC(result);
int r;
@@ -2661,7 +2661,7 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB
result->layout_version_original = rbuf_int(rb);
result->layout_version_read_from_disk = result->layout_version;
result->build_id = rbuf_int(rb);
- result->dirty = FALSE;
+ result->dirty = false;
//TODO: Maybe add descriptor (or just descriptor version) here eventually?
//TODO: This is hard.. everything is shared in a single dictionary.
rbuf_TXNID(rb, &result->txnid);
@@ -2718,7 +2718,7 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB
}
static int
-deserialize_rollback_log_from_rbuf_versioned (u_int32_t version, BLOCKNUM blocknum, u_int32_t fullhash,
+deserialize_rollback_log_from_rbuf_versioned (uint32_t version, BLOCKNUM blocknum, uint32_t fullhash,
ROLLBACK_LOG_NODE *log,
FT h, struct rbuf *rb) {
int r = 0;
@@ -2732,21 +2732,21 @@ deserialize_rollback_log_from_rbuf_versioned (u_int32_t version, BLOCKNUM blockn
}
int
-decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
+decompress_from_raw_block_into_rbuf(uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
toku_trace("decompress");
int r = 0;
// get the number of compressed sub blocks
int n_sub_blocks;
- n_sub_blocks = toku_dtoh32(*(u_int32_t*)(&raw_block[node_header_overhead]));
+ n_sub_blocks = toku_dtoh32(*(uint32_t*)(&raw_block[node_header_overhead]));
// verify the number of sub blocks
invariant(0 <= n_sub_blocks && n_sub_blocks <= max_sub_blocks);
{ // verify the header checksum
- u_int32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks);
+ uint32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks);
invariant(header_length <= raw_block_size);
- u_int32_t xsum = x1764_memory(raw_block, header_length);
- u_int32_t stored_xsum = toku_dtoh32(*(u_int32_t *)(raw_block + header_length));
+ uint32_t xsum = x1764_memory(raw_block, header_length);
+ uint32_t stored_xsum = toku_dtoh32(*(uint32_t *)(raw_block + header_length));
if (xsum != stored_xsum) {
r = TOKUDB_BAD_CHECKSUM;
}
@@ -2754,7 +2754,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
// deserialize the sub block header
struct sub_block sub_block[n_sub_blocks];
- u_int32_t *sub_block_header = (u_int32_t *) &raw_block[node_header_overhead+4];
+ uint32_t *sub_block_header = (uint32_t *) &raw_block[node_header_overhead+4];
for (int i = 0; i < n_sub_blocks; i++) {
sub_block_init(&sub_block[i]);
sub_block[i].compressed_size = toku_dtoh32(sub_block_header[0]);
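Each sub block contributes three 4-byte disk-order words to the header: compressed size, uncompressed size, and checksum (the same triplet as struct dump_sub_block in ftdump.cc further down). A hedged sketch of walking those triplets out of the raw header:

    #include <stdint.h>

    struct sub_block_hdr { uint32_t csize, usize, xsum; };

    // raw points at the first sub block header word, i.e. just past
    // node_header_overhead plus the 4-byte sub block count.
    static void read_sub_block_headers(const uint32_t *raw, int n_sub_blocks,
                                       struct sub_block_hdr *out) {
        for (int i = 0; i < n_sub_blocks; i++) {
            out[i].csize = toku_dtoh32(raw[0]);
            out[i].usize = toku_dtoh32(raw[1]);
            out[i].xsum  = toku_dtoh32(raw[2]);
            raw += 3;
        }
    }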
@@ -2771,13 +2771,13 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
// verify sub block sizes
for (int i = 0; i < n_sub_blocks; i++) {
- u_int32_t compressed_size = sub_block[i].compressed_size;
+ uint32_t compressed_size = sub_block[i].compressed_size;
if (compressed_size<=0 || compressed_size>(1<<30)) {
r = toku_db_badformat();
goto exit;
}
- u_int32_t uncompressed_size = sub_block[i].uncompressed_size;
+ uint32_t uncompressed_size = sub_block[i].uncompressed_size;
if (0) printf("Block %" PRId64 " Compressed size = %u, uncompressed size=%u\n", blocknum.b, compressed_size, uncompressed_size);
if (uncompressed_size<=0 || uncompressed_size>(1<<30)) {
r = toku_db_badformat();
@@ -2802,7 +2802,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
// point at the start of the compressed data (past the node header, the sub block header, and the header checksum)
unsigned char *compressed_data;
- compressed_data = raw_block + node_header_overhead + sub_block_header_size(n_sub_blocks) + sizeof (u_int32_t);
+ compressed_data = raw_block + node_header_overhead + sub_block_header_size(n_sub_blocks) + sizeof (uint32_t);
// point at the start of the uncompressed data
unsigned char *uncompressed_data;
@@ -2825,7 +2825,7 @@ exit:
}
static int
-decompress_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
+decompress_from_raw_block_into_rbuf_versioned(uint32_t version, uint8_t *raw_block, size_t raw_block_size, struct rbuf *rb, BLOCKNUM blocknum) {
// This function exists solely to accommodate future changes in compression.
int r = 0;
switch (version) {
@@ -2835,7 +2835,7 @@ decompress_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_t *raw_b
r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum);
break;
default:
- lazy_assert(FALSE);
+ lazy_assert(false);
}
return r;
}
@@ -2854,7 +2854,7 @@ read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum,
// get the file offset and block size for the block
DISKOFF offset, size;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
- u_int8_t *XMALLOC_N(size, raw_block);
+ uint8_t *XMALLOC_N(size, raw_block);
{
// read the (partially compressed) block
ssize_t rlen = toku_os_pread(fd, raw_block, size, offset);
@@ -2863,14 +2863,14 @@ read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum,
// get the layout_version
int layout_version;
{
- u_int8_t *magic = raw_block + uncompressed_magic_offset;
+ uint8_t *magic = raw_block + uncompressed_magic_offset;
if (memcmp(magic, "tokuleaf", 8)!=0 &&
memcmp(magic, "tokunode", 8)!=0 &&
memcmp(magic, "tokuroll", 8)!=0) {
r = toku_db_badformat();
goto cleanup;
}
- u_int8_t *version = raw_block + uncompressed_version_offset;
+ uint8_t *version = raw_block + uncompressed_version_offset;
layout_version = toku_dtoh32(*(uint32_t*)version);
if (layout_version < FT_LAYOUT_MIN_SUPPORTED_VERSION || layout_version > FT_LAYOUT_VERSION) {
r = toku_db_badformat();
@@ -2907,7 +2907,7 @@ cleanup:
// Read rollback log node from file into struct. Perform version upgrade if necessary.
int
-toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash,
+toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, uint32_t fullhash,
ROLLBACK_LOG_NODE *logp, FT h) {
toku_trace("deserial start");
@@ -2919,7 +2919,7 @@ toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, u_int32_t fullhas
if (r!=0) goto cleanup;
{
- u_int8_t *magic = rb.buf + uncompressed_magic_offset;
+ uint8_t *magic = rb.buf + uncompressed_magic_offset;
if (memcmp(magic, "tokuroll", 8)!=0) {
r = toku_db_badformat();
goto cleanup;
diff --git a/ft/ftdump.cc b/ft/ftdump.cc
index 681a089ad0f..46ed014c355 100644
--- a/ft/ftdump.cc
+++ b/ft/ftdump.cc
@@ -37,25 +37,25 @@ print_item (bytevec val, ITEMLEN len) {
}
static void
-simple_hex_dump(unsigned char *vp, u_int64_t size) {
- for (u_int64_t i = 0; i < size; i++) {
+simple_hex_dump(unsigned char *vp, uint64_t size) {
+ for (uint64_t i = 0; i < size; i++) {
unsigned char c = vp[i];
printf("%2.2X", c);
}
}
static void
-hex_dump(unsigned char *vp, u_int64_t offset, u_int64_t size) {
- u_int64_t n = size / 32;
- for (u_int64_t i = 0; i < n; i++) {
+hex_dump(unsigned char *vp, uint64_t offset, uint64_t size) {
+ uint64_t n = size / 32;
+ for (uint64_t i = 0; i < n; i++) {
printf("%" PRIu64 ": ", offset);
- for (u_int64_t j = 0; j < 32; j++) {
+ for (uint64_t j = 0; j < 32; j++) {
unsigned char c = vp[j];
printf("%2.2X", c);
if (((j+1) % 4) == 0)
printf(" ");
}
- for (u_int64_t j = 0; j < 32; j++) {
+ for (uint64_t j = 0; j < 32; j++) {
unsigned char c = vp[j];
printf("%c", isprint(c) ? c : ' ');
}
@@ -64,7 +64,7 @@ hex_dump(unsigned char *vp, u_int64_t offset, u_int64_t size) {
offset += 32;
}
size = size % 32;
- for (u_int64_t i=0; i<size; i++) {
+ for (uint64_t i=0; i<size; i++) {
if ((i % 32) == 0)
printf("%" PRIu64 ": ", offset+i);
printf("%2.2X", vp[i]);
@@ -116,7 +116,7 @@ dump_header (int f, FT *header, CACHEFILE cf) {
}
static int
-print_le (OMTVALUE lev, u_int32_t UU(idx), void *UU(v)) {
+print_le (OMTVALUE lev, uint32_t UU(idx), void *UU(v)) {
LEAFENTRY CAST_FROM_VOIDP(le, lev);
print_leafentry(stdout, le);
printf("\n");
@@ -215,16 +215,16 @@ dump_node (int f, BLOCKNUM blocknum, FT h) {
}
static void
-dump_block_translation(FT h, u_int64_t offset) {
+dump_block_translation(FT h, uint64_t offset) {
toku_blocknum_dump_translation(h->blocktable, make_blocknum(offset));
}
typedef struct {
int f;
FT h;
- u_int64_t blocksizes;
- u_int64_t leafsizes;
- u_int64_t leafblocks;
+ uint64_t blocksizes;
+ uint64_t leafsizes;
+ uint64_t leafblocks;
} frag_help_extra;
static int
@@ -254,7 +254,7 @@ dump_fragmentation(int f, FT h) {
info.f = f;
info.h = h;
toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED,
- fragmentation_helper, &info, TRUE, TRUE);
+ fragmentation_helper, &info, true, true);
int64_t used_space;
int64_t total_space;
toku_blocktable_internal_fragmentation(h->blocktable, &total_space, &used_space);
@@ -277,7 +277,7 @@ typedef struct {
} garbage_help_extra;
static int
-garbage_leafentry_helper(OMTVALUE v, u_int32_t UU(idx), void *extra) {
+garbage_leafentry_helper(OMTVALUE v, uint32_t UU(idx), void *extra) {
garbage_help_extra *CAST_FROM_VOIDP(info, extra);
LEAFENTRY CAST_FROM_VOIDP(le, v);
info->total_space += leafentry_disksize(le);
@@ -320,21 +320,21 @@ dump_garbage_stats(int f, FT h) {
info.f = f;
info.h = h;
toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED,
- garbage_helper, &info, TRUE, TRUE);
+ garbage_helper, &info, true, true);
printf("total_size: %zu\n", info.total_space);
printf("used_size: %zu\n", info.used_space);
}
-static u_int32_t
+static uint32_t
get_unaligned_uint32(unsigned char *p) {
- return *(u_int32_t *)p;
+ return *(uint32_t *)p;
}
struct dump_sub_block {
- u_int32_t compressed_size;
- u_int32_t uncompressed_size;
- u_int32_t xsum;
+ uint32_t compressed_size;
+ uint32_t uncompressed_size;
+ uint32_t xsum;
};
static void
@@ -345,22 +345,22 @@ sub_block_deserialize(struct dump_sub_block *sb, unsigned char *sub_block_header
}
static void
-verify_block(unsigned char *cp, u_int64_t file_offset, u_int64_t size) {
+verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) {
// verify the header checksum
- const size_t node_header = 8 + sizeof (u_int32_t) + sizeof (u_int32_t) + sizeof (u_int32_t);
+ const size_t node_header = 8 + sizeof (uint32_t) + sizeof (uint32_t) + sizeof (uint32_t);
printf("%.8s layout_version=%u %u build=%d\n", cp, get_unaligned_uint32(cp+8), get_unaligned_uint32(cp+12), get_unaligned_uint32(cp+16));
unsigned char *sub_block_header = &cp[node_header];
- u_int32_t n_sub_blocks = toku_dtoh32(get_unaligned_uint32(&sub_block_header[0]));
- u_int32_t header_length = node_header + n_sub_blocks * sizeof (struct dump_sub_block);
- header_length += sizeof (u_int32_t); // CRC
+ uint32_t n_sub_blocks = toku_dtoh32(get_unaligned_uint32(&sub_block_header[0]));
+ uint32_t header_length = node_header + n_sub_blocks * sizeof (struct dump_sub_block);
+ header_length += sizeof (uint32_t); // CRC
if (header_length > size) {
printf("header length too big: %u\n", header_length);
return;
}
- u_int32_t header_xsum = x1764_memory(cp, header_length);
- u_int32_t expected_xsum = toku_dtoh32(get_unaligned_uint32(&cp[header_length]));
+ uint32_t header_xsum = x1764_memory(cp, header_length);
+ uint32_t expected_xsum = toku_dtoh32(get_unaligned_uint32(&cp[header_length]));
if (header_xsum != expected_xsum) {
printf("header checksum failed: %u %u\n", header_xsum, expected_xsum);
return;
@@ -368,16 +368,16 @@ verify_block(unsigned char *cp, u_int64_t file_offset, u_int64_t size) {
// deserialize the sub block header
struct dump_sub_block sub_block[n_sub_blocks];
- sub_block_header += sizeof (u_int32_t);
- for (u_int32_t i = 0 ; i < n_sub_blocks; i++) {
+ sub_block_header += sizeof (uint32_t);
+ for (uint32_t i = 0 ; i < n_sub_blocks; i++) {
sub_block_deserialize(&sub_block[i], sub_block_header);
sub_block_header += sizeof (struct dump_sub_block);
}
// verify the sub block header
- u_int32_t offset = header_length + 4;
- for (u_int32_t i = 0 ; i < n_sub_blocks; i++) {
- u_int32_t xsum = x1764_memory(cp + offset, sub_block[i].compressed_size);
+ uint32_t offset = header_length + 4;
+ for (uint32_t i = 0 ; i < n_sub_blocks; i++) {
+ uint32_t xsum = x1764_memory(cp + offset, sub_block[i].compressed_size);
printf("%u: %u %u %u", i, sub_block[i].compressed_size, sub_block[i].uncompressed_size, sub_block[i].xsum);
if (xsum != sub_block[i].xsum)
printf(" fail %u offset %" PRIu64, xsum, file_offset + offset);
@@ -395,17 +395,17 @@ dump_block(int f, BLOCKNUM blocknum, FT h) {
printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size);
unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size));
- u_int64_t r = pread(f, vp, size, offset);
- if (r == (u_int64_t)size) {
+ uint64_t r = pread(f, vp, size, offset);
+ if (r == (uint64_t)size) {
verify_block(vp, offset, size);
}
toku_free(vp);
}
static void
-dump_file(int f, u_int64_t offset, u_int64_t size, FILE *outfp) {
+dump_file(int f, uint64_t offset, uint64_t size, FILE *outfp) {
unsigned char *XMALLOC_N(size, vp);
- u_int64_t r = pread(f, vp, size, offset);
+ uint64_t r = pread(f, vp, size, offset);
if (r == size) {
if (outfp == stdout)
hex_dump(vp, offset, size);
@@ -416,7 +416,7 @@ dump_file(int f, u_int64_t offset, u_int64_t size, FILE *outfp) {
}
static void
-set_file(int f, u_int64_t offset, unsigned char newc) {
+set_file(int f, uint64_t offset, unsigned char newc) {
toku_os_pwrite(f, &newc, sizeof newc, offset);
}
@@ -546,7 +546,7 @@ main (int argc, const char *const argv[]) {
} else if (strcmp(fields[0], "dumpdata") == 0 && nfields == 2) {
dump_data = strtol(fields[1], NULL, 10);
} else if (strcmp(fields[0], "block_translation") == 0 || strcmp(fields[0], "bx") == 0) {
- u_int64_t offset = 0;
+ uint64_t offset = 0;
if (nfields == 2)
offset = getuint64(fields[1]);
dump_block_translation(ft, offset);
@@ -555,14 +555,14 @@ main (int argc, const char *const argv[]) {
} else if (strcmp(fields[0], "garbage") == 0) {
dump_garbage_stats(f, ft);
} else if (strcmp(fields[0], "file") == 0 && nfields >= 3) {
- u_int64_t offset = getuint64(fields[1]);
- u_int64_t size = getuint64(fields[2]);
+ uint64_t offset = getuint64(fields[1]);
+ uint64_t size = getuint64(fields[2]);
FILE *outfp = stdout;
if (nfields >= 4)
outfp = fopen(fields[3], "w");
dump_file(f, offset, size, outfp);
} else if (strcmp(fields[0], "setfile") == 0 && nfields == 3) {
- u_int64_t offset = getuint64(fields[1]);
+ uint64_t offset = getuint64(fields[1]);
unsigned char newc = getuint64(fields[2]);
set_file(f, offset, newc);
} else if (strcmp(fields[0], "quit") == 0 || strcmp(fields[0], "q") == 0) {
@@ -580,7 +580,7 @@ main (int argc, const char *const argv[]) {
info.f = f;
info.h = ft;
toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED,
- dump_node_wrapper, &info, TRUE, TRUE);
+ dump_node_wrapper, &info, true, true);
}
toku_ft_free(ft);
return 0;
diff --git a/ft/ftloader-callback.cc b/ft/ftloader-callback.cc
index be97eb62c34..0391bbbfd6b 100644
--- a/ft/ftloader-callback.cc
+++ b/ft/ftloader-callback.cc
@@ -74,7 +74,7 @@ int ft_loader_call_error_function(ft_loader_error_callback loader_error) {
error_callback_lock(loader_error);
r = loader_error->error;
if (r && loader_error->error_callback && !loader_error->did_callback) {
- loader_error->did_callback = TRUE;
+ loader_error->did_callback = true;
loader_error->error_callback(loader_error->db,
loader_error->which_db,
loader_error->error,
diff --git a/ft/ftloader-internal.h b/ft/ftloader-internal.h
index 082cc31e665..44a285099f4 100644
--- a/ft/ftloader-internal.h
+++ b/ft/ftloader-internal.h
@@ -16,11 +16,11 @@
/* These structures maintain a collection of all the open temporary files used by the loader. */
struct file_info {
- BOOL is_open;
- BOOL is_extant; // if true, the file must be unlinked.
+ bool is_open;
+ bool is_extant; // if true, the file must be unlinked.
char *fname;
FILE *file;
- u_int64_t n_rows; // how many rows were written into that file
+ uint64_t n_rows; // how many rows were written into that file
size_t buffer_size;
void *buffer;
};
@@ -56,11 +56,11 @@ int init_rowset (struct rowset *rows, uint64_t memory_budget);
void destroy_rowset (struct rowset *rows);
int add_row (struct rowset *rows, DBT *key, DBT *val);
-int loader_write_row(DBT *key, DBT *val, FIDX data, FILE*, u_int64_t *dataoff, FTLOADER bl);
+int loader_write_row(DBT *key, DBT *val, FIDX data, FILE*, uint64_t *dataoff, FTLOADER bl);
int loader_read_row (FILE *f, DBT *key, DBT *val);
struct merge_fileset {
- BOOL have_sorted_output; // Is there a previous key?
+ bool have_sorted_output; // Is there a previous key?
FIDX sorted_output; // this points to one of the data_fidxs. If output_is_sorted then this is the file containing sorted data. It's still open
DBT prev_key; // What is it? If it's here, it's the last output in the merge fileset
@@ -93,7 +93,7 @@ struct error_callback_s {
int which_db;
DBT key;
DBT val;
- BOOL did_callback;
+ bool did_callback;
toku_mutex_t mutex;
};
typedef struct error_callback_s *ft_loader_error_callback;
@@ -134,18 +134,18 @@ struct ft_loader_s {
QUEUE primary_rowset_queue; // main thread enqueues rowsets in this queue (in maybe 64MB chunks). The extractor thread removes them, sorts them, and writes them to file.
toku_pthread_t extractor_thread; // the thread that takes primary rowset and does extraction and the first level sort and write to file.
- BOOL extractor_live;
+ bool extractor_live;
DBT *last_key; // for each rowset, remember the most recently output key. The system may choose not to keep this up-to-date when a rowset is unsorted. These keys are malloced and ulen maintains the size of the malloced block.
struct rowset *rows; // secondary rows that have been put, but haven't been sorted and written to a file.
- u_int64_t n_rows; // how many rows have been put?
+ uint64_t n_rows; // how many rows have been put?
struct merge_fileset *fs;
const char *temp_file_template;
CACHETABLE cachetable;
- BOOL did_reserve_memory;
+ bool did_reserve_memory;
uint64_t reserved_memory; // how much memory are we allowed to use?
/* To make it easier to recover from errors, we don't use FILE*, instead we use an index into the file_infos. */
@@ -162,19 +162,19 @@ struct ft_loader_s {
QUEUE *fractal_queues; // an array of work queues, one for each secondary index.
toku_pthread_t *fractal_threads;
- BOOL *fractal_threads_live; // an array of bools indicating that fractal_threads[i] is a live thread. (There is no NULL for a pthread_t, so we have to maintain this separately).
+ bool *fractal_threads_live; // an array of bools indicating that fractal_threads[i] is a live thread. (There is no NULL for a pthread_t, so we have to maintain this separately).
unsigned fractal_workers; // number of fractal tree writer threads
toku_mutex_t mutex;
- BOOL mutex_init;
+ bool mutex_init;
};
// Set the number of rows in the loader. Used for test.
-void toku_ft_loader_set_n_rows(FTLOADER bl, u_int64_t n_rows);
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows);
// Get the number of rows in the loader. Used for test.
-u_int64_t toku_ft_loader_get_n_rows(FTLOADER bl);
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl);
// The data passed into a fractal_thread via pthread_create.
struct fractal_thread_args {
@@ -191,8 +191,8 @@ struct fractal_thread_args {
enum toku_compression_method target_compression_method;
};
-void toku_ft_loader_set_n_rows(FTLOADER bl, u_int64_t n_rows);
-u_int64_t toku_ft_loader_get_n_rows(FTLOADER bl);
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows);
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl);
int merge_row_arrays_base (struct row dest[/*an+bn*/], struct row a[/*an*/], int an, struct row b[/*bn*/], int bn,
int which_db, DB *dest_db, ft_compare_func,
@@ -206,7 +206,7 @@ int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER
int mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *);
//int write_file_to_dbfile (int outfile, FIDX infile, FTLOADER bl, const DESCRIPTOR descriptor, int progress_allocation);
-int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q, int n_sources, DBUFIO_FILESET bfs, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation);
+int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q, int n_sources, DBUFIO_FILESET bfs, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation);
int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs, FTLOADER bl, int which_db, DB *dest_db, ft_compare_func);
@@ -227,8 +227,8 @@ int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db,
int ft_loader_write_file_to_dbfile (int outfile, FIDX infile, FTLOADER bl, const DESCRIPTOR descriptor, int progress_allocation);
int ft_loader_init_file_infos (struct file_infos *fi);
-void ft_loader_fi_destroy (struct file_infos *fi, BOOL is_error);
-int ft_loader_fi_close (struct file_infos *fi, FIDX idx, BOOL require_open);
+void ft_loader_fi_destroy (struct file_infos *fi, bool is_error);
+int ft_loader_fi_close (struct file_infos *fi, FIDX idx, bool require_open);
int ft_loader_fi_close_all (struct file_infos *fi);
int ft_loader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode);
int ft_loader_fi_unlink (struct file_infos *fi, FIDX idx);
@@ -243,9 +243,9 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
- BOOL reserve_memory);
+ bool reserve_memory);
-void toku_ft_loader_internal_destroy (FTLOADER bl, BOOL is_error);
+void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error);
// For test purposes only. (In production, the rowset size is determined by negotiation with the cachetable for some memory. See #2613.)
uint64_t toku_ft_loader_get_rowset_budget_for_testing (void);
diff --git a/ft/ftloader.cc b/ft/ftloader.cc
index 9c1506b23df..475b05f1fb4 100644
--- a/ft/ftloader.cc
+++ b/ft/ftloader.cc
@@ -90,13 +90,13 @@ toku_ft_loader_get_rowset_budget_for_testing (void)
void ft_loader_lock_init(FTLOADER bl) {
invariant(!bl->mutex_init);
toku_mutex_init(&bl->mutex, NULL);
- bl->mutex_init = TRUE;
+ bl->mutex_init = true;
}
void ft_loader_lock_destroy(FTLOADER bl) {
if (bl->mutex_init) {
toku_mutex_destroy(&bl->mutex);
- bl->mutex_init = FALSE;
+ bl->mutex_init = false;
}
}
@@ -112,13 +112,13 @@ static void ft_loader_unlock(FTLOADER bl) {
static int add_big_buffer(struct file_info *file) {
int result = 0;
- BOOL newbuffer = FALSE;
+ bool newbuffer = false;
if (file->buffer == NULL) {
file->buffer = toku_malloc(file->buffer_size);
if (file->buffer == NULL)
result = get_error_errno();
else
- newbuffer = TRUE;
+ newbuffer = true;
}
if (result == 0) {
int r = setvbuf(file->file, (char *) file->buffer, _IOFBF, file->buffer_size);
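add_big_buffer leans on setvbuf's contract: the caller supplies and owns the buffer, setvbuf must run before any other I/O on the stream, and a nonzero return means the buffer was rejected. A minimal standalone sketch of that pattern:

    #include <stdio.h>
    #include <stdlib.h>

    // Give a freshly opened stream a large fully-buffered I/O buffer.
    // setvbuf must be called before the first read or write on f.
    static void *install_big_buffer(FILE *f, size_t bufsize) {
        void *buf = malloc(bufsize);
        if (buf && setvbuf(f, (char *) buf, _IOFBF, bufsize) != 0) {
            free(buf);
            buf = NULL;
        }
        return buf;  // caller must free it, after fclose(f)
    }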
@@ -153,7 +153,7 @@ int ft_loader_init_file_infos (struct file_infos *fi) {
return result;
}
-void ft_loader_fi_destroy (struct file_infos *fi, BOOL is_error)
+void ft_loader_fi_destroy (struct file_infos *fi, bool is_error)
// Effect: Free the resources in the fi.
// If is_error then we close and unlink all the temp files.
// If !is_error then requires that all the temp files have been closed and destroyed
@@ -198,8 +198,8 @@ static int open_file_add (struct file_infos *fi,
XREALLOC_N(fi->n_files_limit, fi->file_infos);
}
invariant(fi->n_files < fi->n_files_limit);
- fi->file_infos[fi->n_files].is_open = TRUE;
- fi->file_infos[fi->n_files].is_extant = TRUE;
+ fi->file_infos[fi->n_files].is_open = true;
+ fi->file_infos[fi->n_files].is_extant = true;
fi->file_infos[fi->n_files].fname = fname;
fi->file_infos[fi->n_files].file = file;
fi->file_infos[fi->n_files].n_rows = 0;
@@ -227,7 +227,7 @@ int ft_loader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) {
if (fi->file_infos[i].file == NULL) {
result = get_error_errno();
} else {
- fi->file_infos[i].is_open = TRUE;
+ fi->file_infos[i].is_open = true;
// No longer need the big buffer for reopened files. Don't allocate the space, we need it elsewhere.
//add_big_buffer(&fi->file_infos[i]);
fi->n_files_open++;
@@ -236,7 +236,7 @@ int ft_loader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) {
return result;
}
-int ft_loader_fi_close (struct file_infos *fi, FIDX idx, BOOL require_open)
+int ft_loader_fi_close (struct file_infos *fi, FIDX idx, bool require_open)
{
int result = 0;
toku_mutex_lock(&fi->lock);
@@ -244,7 +244,7 @@ int ft_loader_fi_close (struct file_infos *fi, FIDX idx, BOOL require_open)
if (fi->file_infos[idx.idx].is_open) {
invariant(fi->n_files_open>0); // loader-cleanup-test failure
fi->n_files_open--;
- fi->file_infos[idx.idx].is_open = FALSE;
+ fi->file_infos[idx.idx].is_open = false;
int r = toku_os_fclose(fi->file_infos[idx.idx].file);
if (r)
result = get_error_errno();
@@ -264,7 +264,7 @@ int ft_loader_fi_unlink (struct file_infos *fi, FIDX idx) {
invariant(fi->n_files_extant>0);
fi->n_files_extant--;
invariant(!fi->file_infos[id].is_open); // must be closed before we unlink
- fi->file_infos[id].is_extant = FALSE;
+ fi->file_infos[id].is_extant = false;
int r = unlink(fi->file_infos[id].fname);
if (r != 0)
result = get_error_errno();
@@ -282,7 +282,7 @@ ft_loader_fi_close_all(struct file_infos *fi) {
for (int i = 0; i < fi->n_files; i++) {
int r;
FIDX idx = { i };
- r = ft_loader_fi_close(fi, idx, FALSE); // ignore files that are already closed
+ r = ft_loader_fi_close(fi, idx, false); // ignore files that are already closed
if (rval == 0 && r)
rval = r; // capture first error
}
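ft_loader_fi_close_all uses the "capture the first error, keep cleaning up" idiom: every file is closed regardless, but only the first failure is reported. Distilled to plain stdio (hypothetical helper):

    #include <stdio.h>
    #include <errno.h>

    // Close every file, remembering only the first failure so cleanup
    // still runs to completion.
    static int close_all(FILE **files, int n) {
        int rval = 0;
        for (int i = 0; i < n; i++) {
            if (files[i] && fclose(files[i]) != 0 && rval == 0)
                rval = errno;  // capture first error
            files[i] = NULL;
        }
        return rval;
    }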
@@ -327,7 +327,7 @@ int ft_loader_open_temp_file (FTLOADER bl, FIDX *file_idx)
return result;
}
-void toku_ft_loader_internal_destroy (FTLOADER bl, BOOL is_error) {
+void toku_ft_loader_internal_destroy (FTLOADER bl, bool is_error) {
ft_loader_lock_destroy(bl);
// These frees rely on the fact that if you free a NULL pointer then nothing bad happens.
@@ -444,7 +444,7 @@ static void ft_loader_set_fractal_workers_count(FTLOADER bl) {
//
// DBUFIO_DEPTH*F*MERGE_BUF_SIZE + FRACTAL_WRITER_ROWSETS*MERGE_BUF_SIZE + WORKERS*NODESIZE*2 <= RESERVED_MEMORY
-static int64_t memory_avail_during_merge(FTLOADER bl, BOOL is_fractal_node) {
+static int64_t memory_avail_during_merge(FTLOADER bl, bool is_fractal_node) {
// avail memory = reserved memory - WORKERS*NODESIZE*2 for the last merge stage only
int64_t avail_memory = bl->reserved_memory;
if (is_fractal_node) {
@@ -454,7 +454,7 @@ static int64_t memory_avail_during_merge(FTLOADER bl, BOOL is_fractal_node) {
return avail_memory;
}
-static int merge_fanin (FTLOADER bl, BOOL is_fractal_node) {
+static int merge_fanin (FTLOADER bl, bool is_fractal_node) {
// return number of temp files to read in this pass
int64_t memory_avail = memory_avail_during_merge(bl, is_fractal_node);
int64_t nbuffers = memory_avail / (int64_t)TARGET_MERGE_BUF_SIZE;
@@ -463,7 +463,7 @@ static int merge_fanin (FTLOADER bl, BOOL is_fractal_node) {
return MAX(nbuffers / (int64_t)DBUFIO_DEPTH, (int)MIN_MERGE_FANIN);
}
-static uint64_t memory_per_rowset_during_merge (FTLOADER bl, int merge_factor, BOOL is_fractal_node // if it is being sent to a q
+static uint64_t memory_per_rowset_during_merge (FTLOADER bl, int merge_factor, bool is_fractal_node // if it is being sent to a q
) {
int64_t memory_avail = memory_avail_during_merge(bl, is_fractal_node);
int64_t nbuffers = DBUFIO_DEPTH * merge_factor;
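To make the memory budget above concrete: fanin is roughly (reserved memory / merge buffer size) / DBUFIO_DEPTH, floored at a minimum. A worked sketch with illustrative constants (not the tree's real TARGET_MERGE_BUF_SIZE, DBUFIO_DEPTH, or MIN_MERGE_FANIN values): 512MB reserved with 16MB buffers gives 32 buffers, and a dbufio depth of 2 yields a fanin of 16.

    #include <stdint.h>

    // Illustrative only: demo constants, not the loader's tuning.
    #define DEMO_MERGE_BUF_SIZE (16LL << 20)   // 16MB per merge buffer
    #define DEMO_DBUFIO_DEPTH   2
    #define DEMO_MIN_FANIN      4

    static int demo_merge_fanin(int64_t reserved_memory) {
        int64_t nbuffers = reserved_memory / DEMO_MERGE_BUF_SIZE;
        int64_t fanin = nbuffers / DEMO_DBUFIO_DEPTH;
        return fanin > DEMO_MIN_FANIN ? (int) fanin : DEMO_MIN_FANIN;
    }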
@@ -482,7 +482,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
- BOOL reserve_memory)
+ bool reserve_memory)
// Effect: Allocate and initialize a FTLOADER, but do not create the extractor thread.
{
FTLOADER CALLOC(bl); // initialized to all zeros (hence CALLOC)
@@ -491,11 +491,11 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
bl->generate_row_for_put = g;
bl->cachetable = cachetable;
if (reserve_memory && bl->cachetable) {
- bl->did_reserve_memory = TRUE;
+ bl->did_reserve_memory = true;
bl->reserved_memory = toku_cachetable_reserve_memory(bl->cachetable, 2.0/3.0); // allocate 2/3 of the unreserved part (which is 3/4 of the memory to start with).
}
else {
- bl->did_reserve_memory = FALSE;
+ bl->did_reserve_memory = false;
bl->reserved_memory = 512*1024*1024; // if no cache table use 512MB.
}
//printf("Reserved memory=%ld\n", bl->reserved_memory);
@@ -513,8 +513,8 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
ft_loader_init_error_callback(&bl->error_callback);
ft_loader_init_poll_callback(&bl->poll_callback);
-#define MY_CALLOC_N(n,v) CALLOC_N(n,v); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, TRUE); return r; }
-#define SET_TO_MY_STRDUP(lval, s) do { char *v = toku_strdup(s); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, TRUE); return r; } lval = v; } while (0)
+#define MY_CALLOC_N(n,v) CALLOC_N(n,v); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; }
+#define SET_TO_MY_STRDUP(lval, s) do { char *v = toku_strdup(s); if (!v) { int r = get_error_errno(); toku_ft_loader_internal_destroy(bl, true); return r; } lval = v; } while (0)
MY_CALLOC_N(N, bl->root_xids_that_created);
for (int i=0; i<N; i++) if (brts[i]) bl->root_xids_that_created[i]=brts[i]->ft->h->root_xid_that_created;
@@ -532,11 +532,11 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
for (int i=0; i<N; i++) bl->fractal_queues[i]=NULL;
MY_CALLOC_N(N, bl->fractal_threads);
MY_CALLOC_N(N, bl->fractal_threads_live);
- for (int i=0; i<N; i++) bl->fractal_threads_live[i] = FALSE;
+ for (int i=0; i<N; i++) bl->fractal_threads_live[i] = false;
{
int r = ft_loader_init_file_infos(&bl->file_infos);
- if (r!=0) { toku_ft_loader_internal_destroy(bl, TRUE); return r; }
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
}
SET_TO_MY_STRDUP(bl->temp_file_template, temp_file_template);
@@ -551,7 +551,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
for(int i=0;i<N;i++) {
{
int r = init_rowset(&bl->rows[i], memory_per_rowset_during_extract(bl));
- if (r!=0) { toku_ft_loader_internal_destroy(bl, TRUE); return r; }
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
}
init_merge_fileset(&bl->fs[i]);
bl->last_key[i].flags = DB_DBT_REALLOC; // don't really need this, but it's nice to maintain it. We use ulen to keep track of the realloced space.
@@ -559,10 +559,10 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
{
int r = init_rowset(&bl->primary_rowset, memory_per_rowset_during_extract(bl));
- if (r!=0) { toku_ft_loader_internal_destroy(bl, TRUE); return r; }
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
}
{ int r = queue_create(&bl->primary_rowset_queue, EXTRACTOR_QUEUE_DEPTH);
- if (r!=0) { toku_ft_loader_internal_destroy(bl, TRUE); return r; }
+ if (r!=0) { toku_ft_loader_internal_destroy(bl, true); return r; }
}
//printf("%s:%d toku_pthread_create\n", __FILE__, __LINE__);
{
@@ -584,7 +584,7 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
- BOOL reserve_memory)
+ bool reserve_memory)
/* Effect: called by DB_ENV->create_loader to create a brt loader.
* Arguments:
* blp Return the brt loader here.
@@ -613,16 +613,16 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
FTLOADER bl = *blp;
int r = toku_pthread_create(&bl->extractor_thread, NULL, extractor_thread, (void*)bl);
if (r==0) {
- bl->extractor_live = TRUE;
+ bl->extractor_live = true;
} else {
result = r;
- (void) toku_ft_loader_internal_destroy(bl, TRUE);
+ (void) toku_ft_loader_internal_destroy(bl, true);
}
}
return result;
}
-static void ft_loader_set_panic(FTLOADER bl, int error, BOOL callback) {
+static void ft_loader_set_panic(FTLOADER bl, int error, bool callback) {
int r = ft_loader_set_error(&bl->error_callback, error, NULL, 0, NULL, NULL);
if (r == 0 && callback)
ft_loader_call_error_function(&bl->error_callback);
@@ -719,7 +719,7 @@ static int bl_read_dbt (/*in*/DBT *dbt, FILE *stream)
static int bl_read_dbt_from_dbufio (/*in*/DBT *dbt, DBUFIO_FILESET bfs, int filenum)
{
int result = 0;
- u_int32_t len;
+ uint32_t len;
{
size_t n_read;
int r = dbufio_fileset_read(bfs, filenum, &len, sizeof(len), &n_read);
@@ -755,7 +755,7 @@ static int bl_read_dbt_from_dbufio (/*in*/DBT *dbt, DBUFIO_FILESET bfs, int file
}
-int loader_write_row(DBT *key, DBT *val, FIDX data, FILE *dataf, u_int64_t *dataoff, FTLOADER bl)
+int loader_write_row(DBT *key, DBT *val, FIDX data, FILE *dataf, uint64_t *dataoff, FTLOADER bl)
/* Effect: Given a key and a val (both DBTs), write them to a file. Increment *dataoff so that it's up to date.
* Arguments:
* key, val write these.
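The read side shown earlier (bl_read_dbt_from_dbufio) pulls a 4-byte length and then that many bytes, so the write side is presumably the symmetric length-prefixed format. A hedged sketch of writing one such blob (hypothetical helper, simplified error handling):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    // Write one length-prefixed blob; the reader pulls a uint32_t length
    // and then the payload.
    static int write_blob(FILE *f, const void *data, uint32_t len) {
        if (fwrite(&len, sizeof len, 1, f) != 1) return EIO;
        if (len > 0 && fwrite(data, len, 1, f) != 1) return EIO;
        return 0;
    }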
@@ -967,7 +967,7 @@ static void* extractor_thread (void *blv) {
{
r = process_primary_rows(bl, primary_rowset);
if (r)
- ft_loader_set_panic(bl, r, FALSE);
+ ft_loader_set_panic(bl, r, false);
}
}
@@ -975,7 +975,7 @@ static void* extractor_thread (void *blv) {
if (r == 0) {
r = finish_primary_rows(bl);
if (r)
- ft_loader_set_panic(bl, r, FALSE);
+ ft_loader_set_panic(bl, r, false);
}
return NULL;
@@ -1032,7 +1032,7 @@ finish_extractor (FTLOADER bl) {
int r = toku_pthread_join(bl->extractor_thread, &toku_pthread_retval);
resource_assert_zero(r);
invariant(toku_pthread_retval == NULL);
- bl->extractor_live = FALSE;
+ bl->extractor_live = false;
}
{
int r = queue_destroy(bl->primary_rowset_queue);
@@ -1047,7 +1047,7 @@ finish_extractor (FTLOADER bl) {
static const DBT zero_dbt = {0,0,0,0};
-static DBT make_dbt (void *data, u_int32_t size) {
+static DBT make_dbt (void *data, uint32_t size) {
DBT result = zero_dbt;
result.data = data;
result.size = size;
@@ -1206,11 +1206,11 @@ int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val)
return loader_do_put(bl, key, val);
}
-void toku_ft_loader_set_n_rows(FTLOADER bl, u_int64_t n_rows) {
+void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows) {
bl->n_rows = n_rows;
}
-u_int64_t toku_ft_loader_get_n_rows(FTLOADER bl) {
+uint64_t toku_ft_loader_get_n_rows(FTLOADER bl) {
return bl->n_rows;
}
@@ -1410,7 +1410,7 @@ static int sort_rows (struct rowset *rows, int which_db, DB *dest_db, ft_compare
void init_merge_fileset (struct merge_fileset *fs)
/* Effect: Initialize a fileset */
{
- fs->have_sorted_output = FALSE;
+ fs->have_sorted_output = false;
fs->sorted_output = FIDX_NULL;
fs->prev_key = zero_dbt;
fs->prev_key.flags = DB_DBT_REALLOC;
@@ -1491,7 +1491,7 @@ static int write_rowset_to_file (FTLOADER bl, FIDX sfile, const struct rowset ro
DBT skey = make_dbt(rows.data + rows.rows[i].off, rows.rows[i].klen);
DBT sval = make_dbt(rows.data + rows.rows[i].off + rows.rows[i].klen, rows.rows[i].vlen);
- u_int64_t soffset=0; // don't really need this.
+ uint64_t soffset=0; // don't really need this.
int r = loader_write_row(&skey, &sval, sfile, sstream, &soffset, bl);
if (r != 0) return r;
}
@@ -1512,7 +1512,7 @@ int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER
* Returns 0 on success, otherwise an error number.
* Destroy the rowset after finishing it.
* Note: There is no sense in trying to calculate progress by this function since it's done concurrently with the loader->put operation.
- * Note first time called: invariant: fs->have_sorted_output == FALSE
+ * Note first time called: invariant: fs->have_sorted_output == false
*/
{
//printf(" sort_and_write use %d progress=%d fin at %d\n", progress_allocation, bl->progress, bl->progress+progress_allocation);
@@ -1541,8 +1541,8 @@ int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER
} else {
// write the sorted rowset into a new temp file
if (fs->have_sorted_output) {
- fs->have_sorted_output = FALSE;
- result = ft_loader_fi_close(&bl->file_infos, fs->sorted_output, TRUE);
+ fs->have_sorted_output = false;
+ result = ft_loader_fi_close(&bl->file_infos, fs->sorted_output, true);
}
if (result == 0) {
FIDX sfile = FIDX_NULL;
@@ -1550,13 +1550,13 @@ int sort_and_write_rows (struct rowset rows, struct merge_fileset *fs, FTLOADER
if (result == 0) {
result = write_rowset_to_file(bl, sfile, rows);
if (result == 0) {
- fs->have_sorted_output = TRUE; fs->sorted_output = sfile;
+ fs->have_sorted_output = true; fs->sorted_output = sfile;
// set the max key in the temp file to the max key in the sorted rowset
result = toku_dbt_set(rows.rows[rows.n_rows-1].klen, rows.data + rows.rows[rows.n_rows-1].off, &fs->prev_key, NULL);
}
}
}
- // Note: if result == 0 then invariant fs->have_sorted_output == TRUE
+ // Note: if result == 0 then invariant fs->have_sorted_output == true
}
}
}
@@ -1573,7 +1573,7 @@ int ft_loader_sort_and_write_rows (struct rowset *rows, struct merge_fileset *fs
return sort_and_write_rows (*rows, fs, bl, which_db, dest_db, compare);
}
-int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q, int n_sources, DBUFIO_FILESET bfs, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation)
+int toku_merge_some_files_using_dbufio (const bool to_q, FIDX dest_data, QUEUE q, int n_sources, DBUFIO_FILESET bfs, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation)
/* Effect: Given an array of FILE*'s, each containing sorted rows, merge the data and write it to an output. All the files remain open after the merge.
* This merge is performed in one pass, so don't pass too many files in. If you need a tree of merges do it elsewhere.
* If TO_Q is true then we write rowsets into queue Q. Otherwise we write into dest_data.
@@ -1598,7 +1598,7 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
//printf(" merge_some_files progress=%d fin at %d\n", bl->progress, bl->progress+progress_allocation);
DBT keys[n_sources];
DBT vals[n_sources];
- u_int64_t dataoff[n_sources];
+ uint64_t dataoff[n_sources];
DBT zero = zero_dbt; zero.flags=DB_DBT_REALLOC;
for (int i=0; i<n_sources; i++) {
@@ -1614,7 +1614,7 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
if (r!=0) result = r;
}
- u_int64_t n_rows = 0;
+ uint64_t n_rows = 0;
if (result==0) {
// load pqueue with first value from each source
for (int i=0; i<n_sources; i++) {
@@ -1642,7 +1642,7 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
toku_mutex_unlock(&bl->file_infos.lock);
}
}
- u_int64_t n_rows_done = 0;
+ uint64_t n_rows_done = 0;
struct rowset *output_rowset = NULL;
if (result==0 && to_q) {
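The merge loop that follows is a classic k-way merge driven by a priority queue: seed it with the first row of each source, then repeatedly pop the smallest and refill from that source. A control-flow sketch using std::priority_queue over ints (the loader's own pqueue and DBT rows are elided):

    #include <cstdio>
    #include <queue>
    #include <vector>

    struct entry { int key; int source; };
    struct entry_gt {
        bool operator()(const entry &a, const entry &b) const { return a.key > b.key; }
    };

    // Min-heap k-way merge: pop the smallest row, refill from its source.
    static void kway_merge(const std::vector<std::vector<int>> &sources) {
        std::priority_queue<entry, std::vector<entry>, entry_gt> pq;
        std::vector<size_t> pos(sources.size(), 0);
        for (size_t i = 0; i < sources.size(); i++)
            if (!sources[i].empty()) pq.push(entry{sources[i][0], (int) i});
        while (!pq.empty()) {
            entry e = pq.top(); pq.pop();
            printf("%d\n", e.key);                 // "write" the smallest row
            if (++pos[e.source] < sources[e.source].size())
                pq.push(entry{sources[e.source][pos[e.source]], e.source});
        }
    }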
@@ -1727,7 +1727,7 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
}
n_rows_done++;
- const u_int64_t rows_per_report = size_factor*1024;
+ const uint64_t rows_per_report = size_factor*1024;
if (n_rows_done%rows_per_report==0) {
// need to update the progress.
double fraction_of_remaining_we_just_did = (double)rows_per_report / (double)(n_rows - n_rows_done + rows_per_report);
@@ -1766,7 +1766,7 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
return result;
}
-static int merge_some_files (const BOOL to_q, FIDX dest_data, QUEUE q, int n_sources, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation)
+static int merge_some_files (const bool to_q, FIDX dest_data, QUEUE q, int n_sources, FIDX srcs_fidxs[/*n_sources*/], FTLOADER bl, int which_db, DB *dest_db, ft_compare_func compare, int progress_allocation)
{
int result = 0;
DBUFIO_FILESET bfs = NULL;
@@ -1838,8 +1838,8 @@ int merge_files (struct merge_fileset *fs,
{
//printf(" merge_files %d files\n", fs->n_temp_files);
//printf(" merge_files use %d progress=%d fin at %d\n", progress_allocation, bl->progress, bl->progress+progress_allocation);
- const int final_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, TRUE); // try for a merge to the leaf level
- const int earlier_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, FALSE); // try for a merge at nonleaf.
+ const int final_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, true); // try for a merge to the leaf level
+ const int earlier_mergelimit = (size_factor == 1) ? 4 : merge_fanin(bl, false); // try for a merge at nonleaf.
int n_passes_left = (fs->n_temp_files<=final_mergelimit)
? 1
: 1+n_passes((fs->n_temp_files+final_mergelimit-1)/final_mergelimit, earlier_mergelimit);
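n_passes itself is not shown in this hunk, but the recurrence above suggests it ceil-divides the file count by the fanin until one file remains. A plausible sketch under that assumption:

    // Ceil-divide the file count by the fanin until one file remains;
    // mirrors the 1 + n_passes(ceil(n/final), earlier) recurrence above.
    static int demo_n_passes(int n_files, int fanin) {
        int passes = 0;
        while (n_files > 1) {
            n_files = (n_files + fanin - 1) / fanin;
            passes++;
        }
        return passes;
    }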
@@ -1852,7 +1852,7 @@ int merge_files (struct merge_fileset *fs,
invariant(fs->n_temp_files>0);
struct merge_fileset next_file_set;
- BOOL to_queue = (BOOL)(fs->n_temp_files <= final_mergelimit);
+ bool to_queue = (bool)(fs->n_temp_files <= final_mergelimit);
init_merge_fileset(&next_file_set);
while (fs->n_temp_files>0) {
// grab some files and merge them.
@@ -1893,7 +1893,7 @@ int merge_files (struct merge_fileset *fs,
for (int i=0; i<n_to_merge; i++) {
if (!fidx_is_null(data_fidxs[i])) {
{
- int r = ft_loader_fi_close(&bl->file_infos, data_fidxs[i], TRUE);
+ int r = ft_loader_fi_close(&bl->file_infos, data_fidxs[i], true);
if (r!=0 && result==0) result = r;
}
{
@@ -1906,7 +1906,7 @@ int merge_files (struct merge_fileset *fs,
fs->n_temp_files -= n_to_merge;
if (!to_queue && !fidx_is_null(merged_data)) {
- int r = ft_loader_fi_close(&bl->file_infos, merged_data, TRUE);
+ int r = ft_loader_fi_close(&bl->file_infos, merged_data, true);
if (r!=0 && result==0) result = r;
}
toku_free(data_fidxs);
@@ -1924,7 +1924,7 @@ int merge_files (struct merge_fileset *fs,
if (result!=0) break;
}
- if (result) ft_loader_set_panic(bl, result, TRUE);
+ if (result) ft_loader_set_panic(bl, result, true);
{
int r = queue_eof(output_q);
@@ -2263,8 +2263,8 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
TXNID le_xid = leafentry_xid(bl, which_db);
struct leaf_buf *lbuf = start_leaf(&out, descriptor, lblock, le_xid, target_nodesize);
- u_int64_t n_rows_remaining = bl->n_rows;
- u_int64_t old_n_rows_remaining = bl->n_rows;
+ uint64_t n_rows_remaining = bl->n_rows;
+ uint64_t old_n_rows_remaining = bl->n_rows;
uint64_t used_estimate = 0; // how much diskspace have we used up?
@@ -2277,7 +2277,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
int rr = queue_deq(q, &item, NULL, NULL);
if (rr == EOF) break;
if (rr != 0) {
- ft_loader_set_panic(bl, rr, TRUE); // error after cilk sync
+ ft_loader_set_panic(bl, rr, true); // error after cilk sync
break;
}
}
@@ -2312,7 +2312,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
invariant(maxkey.data != NULL);
if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, bl))) {
- ft_loader_set_panic(bl, r, TRUE); // error after cilk sync
+ ft_loader_set_panic(bl, r, true); // error after cilk sync
if (result == 0) result = r;
break;
}
@@ -2322,7 +2322,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
r = allocate_block(&out, &lblock);
if (r != 0) {
- ft_loader_set_panic(bl, r, TRUE);
+ ft_loader_set_panic(bl, r, true);
if (result == 0) result = r;
break;
}
@@ -2404,7 +2404,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
char *XMALLOC_N(desc_size, buf);
wbuf_init(&wbuf, buf, desc_size);
toku_serialize_descriptor_contents_to_wbuf(&wbuf, descriptor);
- u_int32_t checksum = x1764_finish(&wbuf.checksum);
+ uint32_t checksum = x1764_finish(&wbuf.checksum);
wbuf_int(&wbuf, checksum);
invariant(wbuf.ndone==desc_size);
r = toku_os_write(out.fd, wbuf.buf, wbuf.ndone);
@@ -2540,8 +2540,8 @@ static int loader_do_i (FTLOADER bl,
// ignore r2, since we already have an error
goto error;
}
- invariant(bl->fractal_threads_live[which_db]==FALSE);
- bl->fractal_threads_live[which_db] = TRUE;
+ invariant(bl->fractal_threads_live[which_db]==false);
+ bl->fractal_threads_live[which_db] = true;
r = merge_files(fs, bl, which_db, dest_db, compare, allocation_for_merge, bl->fractal_queues[which_db]);
@@ -2552,7 +2552,7 @@ static int loader_do_i (FTLOADER bl,
resource_assert_zero(r2);
invariant(toku_pthread_retval==NULL);
invariant(bl->fractal_threads_live[which_db]);
- bl->fractal_threads_live[which_db] = FALSE;
+ bl->fractal_threads_live[which_db] = false;
if (r == 0) r = fta.errno_result;
}
}
@@ -2606,7 +2606,7 @@ static int toku_ft_loader_close_internal (FTLOADER bl)
invariant(bl->file_infos.n_files_extant == 0);
invariant(bl->progress == PROGRESS_MAX);
error:
- toku_ft_loader_internal_destroy(bl, (BOOL)(result!=0));
+ toku_ft_loader_internal_destroy(bl, (bool)(result!=0));
return result;
}
@@ -2644,7 +2644,7 @@ int toku_ft_loader_close (FTLOADER bl,
if (r && result == 0)
result = r;
} else
- toku_ft_loader_internal_destroy(bl, TRUE);
+ toku_ft_loader_internal_destroy(bl, true);
return result;
}
@@ -2661,7 +2661,7 @@ int toku_ft_loader_finish_extractor(FTLOADER bl) {
return result;
}
-int toku_ft_loader_abort(FTLOADER bl, BOOL is_error)
+int toku_ft_loader_abort(FTLOADER bl, bool is_error)
/* Effect : Abort the bulk loader, free ft_loader resources */
{
int result = 0;
@@ -2722,7 +2722,7 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr
size_t serialized_leaf_size = 0;
char *serialized_leaf = NULL;
FTNODE_DISK_DATA ndd = NULL;
- result = toku_serialize_ftnode_to_memory(lbuf->node, &ndd, target_basementnodesize, target_compression_method, TRUE, TRUE, &serialized_leaf_size, &serialized_leaf);
+ result = toku_serialize_ftnode_to_memory(lbuf->node, &ndd, target_basementnodesize, target_compression_method, true, true, &serialized_leaf_size, &serialized_leaf);
// write it out
if (result == 0) {
@@ -2751,7 +2751,7 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr
result = update_progress(progress_allocation, bl, "wrote node");
if (result)
- ft_loader_set_panic(bl, result, TRUE);
+ ft_loader_set_panic(bl, result, true);
}
static int write_translation_table (struct dbout *out, long long *off_of_translation_p) {
@@ -2926,7 +2926,7 @@ static void write_nonleaf_node (FTLOADER bl, struct dbout *out, int64_t blocknum
size_t n_bytes;
char *bytes;
int r;
- r = toku_serialize_ftnode_to_memory(node, &ndd, target_basementnodesize, target_compression_method, TRUE, TRUE, &n_bytes, &bytes);
+ r = toku_serialize_ftnode_to_memory(node, &ndd, target_basementnodesize, target_compression_method, true, true, &n_bytes, &bytes);
if (r) {
result = r;
} else {
@@ -2960,7 +2960,7 @@ static void write_nonleaf_node (FTLOADER bl, struct dbout *out, int64_t blocknum
toku_free(subtree_info);
if (result != 0)
- ft_loader_set_panic(bl, result, TRUE);
+ ft_loader_set_panic(bl, result, true);
}
static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, struct subtrees_info *sts, const DESCRIPTOR descriptor, uint32_t target_nodesize, uint32_t target_basementnodesize, enum toku_compression_method target_compression_method) {
@@ -3066,7 +3066,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
result = ft_loader_get_error(&bl->error_callback);
// Now set things up for the next iteration.
- int r = ft_loader_fi_close(&bl->file_infos, pivots_fidx, TRUE); if (r != 0 && result == 0) result = r;
+ int r = ft_loader_fi_close(&bl->file_infos, pivots_fidx, true); if (r != 0 && result == 0) result = r;
r = ft_loader_fi_unlink(&bl->file_infos, pivots_fidx); if (r != 0 && result == 0) result = r;
pivots_fidx = next_pivots_file;
toku_free(sts->subtrees); sts->subtrees = NULL;
@@ -3076,7 +3076,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
if (result)
break;
}
- { int r = ft_loader_fi_close (&bl->file_infos, pivots_fidx, TRUE); if (r != 0 && result == 0) result = r; }
+ { int r = ft_loader_fi_close (&bl->file_infos, pivots_fidx, true); if (r != 0 && result == 0) result = r; }
{ int r = ft_loader_fi_unlink(&bl->file_infos, pivots_fidx); if (r != 0 && result == 0) result = r; }
return result;
}
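
An idiom worth noting throughout merge_files and the writer above: cleanup steps always run to completion, but only the first nonzero status is kept as the overall result ("if (r != 0 && result == 0) result = r;"). A minimal standalone sketch of the same first-error-wins pattern (close_all is hypothetical, not part of the loader):

#include <unistd.h>

static int close_all(int fds[], int n) {
    int result = 0;
    for (int i = 0; i < n; i++) {
        int r = close(fds[i]);
        // Keep closing the remaining fds, but remember only the first error.
        if (r != 0 && result == 0) result = r;
    }
    return result;
}
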
diff --git a/ft/ftloader.h b/ft/ftloader.h
index 68e5d25c3ab..c7c9d2fea32 100644
--- a/ft/ftloader.h
+++ b/ft/ftloader.h
@@ -26,7 +26,7 @@ int toku_ft_loader_open (FTLOADER *bl,
const char *temp_file_template,
LSN load_lsn,
TOKUTXN txn,
- BOOL reserve_memory);
+ bool reserve_memory);
int toku_ft_loader_put (FTLOADER bl, DBT *key, DBT *val);
@@ -35,7 +35,7 @@ int toku_ft_loader_close (FTLOADER bl,
ft_loader_poll_func poll_callback, void *poll_callback_extra);
int toku_ft_loader_abort(FTLOADER bl,
- BOOL is_error);
+ bool is_error);
// For test purposes only
void toku_ft_loader_set_size_factor (uint32_t factor);
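
A hypothetical happy-path/error-path sketch against the prototypes above (assuming ftloader.h is included; load_rows_demo itself is invented for illustration): rows are fed with toku_ft_loader_put, and on failure the loader is torn down with the now-bool abort flag.

static int load_rows_demo(FTLOADER bl, DBT *keys, DBT *vals, int n) {
    for (int i = 0; i < n; i++) {
        int r = toku_ft_loader_put(bl, &keys[i], &vals[i]);
        if (r != 0) {
            (void) toku_ft_loader_abort(bl, true);  // is_error = true
            return r;
        }
    }
    return 0;  // on success the caller goes on to toku_ft_loader_close(...)
}
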
diff --git a/ft/fttypes.h b/ft/fttypes.h
index 1c777e574b8..ba297d47197 100644
--- a/ft/fttypes.h
+++ b/ft/fttypes.h
@@ -18,15 +18,7 @@
#include <inttypes.h>
-#include <stdbool.h>
-#ifndef TRUE
-// In the future, use the stdbool bool and constants (true false), rather than BOOL, TRUE, and FALSE.
-#define TRUE true
-#define FALSE false
-typedef bool BOOL;
-#endif
-
-
+// Use the C++ bool and constants (true, false), rather than BOOL, TRUE, and FALSE.
typedef struct ft_handle *FT_HANDLE;
typedef struct ftnode *FTNODE;
@@ -82,30 +74,30 @@ static inline PAIR_ATTR make_pair_attr(long size) {
.leaf_size = 0,
.rollback_size = 0,
.cache_pressure_size = 0,
- .is_valid = TRUE
+ .is_valid = true
};
#else
- PAIR_ATTR result = {size, 0, 0, 0, 0, TRUE};
+ PAIR_ATTR result = {size, 0, 0, 0, 0, true};
#endif
return result;
}
typedef struct {
- u_int32_t len;
+ uint32_t len;
char *data;
} BYTESTRING;
/* Log Sequence Number (LSN)
* Make the LSN be a struct instead of an integer so that we get better type checking. */
-typedef struct __toku_lsn { u_int64_t lsn; } LSN;
+typedef struct __toku_lsn { uint64_t lsn; } LSN;
#define ZERO_LSN ((LSN){0})
#define MAX_LSN ((LSN){UINT64_MAX})
/* Message Sequence Number (MSN)
* Make the MSN be a struct instead of an integer so that we get better type checking. */
-typedef struct __toku_msn { u_int64_t msn; } MSN;
+typedef struct __toku_msn { uint64_t msn; } MSN;
#define ZERO_MSN ((MSN){0}) // dummy used for message construction, to be filled in when msg is applied to tree
-#define MIN_MSN ((MSN){(u_int64_t)1 << 62}) // first 2^62 values reserved for messages created before Dr. No (for upgrade)
+#define MIN_MSN ((MSN){(uint64_t)1 << 62}) // first 2^62 values reserved for messages created before Dr. No (for upgrade)
#define MAX_MSN ((MSN){UINT64_MAX})
typedef struct {
@@ -120,14 +112,14 @@ static const STAT64INFO_S ZEROSTATS = {0,0};
* With the introduction of the loader (ticket 2216), it is possible for the file that holds
* an open dictionary to change, so these are now separate and independent unique identifiers.
*/
-typedef struct {u_int32_t fileid;} FILENUM;
+typedef struct {uint32_t fileid;} FILENUM;
#define FILENUM_NONE ((FILENUM){UINT32_MAX})
-typedef struct {u_int64_t dictid;} DICTIONARY_ID;
+typedef struct {uint64_t dictid;} DICTIONARY_ID;
#define DICTIONARY_ID_NONE ((DICTIONARY_ID){0})
typedef struct {
- u_int32_t num;
+ uint32_t num;
FILENUM *filenums;
} FILENUMS;
@@ -166,10 +158,10 @@ enum ft_msg_type {
FT_UPDATE_BROADCAST_ALL = 15
};
-static inline BOOL
+static inline bool
ft_msg_type_applies_once(enum ft_msg_type type)
{
- BOOL ret_val;
+ bool ret_val;
switch (type) {
case FT_INSERT_NO_OVERWRITE:
case FT_INSERT:
@@ -177,7 +169,7 @@ ft_msg_type_applies_once(enum ft_msg_type type)
case FT_ABORT_ANY:
case FT_COMMIT_ANY:
case FT_UPDATE:
- ret_val = TRUE;
+ ret_val = true;
break;
case FT_COMMIT_BROADCAST_ALL:
case FT_COMMIT_BROADCAST_TXN:
@@ -186,18 +178,18 @@ ft_msg_type_applies_once(enum ft_msg_type type)
case FT_OPTIMIZE_FOR_UPGRADE:
case FT_UPDATE_BROADCAST_ALL:
case FT_NONE:
- ret_val = FALSE;
+ ret_val = false;
break;
default:
- assert(FALSE);
+ assert(false);
}
return ret_val;
}
-static inline BOOL
+static inline bool
ft_msg_type_applies_all(enum ft_msg_type type)
{
- BOOL ret_val;
+ bool ret_val;
switch (type) {
case FT_NONE:
case FT_INSERT_NO_OVERWRITE:
@@ -206,7 +198,7 @@ ft_msg_type_applies_all(enum ft_msg_type type)
case FT_ABORT_ANY:
case FT_COMMIT_ANY:
case FT_UPDATE:
- ret_val = FALSE;
+ ret_val = false;
break;
case FT_COMMIT_BROADCAST_ALL:
case FT_COMMIT_BROADCAST_TXN:
@@ -214,15 +206,15 @@ ft_msg_type_applies_all(enum ft_msg_type type)
case FT_OPTIMIZE:
case FT_OPTIMIZE_FOR_UPGRADE:
case FT_UPDATE_BROADCAST_ALL:
- ret_val = TRUE;
+ ret_val = true;
break;
default:
- assert(FALSE);
+ assert(false);
}
return ret_val;
}
-static inline BOOL
+static inline bool
ft_msg_type_does_nothing(enum ft_msg_type type)
{
return (type == FT_NONE);
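
The LSN/MSN definitions above wrap a plain uint64_t in a one-field struct precisely for the type checking their comments mention: distinct struct types cannot be silently interchanged the way two integer typedefs can. A self-contained sketch (the demo types mirror, but are not, the real LSN/MSN):

#include <stdint.h>

typedef struct { uint64_t lsn; } LSN_demo;
typedef struct { uint64_t msn; } MSN_demo;

static uint64_t fsync_up_to(LSN_demo l) { return l.lsn; }  // accepts LSNs only

int main(void) {
    LSN_demo l = { 42 };
    MSN_demo m = { 42 };
    (void) m;
    // fsync_up_to(m);           // compile error: MSN_demo is a different type
    return (int) fsync_up_to(l); // ok
}
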
diff --git a/ft/ftverify.cc b/ft/ftverify.cc
index 4ac4f49387e..ed01a18ca03 100644
--- a/ft/ftverify.cc
+++ b/ft/ftverify.cc
@@ -14,7 +14,6 @@
#include <fcntl.h>
#include <math.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
@@ -40,9 +39,9 @@ static double pct = 0.5;
struct verify_block_extra {
BLOCKNUM b;
int n_sub_blocks;
- u_int32_t header_length;
- u_int32_t calc_xsum;
- u_int32_t stored_xsum;
+ uint32_t header_length;
+ uint32_t calc_xsum;
+ uint32_t stored_xsum;
bool header_valid;
bool sub_blocks_valid;
struct sub_block_info *sub_block_results;
@@ -90,13 +89,13 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
{
struct rbuf rb_0;
struct rbuf rb_1;
- u_int64_t checkpoint_count_0;
- u_int64_t checkpoint_count_1;
+ uint64_t checkpoint_count_0;
+ uint64_t checkpoint_count_1;
LSN checkpoint_lsn_0;
LSN checkpoint_lsn_1;
- u_int32_t version_0, version_1;
- BOOL h0_acceptable = FALSE;
- BOOL h1_acceptable = FALSE;
+ uint32_t version_0, version_1;
+ bool h0_acceptable = false;
+ bool h1_acceptable = false;
int r0, r1;
int r;
@@ -111,7 +110,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
&version_0
);
if ((r0==0) && (checkpoint_lsn_0.lsn <= MAX_LSN.lsn)) {
- h0_acceptable = TRUE;
+ h0_acceptable = true;
}
}
{
@@ -125,7 +124,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
&version_1
);
if ((r1==0) && (checkpoint_lsn_1.lsn <= MAX_LSN.lsn)) {
- h1_acceptable = TRUE;
+ h1_acceptable = true;
}
}
@@ -284,8 +283,8 @@ check_block(BLOCKNUM blocknum, int64_t UU(blocksize), int64_t UU(address), void
// Using the node info, decompress all the keys and pivots to
// detect any corruptions.
for (int i = 0; i < node->n_children; ++i) {
- u_int32_t curr_offset = BP_START(ndd,i);
- u_int32_t curr_size = BP_SIZE(ndd,i);
+ uint32_t curr_offset = BP_START(ndd,i);
+ uint32_t curr_size = BP_SIZE(ndd,i);
struct rbuf curr_rbuf = {.buf = NULL, .size = 0, .ndone = 0};
rbuf_init(&curr_rbuf, rb.buf + curr_offset, curr_size);
struct sub_block curr_sb;
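
Context for the h0_acceptable/h1_acceptable logic above: an FT file carries two header copies so that a torn write can corrupt at most one. Verification deserializes both and accepts each copy only if it parsed cleanly and its checkpoint LSN is in range. A sketch of the selection step under those assumptions (struct hdr and pick_header are illustrative, not the real toku types):

#include <stdbool.h>
#include <stdint.h>

struct hdr { int parse_rc; uint64_t checkpoint_lsn; };

static int pick_header(struct hdr h0, struct hdr h1, uint64_t max_lsn) {
    bool ok0 = (h0.parse_rc == 0) && (h0.checkpoint_lsn <= max_lsn);
    bool ok1 = (h1.parse_rc == 0) && (h1.checkpoint_lsn <= max_lsn);
    if (ok0 && ok1) return (h1.checkpoint_lsn > h0.checkpoint_lsn) ? 1 : 0;  // prefer the newer copy
    if (ok0) return 0;
    if (ok1) return 1;
    return -1;  // neither header copy is usable
}
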
diff --git a/ft/hash-benchmarks/hash-benchmark-manually-open.cc b/ft/hash-benchmarks/hash-benchmark-manually-open.cc
index 6ec7206efb5..db7fade7cf6 100644
--- a/ft/hash-benchmarks/hash-benchmark-manually-open.cc
+++ b/ft/hash-benchmarks/hash-benchmark-manually-open.cc
@@ -39,13 +39,13 @@ int sum32 (int start, void *buf, int bytecount) {
return start;
}
-static const u_int32_t m = 0x5bd1e995;
+static const uint32_t m = 0x5bd1e995;
static const int r = 24;
-static const u_int32_t seed = 0x3dd3b51a;
+static const uint32_t seed = 0x3dd3b51a;
#define USE_ZERO_CHECKSUM 0
-static u_int32_t MurmurHash2 ( const void * key, int len)
+static uint32_t MurmurHash2 ( const void * key, int len)
{
if (USE_ZERO_CHECKSUM) return 0;
@@ -55,7 +55,7 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
// Initialize the hash to a 'random' value
- u_int32_t h = seed;
+ uint32_t h = seed;
// Mix 4 bytes at a time into the hash
@@ -63,7 +63,7 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
while(len >= 4)
{
- u_int32_t k = *(u_int32_t *)data;
+ uint32_t k = *(uint32_t *)data;
k *= m;
k ^= k >> r;
@@ -98,8 +98,8 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
struct murmur {
int n_bytes_in_k; // How many bytes in k
- u_int32_t k; // These are the extra bytes. Bytes are shifted into the low-order bits.
- u_int32_t h; // The hash so far (up to the most recent 4-byte boundary)
+ uint32_t k; // These are the extra bytes. Bytes are shifted into the low-order bits.
+ uint32_t h; // The hash so far (up to the most recent 4-byte boundary)
};
void murmur_init (struct murmur *mm) {
@@ -123,9 +123,9 @@ void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
if (USE_ZERO_CHECKSUM) return;
if (len==0) return;
const int n_bytes_in_k = mm->n_bytes_in_k;
- u_int32_t k = mm->k;
+ uint32_t k = mm->k;
const unsigned char *data = key;
- u_int32_t h = mm->h;
+ uint32_t h = mm->h;
switch (n_bytes_in_k) {
case 0:
switch (len) {
@@ -162,7 +162,7 @@ void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
// We've used up the partial bytes at the beginning of k.
assert(mm->n_bytes_in_k==0);
while (len >= 4) {
- u_int32_t k = toku_dtoh32(*(u_int32_t *)data);
+ uint32_t k = toku_dtoh32(*(uint32_t *)data);
//printf(" oldh=%08x k=%08x", h, k);
k *= m;
@@ -179,7 +179,7 @@ void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
mm->h=h;
//printf("%s:%d h=%08x\n", __FILE__, __LINE__, h);
{
- u_int32_t k=0;
+ uint32_t k=0;
switch (len) {
case 3: k = *data << 16; data++;
case 2: k |= *data << 8; data++;
@@ -192,9 +192,9 @@ void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
}
}
-u_int32_t murmur_finish (struct murmur *mm) {
+uint32_t murmur_finish (struct murmur *mm) {
if (USE_ZERO_CHECKSUM) return 0;
- u_int32_t h = mm->h;
+ uint32_t h = mm->h;
if (mm->n_bytes_in_k>0) {
h ^= mm->k;
h *= m;
@@ -209,7 +209,7 @@ u_int32_t murmur_finish (struct murmur *mm) {
}
struct sum84 {
- u_int32_t sum;
+ uint32_t sum;
int i;
};
void sum84_init (struct sum84 *s) { s->sum=0; s->i=0; };
@@ -233,7 +233,7 @@ int sum84_finish (struct sum84 *s) {
return s->sum;
}
-u_int32_t xor8_add (u_int32_t x, char *buf, int count) {
+uint32_t xor8_add (uint32_t x, char *buf, int count) {
while (count>4) {
x ^= *(int*)buf;
buf+=4; count-=4;
@@ -245,13 +245,13 @@ u_int32_t xor8_add (u_int32_t x, char *buf, int count) {
}
return x;
}
-u_int32_t xor8_finish (u_int32_t x) {
+uint32_t xor8_finish (uint32_t x) {
return (x ^ (x>>8) ^ (x>>16) ^ (x>>24))&0xff;
}
-u_int64_t xor8_64_add (u_int64_t x, char *buf, int count) {
+uint64_t xor8_64_add (uint64_t x, char *buf, int count) {
while (count>8) {
- x ^= *(u_int64_t*)buf;
+ x ^= *(uint64_t*)buf;
buf+=8; count-=8;
}
while (count>0) {
@@ -261,7 +261,7 @@ u_int64_t xor8_64_add (u_int64_t x, char *buf, int count) {
}
return x;
}
-u_int32_t xor8_64_finish (u_int64_t x) {
+uint32_t xor8_64_finish (uint64_t x) {
return (x ^ (x>>8) ^ (x>>16) ^ (x>>24) ^ (x>>32) ^ (x>>40) ^ (x>>48) ^ (x>>56))&0xff;
}
@@ -281,7 +281,7 @@ static void measure_bandwidths (void) {
measure_bandwidth("murmurby2", ({ struct murmur mm; murmur_init(&mm); int j; for(j=0; j<N; j+=2) murmur_add(&mm, buf+j, 2); c=murmur_finish(&mm); }));
measure_bandwidth("sum84by1 ", ({ struct sum84 s; sum84_init(&s); int j; for(j=0; j<N; j++) sum84_add(&s, buf+j, 1); c=sum84_finish(&s); }));
measure_bandwidth("xor8by1 ", ({ int j; c=0; for(j=0; j<N; j++) c=xor8_add(c, buf+j, 1); c=xor8_finish(c); }));
- measure_bandwidth("xor864by1", ({ int j; u_int64_t x=0; for(j=0; j<N; j++) x=xor8_64_add(x, buf+j, 1); c=xor8_64_finish(x); }));
+ measure_bandwidth("xor864by1", ({ int j; uint64_t x=0; for(j=0; j<N; j++) x=xor8_64_add(x, buf+j, 1); c=xor8_64_finish(x); }));
}
int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__unused__))) {
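
The hunks above show MurmurHash2 only in fragments; assembled, the whole function looks roughly like this (same m, r, and seed constants as above; note the benchmark seeds with h = seed where the canonical version uses seed ^ len, and memcpy is used here to sidestep the unaligned *(uint32_t*) read in the original):

#include <stdint.h>
#include <string.h>

static uint32_t murmur2_demo(const void *key, int len) {
    const uint32_t m = 0x5bd1e995;
    const int r = 24;
    uint32_t h = 0x3dd3b51a;                 // seed, as in the benchmark code
    const unsigned char *data = (const unsigned char *) key;
    while (len >= 4) {
        uint32_t k;
        memcpy(&k, data, 4);                 // safe unaligned load
        k *= m;  k ^= k >> r;  k *= m;       // mix the word
        h *= m;  h ^= k;                     // fold it into the hash
        data += 4;  len -= 4;
    }
    switch (len) {                           // 0-3 trailing bytes
    case 3: h ^= (uint32_t) data[2] << 16;   // fall through
    case 2: h ^= (uint32_t) data[1] << 8;    // fall through
    case 1: h ^= data[0];  h *= m;
    }
    h ^= h >> 13;  h *= m;  h ^= h >> 15;    // final avalanche
    return h;
}
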
diff --git a/ft/hash-benchmarks/hash-benchmark.cc b/ft/hash-benchmarks/hash-benchmark.cc
index 9a8ba65d800..5b329cbd721 100644
--- a/ft/hash-benchmarks/hash-benchmark.cc
+++ b/ft/hash-benchmarks/hash-benchmark.cc
@@ -44,9 +44,9 @@ int sum32 (void *buf, int bytecount) {
return start;
}
-u_int64_t sum64 (void *buf, int bytecount) {
- u_int64_t *ibuf = buf;
- u_int64_t start = 0;
+uint64_t sum64 (void *buf, int bytecount) {
+ uint64_t *ibuf = buf;
+ uint64_t start = 0;
assert(bytecount%8==0);
while (bytecount>0) {
start+=*ibuf;
@@ -56,13 +56,13 @@ u_int64_t sum64 (void *buf, int bytecount) {
return start;
}
-static const u_int32_t m = 0x5bd1e995;
+static const uint32_t m = 0x5bd1e995;
static const int r = 24;
-static const u_int32_t seed = 0x3dd3b51a;
+static const uint32_t seed = 0x3dd3b51a;
#define USE_ZERO_CHECKSUM 0
-static u_int32_t MurmurHash2 ( const void * key, int len)
+static uint32_t MurmurHash2 ( const void * key, int len)
{
if (USE_ZERO_CHECKSUM) return 0;
@@ -72,7 +72,7 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
// Initialize the hash to a 'random' value
- u_int32_t h = seed;
+ uint32_t h = seed;
// Mix 4 bytes at a time into the hash
@@ -80,7 +80,7 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
while(len >= 4)
{
- u_int32_t k = *(u_int32_t *)data;
+ uint32_t k = *(uint32_t *)data;
k *= m;
k ^= k >> r;
@@ -115,8 +115,8 @@ static u_int32_t MurmurHash2 ( const void * key, int len)
struct murmur {
int n_bytes_in_k; // How many bytes in k
- u_int32_t k; // These are the extra bytes. Bytes are shifted into the low-order bits.
- u_int32_t h; // The hash so far (up to the most recent 4-byte boundary)
+ uint32_t k; // These are the extra bytes. Bytes are shifted into the low-order bits.
+ uint32_t h; // The hash so far (up to the most recent 4-byte boundary)
};
void murmur_init (struct murmur *mm) {
@@ -129,11 +129,11 @@ inline void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
if (USE_ZERO_CHECKSUM) return;
assert(mm->n_bytes_in_k<4);
const unsigned char *data = key;
- u_int32_t h = mm->h;
+ uint32_t h = mm->h;
{
int n_bytes_in_k = mm->n_bytes_in_k;
if (n_bytes_in_k>0) {
- u_int32_t k = mm->k;
+ uint32_t k = mm->k;
while (n_bytes_in_k<4 && len>0) {
k = (k << 8) | *data;
n_bytes_in_k++;
@@ -162,7 +162,7 @@ inline void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
// We've used up the partial bytes at the beginning of k.
assert(mm->n_bytes_in_k==0);
while (len >= 4) {
- u_int32_t k = toku_dtoh32(*(u_int32_t *)data);
+ uint32_t k = toku_dtoh32(*(uint32_t *)data);
//printf(" oldh=%08x k=%08x", h, k);
k *= m;
@@ -179,7 +179,7 @@ inline void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
mm->h=h;
//printf("%s:%d h=%08x\n", __FILE__, __LINE__, h);
{
- u_int32_t k=0;
+ uint32_t k=0;
switch (len) {
case 3: k = *data << 16; data++;
case 2: k |= *data << 8; data++;
@@ -192,9 +192,9 @@ inline void murmur_add (struct murmur *mm, const void * key, unsigned int len) {
}
}
-u_int32_t murmur_finish (struct murmur *mm) {
+uint32_t murmur_finish (struct murmur *mm) {
if (USE_ZERO_CHECKSUM) return 0;
- u_int32_t h = mm->h;
+ uint32_t h = mm->h;
if (mm->n_bytes_in_k>0) {
h ^= mm->k;
h *= m;
@@ -209,7 +209,7 @@ u_int32_t murmur_finish (struct murmur *mm) {
}
struct sum84 {
- u_int32_t sum;
+ uint32_t sum;
int i;
};
void sum84_init (struct sum84 *s) { s->sum=0; s->i=0; };
@@ -233,7 +233,7 @@ int sum84_finish (struct sum84 *s) {
return s->sum;
}
-u_int32_t xor8_add (u_int32_t x, unsigned char *buf, int count) {
+uint32_t xor8_add (uint32_t x, unsigned char *buf, int count) {
while (count>4) {
x ^= *(int*)buf;
buf+=4; count-=4;
@@ -245,13 +245,13 @@ u_int32_t xor8_add (u_int32_t x, unsigned char *buf, int count) {
}
return x;
}
-u_int32_t xor8_finish (u_int32_t x) {
+uint32_t xor8_finish (uint32_t x) {
return (x ^ (x>>8) ^ (x>>16) ^ (x>>24))&0xff;
}
-u_int64_t xor8_64_add (u_int64_t x, unsigned char *buf, int count) {
+uint64_t xor8_64_add (uint64_t x, unsigned char *buf, int count) {
while (count>8) {
- x ^= *(u_int64_t*)buf;
+ x ^= *(uint64_t*)buf;
buf+=8; count-=8;
}
while (count>0) {
@@ -261,7 +261,7 @@ u_int64_t xor8_64_add (u_int64_t x, unsigned char *buf, int count) {
}
return x;
}
-u_int32_t xor8_64_finish (u_int64_t x) {
+uint32_t xor8_64_finish (uint64_t x) {
return (x ^ (x>>8) ^ (x>>16) ^ (x>>24) ^ (x>>32) ^ (x>>40) ^ (x>>48) ^ (x>>56))&0xff;
}
@@ -837,7 +837,7 @@ inline local unsigned long crc32_big(crc, buf, len)
#endif /* BYFOUR */
// Character-by-character implementation of x17.
-static u_int32_t x17c (u_int32_t c, void *buf, int len) {
+static uint32_t x17c (uint32_t c, void *buf, int len) {
int i;
unsigned char *cbuf=buf;
for (i=0; i<len; i++) {
@@ -847,14 +847,14 @@ static u_int32_t x17c (u_int32_t c, void *buf, int len) {
}
// x17 using shorts
-static u_int32_t x17s (u_int32_t c, void *buf, int len) {
+static uint32_t x17s (uint32_t c, void *buf, int len) {
unsigned char *cbuf=buf;
while ((((long)cbuf)&1 ) && len) {
c = c*17 + *cbuf;
len--; cbuf++;
}
while (len>2) {
- u_int16_t s = *(u_int16_t*)cbuf;
+ uint16_t s = *(uint16_t*)cbuf;
c = c*17*17 + (s&0xff)*17 + (s>>8);
len-=2; cbuf+=2;
}
@@ -865,14 +865,14 @@ static u_int32_t x17s (u_int32_t c, void *buf, int len) {
return c;
}
-static u_int32_t x17i (u_int32_t c, void *buf, int len) {
+static uint32_t x17i (uint32_t c, void *buf, int len) {
unsigned char *cbuf=buf;
while ((((long)cbuf)&3 ) && len) {
c = c*17 + *cbuf;
len--; cbuf++;
}
while (len>4) {
- u_int32_t l = *(u_int32_t*)cbuf;
+ uint32_t l = *(uint32_t*)cbuf;
c = c*17*17*17*17 + (l&0xff)*17*17*17 + ((l>>8)&0xff)*17*17 + ((l>>16)&0xff)*17 + ((l>>24)&0xff);
len-=4; cbuf+=4;
}
@@ -883,10 +883,10 @@ static u_int32_t x17i (u_int32_t c, void *buf, int len) {
return c;
}
-u_int32_t l17_fast64 (const void *buf, int len) {
+uint32_t l17_fast64 (const void *buf, int len) {
assert(len%8==0);
- const u_int64_t *lbuf=buf;
- u_int64_t c=0;
+ const uint64_t *lbuf=buf;
+ uint64_t c=0;
while (len>0) {
c = c*17 + *lbuf;
if (PRINT) printf("%d: c=%016lx sum=%016lx\n", __LINE__, *lbuf, c);
@@ -896,8 +896,8 @@ u_int32_t l17_fast64 (const void *buf, int len) {
return c&0xFFFFFFFF;
}
struct l1764 {
- u_int64_t sum;
- u_int64_t input;
+ uint64_t sum;
+ uint64_t input;
int n_input_bytes;
};
void l1764_init(struct l1764 *l) {
@@ -911,7 +911,7 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
const unsigned char *cbuf = vbuf;
// Special case short inputs
if (len==1) {
- u_int64_t input = l->input | ((u_int64_t)(*cbuf))<<(8*n_input_bytes);
+ uint64_t input = l->input | ((uint64_t)(*cbuf))<<(8*n_input_bytes);
n_input_bytes++;
if (n_input_bytes==8) {
l->sum = l->sum*17 + input;
@@ -923,8 +923,8 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
}
return;
} else if (len==2) {
- u_int64_t input = l->input;
- u_int64_t thisv = ((u_int64_t)(*(u_int16_t*)cbuf));
+ uint64_t input = l->input;
+ uint64_t thisv = ((uint64_t)(*(uint16_t*)cbuf));
if (n_input_bytes==7) {
l->sum = l->sum*17 + (input | (thisv<<(8*7)));
l->input = thisv>>8;
@@ -940,14 +940,14 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
return;
}
- u_int64_t sum;
+ uint64_t sum;
//assert(len>=0);
if (n_input_bytes) {
- u_int64_t input = l->input;
+ uint64_t input = l->input;
if (len>=8) {
sum = l->sum;
while (len>=8) {
- u_int64_t thisv = *(u_int64_t*)cbuf;
+ uint64_t thisv = *(uint64_t*)cbuf;
input |= thisv<<(8*n_input_bytes);
sum = sum*17 + input;
if (PRINT) printf("%d: input=%016lx sum=%016lx\n", __LINE__, input, sum);
@@ -961,7 +961,7 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
l->sum = sum;
}
if (len>=4) {
- u_int64_t thisv = *(u_int32_t*)cbuf;
+ uint64_t thisv = *(uint32_t*)cbuf;
if (n_input_bytes<4) {
input |= thisv<<(8*n_input_bytes);
if (PRINT) printf("%d: input=%016lx\n", __LINE__, input);
@@ -980,7 +980,7 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
}
//assert(n_input_bytes<=8);
while (n_input_bytes<8 && len) {
- input |= ((u_int64_t)(*cbuf))<<(8*n_input_bytes);
+ input |= ((uint64_t)(*cbuf))<<(8*n_input_bytes);
n_input_bytes++;
cbuf++;
len--;
@@ -1000,53 +1000,53 @@ inline void l1764_add (struct l1764 *l, const void *vbuf, int len) {
}
//assert(len>=0);
while (len>=8) {
- sum = sum*17 + *(u_int64_t*)cbuf;
+ sum = sum*17 + *(uint64_t*)cbuf;
cbuf+=8;
len -=8;
}
l->sum = sum;
n_input_bytes = 0;
- u_int64_t input;
+ uint64_t input;
l->n_input_bytes = len;
// Surprisingly, the loop is the fastest on bradley's laptop.
if (1) {
int i;
input=0;
for (i=0; i<len; i++) {
- input |= ((u_int64_t)(cbuf[i]))<<(8*i);
+ input |= ((uint64_t)(cbuf[i]))<<(8*i);
}
} else if (0) {
switch (len) {
- case 7: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(u_int16_t*)(cbuf+4)))<<32) | (((u_int64_t)(*(cbuf+4)))<<48); break;
- case 6: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(u_int16_t*)(cbuf+4)))<<32); break;
- case 5: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(cbuf+4)))<<32); break;
- case 4: input = ((u_int64_t)(*(u_int32_t*)(cbuf))); break;
- case 3: input = ((u_int64_t)(*(u_int16_t*)(cbuf))) | (((u_int64_t)(*(cbuf+2)))<<16); break;
- case 2: input = ((u_int64_t)(*(u_int16_t*)(cbuf))); break;
- case 1: input = ((u_int64_t)(*cbuf)); break;
+ case 7: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32) | (((uint64_t)(*(cbuf+4)))<<48); break;
+ case 6: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32); break;
+ case 5: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(cbuf+4)))<<32); break;
+ case 4: input = ((uint64_t)(*(uint32_t*)(cbuf))); break;
+ case 3: input = ((uint64_t)(*(uint16_t*)(cbuf))) | (((uint64_t)(*(cbuf+2)))<<16); break;
+ case 2: input = ((uint64_t)(*(uint16_t*)(cbuf))); break;
+ case 1: input = ((uint64_t)(*cbuf)); break;
case 0: input = 0; break;
default: abort();
}
} else {
input=0;
int i=0;
- if (len>=4) { input = ((u_int64_t)(*(u_int32_t*)(cbuf))); cbuf+=4; len-=4; i=4;}
- if (len>=2) { input |= ((u_int64_t)(*(u_int16_t*)(cbuf)))<<(i*8); cbuf+=2; len-=2; i+=2; }
- if (len>=1) { input |= ((u_int64_t)(*(u_int8_t *)(cbuf)))<<(i*8); /*cbuf+=1; len-=1; i++;*/ }
+ if (len>=4) { input = ((uint64_t)(*(uint32_t*)(cbuf))); cbuf+=4; len-=4; i=4;}
+ if (len>=2) { input |= ((uint64_t)(*(uint16_t*)(cbuf)))<<(i*8); cbuf+=2; len-=2; i+=2; }
+ if (len>=1) { input |= ((uint64_t)(*(uint8_t *)(cbuf)))<<(i*8); /*cbuf+=1; len-=1; i++;*/ }
}
l->input = input;
if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
}
-u_int32_t l1764_finish (struct l1764 *l) {
+uint32_t l1764_finish (struct l1764 *l) {
if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
assert(l->n_input_bytes==0);
return (l->sum)&0xffffffff;
}
-u_int32_t l17_fast (const void *buf, int len) {
+uint32_t l17_fast (const void *buf, int len) {
assert(len%4==0);
- const u_int32_t *lbuf=buf;
- u_int32_t c=0;
+ const uint32_t *lbuf=buf;
+ uint32_t c=0;
while (len>0) {
c = c*17 + *lbuf;
lbuf++;
@@ -1056,8 +1056,8 @@ u_int32_t l17_fast (const void *buf, int len) {
}
struct l17 {
- u_int32_t sum;
- u_int32_t input;
+ uint32_t sum;
+ uint32_t input;
int input_len;
};
void l17_init (struct l17 *l17) {
@@ -1108,7 +1108,7 @@ static void measure_bandwidths (void) {
measure_bandwidth("murmurby2", ({ struct murmur mm; murmur_init(&mm); int j; for(j=0; j<N; j+=2) murmur_add(&mm, buf+j, 2); c=murmur_finish(&mm); }));
measure_bandwidth("sum84by1 ", ({ struct sum84 s; sum84_init(&s); int j; for(j=0; j<N; j++) sum84_add(&s, buf+j, 1); c=sum84_finish(&s); }));
measure_bandwidth("xor8by1 ", ({ int j; c=0; for(j=0; j<N; j++) c=xor8_add(c, buf+j, 1); c=xor8_finish(c); }));
- measure_bandwidth("xor864by1", ({ int j; u_int64_t x=0; for(j=0; j<N; j++) x=xor8_64_add(x, buf+j, 1); c=xor8_64_finish(x); }));
+ measure_bandwidth("xor864by1", ({ int j; uint64_t x=0; for(j=0; j<N; j++) x=xor8_64_add(x, buf+j, 1); c=xor8_64_finish(x); }));
}
int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__unused__))) {
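
The x17s and x17i variants above rely on the algebraic unrolling ((c*17 + b0)*17 + b1) == c*17*17 + b0*17 + b1, extended to two or four bytes per iteration. A small self-checking sketch of that identity (all names invented):

#include <assert.h>
#include <stdint.h>

static uint32_t x17_bytewise(uint32_t c, const unsigned char *p, int len) {
    for (int i = 0; i < len; i++) c = c*17 + p[i];
    return c;
}

static uint32_t x17_by4(uint32_t c, const unsigned char *p, int len) {
    while (len >= 4) {   // four bytes folded per iteration
        c = c*17*17*17*17 + p[0]*17*17*17 + p[1]*17*17 + p[2]*17 + p[3];
        p += 4;  len -= 4;
    }
    while (len-- > 0) c = c*17 + *p++;   // leftover bytes, one at a time
    return c;
}

int main(void) {
    const unsigned char buf[] = "0123456789abcd";
    assert(x17_bytewise(1, buf, 14) == x17_by4(1, buf, 14));
    return 0;
}
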
diff --git a/ft/hashfun.h b/ft/hashfun.h
index 87ec4ddcbb7..e7102e871e7 100644
--- a/ft/hashfun.h
+++ b/ft/hashfun.h
@@ -10,11 +10,11 @@
// FNV Hash: From an idea sent by Glenn Fowler and Phong Vo to the IEEE POSIX 1003.2 committee. Landon Curt Noll improved it.
// See: http://isthe.com/chongo/tech/comp/fnv/
-static inline u_int32_t hash_key_extend(u_int32_t initial_hash,
+static inline uint32_t hash_key_extend(uint32_t initial_hash,
const unsigned char *key,
size_t keylen) {
size_t i;
- u_int32_t hash = initial_hash;
+ uint32_t hash = initial_hash;
for (i=0; i<keylen; i++, key++) {
hash *= 16777619;
// GCC 4.1.2 -O2 and -O3 translates the following shifts back into the multiply shown on the line above here.
@@ -25,7 +25,7 @@ static inline u_int32_t hash_key_extend(u_int32_t initial_hash,
return hash;
}
-static inline u_int32_t hash_key(const unsigned char *key, size_t keylen) {
+static inline uint32_t hash_key(const unsigned char *key, size_t keylen) {
return hash_key_extend(0, key, keylen);
}
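
A quick usage sketch (assuming hashfun.h is in scope; hash_kv is invented for the demo): because hash_key_extend threads the running hash through, a key and a value can be hashed as one logical stream without concatenating the buffers first.

static uint32_t hash_kv(const unsigned char *key, size_t keylen,
                        const unsigned char *val, size_t vallen) {
    uint32_t h = hash_key(key, keylen);        // same as hash_key_extend(0, ...)
    return hash_key_extend(h, val, vallen);    // continue the same FNV stream
}
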
diff --git a/ft/kibbutz.cc b/ft/kibbutz.cc
index 66266c05a3a..60cce5f9e8c 100644
--- a/ft/kibbutz.cc
+++ b/ft/kibbutz.cc
@@ -5,7 +5,6 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <config.h>
-#include <stdbool.h>
#include <toku_pthread.h>
#include "kibbutz.h"
#include "includes.h"
diff --git a/ft/le-cursor.h b/ft/le-cursor.h
index 3d8bf3c9fff..6df96eee824 100644
--- a/ft/le-cursor.h
+++ b/ft/le-cursor.h
@@ -34,11 +34,11 @@ int toku_le_cursor_close(LE_CURSOR le_cursor);
// Failure: returns a non-zero error number
int toku_le_cursor_next(LE_CURSOR le_cursor, FT_GET_CALLBACK_FUNCTION getf, void *getf_v);
-// Return TRUE if the key is to the right of the LE_CURSOR position. that is, current cursor key < given key
-// Otherwise returns FALSE when the key is at or to the left of the LE_CURSOR position. that is, current cursor key >= given key
-// The LE_CURSOR position is intialized to -infinity. Any key comparision with -infinity returns TRUE.
+// Return true if the key is to the right of the LE_CURSOR position; that is, current cursor key < given key.
+// Otherwise returns false when the key is at or to the left of the LE_CURSOR position; that is, current cursor key >= given key.
+// The LE_CURSOR position is initialized to -infinity. Any key comparison with -infinity returns true.
// When the cursor runs off the right edge of the tree, the LE_CURSOR position is set to +infinity. Any key comparison with +infinity
-// returns FALSE.
+// returns false.
bool toku_le_cursor_is_key_greater(LE_CURSOR le_cursor, const DBT *key);
#endif
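
A hypothetical wrapper showing how the predicate documented above composes with toku_le_cursor_next (the two toku_ calls are the real prototypes from this header; scan_up_to and its loop are illustrative): advance while the bound key is still strictly to the right of the cursor.

static int scan_up_to(LE_CURSOR le_cursor, const DBT *bound_key,
                      FT_GET_CALLBACK_FUNCTION getf, void *getf_v) {
    int r = 0;
    // true means: current cursor key < bound_key, so keep going.
    while (toku_le_cursor_is_key_greater(le_cursor, bound_key)) {
        r = toku_le_cursor_next(le_cursor, getf, getf_v);
        if (r != 0) break;   // error, or ran off the right edge of the tree
    }
    return r;
}
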
diff --git a/ft/leafentry.h b/ft/leafentry.h
index 98e6b08ec9a..9fd36ae6163 100644
--- a/ft/leafentry.h
+++ b/ft/leafentry.h
@@ -61,7 +61,7 @@ struct __attribute__ ((__packed__)) leafentry {
struct __attribute__ ((__packed__)) leafentry_mvcc {
uint32_t num_cxrs; // number of committed transaction records
uint8_t num_pxrs; // number of provisional transaction records
- u_int8_t key_xrs[0]; //Actual key,
+ uint8_t key_xrs[0]; //Actual key,
//then TXNIDs of XRs relevant for reads:
// if provisional XRs exist, store OUTERMOST TXNID
// store committed TXNIDs, from most recently committed to least recently committed (newest first)
@@ -133,19 +133,19 @@ void wbuf_nocrc_LEAFENTRY(struct wbuf *w, LEAFENTRY le);
int print_leafentry (FILE *outf, LEAFENTRY v); // Print a leafentry out in human-readable form.
int le_latest_is_del(LEAFENTRY le); // Return true if it is a provisional delete.
-BOOL le_is_clean(LEAFENTRY le); //Return how many xids exist (0 does not count)
-BOOL le_has_xids(LEAFENTRY le, XIDS xids); // Return true transaction represented by xids is still provisional in this leafentry (le's xid stack is a superset or equal to xids)
-u_int32_t le_latest_keylen (LEAFENTRY le); // Return the latest keylen.
+bool le_is_clean(LEAFENTRY le); //Return true if the leafentry holds only a committed value (no transaction records)
+bool le_has_xids(LEAFENTRY le, XIDS xids); // Return true if the transaction represented by xids is still provisional in this leafentry (le's xid stack is a superset of or equal to xids)
+uint32_t le_latest_keylen (LEAFENTRY le); // Return the latest keylen.
void* le_latest_val (LEAFENTRY le); // Return the latest val (return NULL for provisional deletes)
-u_int32_t le_latest_vallen (LEAFENTRY le); // Return the latest vallen. Returns 0 for provisional deletes.
-void* le_latest_val_and_len (LEAFENTRY le, u_int32_t *len);
+uint32_t le_latest_vallen (LEAFENTRY le); // Return the latest vallen. Returns 0 for provisional deletes.
+void* le_latest_val_and_len (LEAFENTRY le, uint32_t *len);
// Return any key or value (even if it's only provisional).
void* le_key (LEAFENTRY le);
-u_int32_t le_keylen (LEAFENTRY le);
-void* le_key_and_len (LEAFENTRY le, u_int32_t *len);
+uint32_t le_keylen (LEAFENTRY le);
+void* le_key_and_len (LEAFENTRY le, uint32_t *len);
-u_int64_t le_outermost_uncommitted_xid (LEAFENTRY le);
+uint64_t le_outermost_uncommitted_xid (LEAFENTRY le);
void
le_committed_mvcc(uint8_t *key, uint32_t keylen,
@@ -168,9 +168,9 @@ le_clean(uint8_t *key, uint32_t keylen,
// r|r!=0&&r!=TOKUDB_ACCEPT: Quit early, return r, because something unexpected went wrong (error case)
typedef int(*LE_ITERATE_CALLBACK)(TXNID id, TOKUTXN context);
-int le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, BOOL *is_empty, TOKUTXN context);
+int le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_empty, TOKUTXN context);
-int le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, u_int32_t *vallenp, TOKUTXN context);
+int le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vallenp, TOKUTXN context);
size_t
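
The key_xrs[0] member above is the zero-length-array flavor of a flexible array member: the fixed header and its variable-length payload live in one allocation. A minimal sketch of building such an object (packed_demo is illustrative, not the real leafentry layout):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct __attribute__ ((__packed__)) packed_demo {
    uint32_t num_cxrs;
    uint8_t  num_pxrs;
    uint8_t  payload[0];   // key bytes and txnids follow the header
};

static struct packed_demo *make_demo(const void *key, uint32_t keylen) {
    struct packed_demo *p =
        (struct packed_demo *) malloc(sizeof(*p) + keylen);
    if (p == NULL) return NULL;
    p->num_cxrs = 0;
    p->num_pxrs = 0;
    memcpy(p->payload, key, keylen);   // payload sits right after the header
    return p;
}
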
diff --git a/ft/log-internal.h b/ft/log-internal.h
index 6104a8bc9aa..37c205b0a13 100644
--- a/ft/log-internal.h
+++ b/ft/log-internal.h
@@ -58,12 +58,12 @@ struct tokulogger {
toku_mutex_t output_condition_lock; // if you need both this lock and input_lock, acquire the output_lock first, then input_lock. More typical is to get the output_is_available condition to be false, and then acquire the input_lock.
toku_cond_t output_condition; //
- BOOL output_is_available; // this is part of the predicate for the output condition. It's true if no thread is modifying the output (either doing an fsync or otherwise fiddling with the output).
+ bool output_is_available; // this is part of the predicate for the output condition. It's true if no thread is modifying the output (either doing an fsync or otherwise fiddling with the output).
- BOOL is_open;
- BOOL is_panicked;
- BOOL write_log_files;
- BOOL trim_log_files; // for test purposes
+ bool is_open;
+ bool is_panicked;
+ bool write_log_files;
+ bool trim_log_files; // for test purposes
int panic_errno;
char *directory; // file system directory
DIR *dir; // descriptor for directory
@@ -86,11 +86,11 @@ struct tokulogger {
// To access the logfilemgr you must have the output condition lock.
TOKULOGFILEMGR logfilemgr;
- u_int32_t write_block_size; // How big should the blocks be written to various logs?
+ uint32_t write_block_size; // How big should the blocks be written to various logs?
- u_int64_t input_lock_ctr; // how many times has input_lock been taken and released
- u_int64_t output_condition_lock_ctr; // how many times has output_condition_lock been taken and released
- u_int64_t swap_ctr; // how many times have input/output log buffers been swapped
+ uint64_t input_lock_ctr; // how many times has input_lock been taken and released
+ uint64_t output_condition_lock_ctr; // how many times has output_condition_lock been taken and released
+ uint64_t swap_ctr; // how many times have input/output log buffers been swapped
void (*remove_finalize_callback) (DICTIONARY_ID, void*); // ydb-level callback to be called when a transaction that ...
void * remove_finalize_callback_extra; // ... deletes a file is committed or when one that creates a file is aborted.
CACHEFILE rollback_cachefile;
@@ -135,11 +135,11 @@ struct txn_roll_info {
struct tokutxn {
// These don't change after create:
const time_t starttime; // timestamp in seconds of transaction start
- const u_int64_t txnid64; // this happens to be the first lsn
- const u_int64_t ancestor_txnid64; // this is the lsn of root transaction
- const u_int64_t snapshot_txnid64; // this is the lsn of the snapshot
+ const uint64_t txnid64; // this happens to be the first lsn
+ const uint64_t ancestor_txnid64; // this is the lsn of root transaction
+ const uint64_t snapshot_txnid64; // this is the lsn of the snapshot
const TXN_SNAPSHOT_TYPE snapshot_type;
- const BOOL recovered_from_checkpoint;
+ const bool recovered_from_checkpoint;
const TOKULOGGER logger;
const TOKUTXN parent;
// These don't either but they're created in a way that's hard to make
@@ -151,9 +151,9 @@ struct tokutxn {
bool begin_was_logged;
// These are not read until a commit, prepare, or abort starts, and
// they're "monotonic" (only go false->true) during operation:
- BOOL checkpoint_needed_before_commit;
- BOOL do_fsync;
- BOOL force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
+ bool checkpoint_needed_before_commit;
+ bool do_fsync;
+ bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
// Not used until commit, prepare, or abort starts:
LSN do_fsync_lsn;
@@ -187,7 +187,7 @@ struct txninfo {
uint64_t rollentry_raw_count; // the total count of every byte in the transaction and all its children.
uint32_t num_fts;
FT *open_fts;
- BOOL force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
+ bool force_fsync_on_commit; //This transaction NEEDS an fsync once (if) it commits. (commit means root txn)
uint64_t num_rollback_nodes;
uint64_t num_rollentries;
BLOCKNUM spilled_rollback_head;
@@ -195,19 +195,19 @@ struct txninfo {
BLOCKNUM current_rollback;
};
-static inline int toku_logsizeof_u_int8_t (u_int32_t v __attribute__((__unused__))) {
+static inline int toku_logsizeof_uint8_t (uint32_t v __attribute__((__unused__))) {
return 1;
}
-static inline int toku_logsizeof_u_int32_t (u_int32_t v __attribute__((__unused__))) {
+static inline int toku_logsizeof_uint32_t (uint32_t v __attribute__((__unused__))) {
return 4;
}
-static inline int toku_logsizeof_u_int64_t (u_int32_t v __attribute__((__unused__))) {
+static inline int toku_logsizeof_uint64_t (uint32_t v __attribute__((__unused__))) {
return 8;
}
-static inline int toku_logsizeof_BOOL (u_int32_t v __attribute__((__unused__))) {
+static inline int toku_logsizeof_bool (uint32_t v __attribute__((__unused__))) {
return 1;
}
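
The renamed toku_logsizeof_* helpers above ignore their argument because each of these field types has a fixed serialized width; the generated fsize functions just sum them. A hypothetical hand-written equivalent for a record holding one bool and one uint64_t (assuming log-internal.h is in scope; the surrounding command/crc/len byte counts are assumptions about the record framing):

static int logsizeof_demo_record(bool flag, uint64_t count) {
    return 1                                          // command byte
         + toku_logsizeof_bool(flag)                  // 1
         + toku_logsizeof_uint64_t((uint32_t) count)  // 8; the argument is unused
         + 4 + 4;                                     // trailing crc and len
}
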
diff --git a/ft/log.h b/ft/log.h
index 17e5f0eb6a9..e177c14ed92 100644
--- a/ft/log.h
+++ b/ft/log.h
@@ -32,17 +32,17 @@ static inline int toku_copy_BYTESTRING(BYTESTRING *target, BYTESTRING val) {
}
static inline void toku_free_TXNID(TXNID txnid __attribute__((__unused__))) {}
static inline void toku_free_LSN(LSN lsn __attribute__((__unused__))) {}
-static inline void toku_free_u_int64_t(u_int64_t u __attribute__((__unused__))) {}
-static inline void toku_free_u_int32_t(u_int32_t u __attribute__((__unused__))) {}
-static inline void toku_free_u_int8_t(u_int8_t u __attribute__((__unused__))) {}
+static inline void toku_free_uint64_t(uint64_t u __attribute__((__unused__))) {}
+static inline void toku_free_uint32_t(uint32_t u __attribute__((__unused__))) {}
+static inline void toku_free_uint8_t(uint8_t u __attribute__((__unused__))) {}
static inline void toku_free_FILENUM(FILENUM u __attribute__((__unused__))) {}
static inline void toku_free_BLOCKNUM(BLOCKNUM u __attribute__((__unused__))) {}
-static inline void toku_free_BOOL(BOOL u __attribute__((__unused__))) {}
+static inline void toku_free_bool(bool u __attribute__((__unused__))) {}
static inline void toku_free_XIDP(XIDP xidp) { toku_free(xidp); }
static inline void toku_free_BYTESTRING(BYTESTRING val) { toku_free(val.data); }
static inline void toku_free_FILENUMS(FILENUMS val) { toku_free(val.filenums); }
-int toku_maybe_upgrade_log (const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, BOOL * upgrade_in_progress);
+int toku_maybe_upgrade_log (const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, bool * upgrade_in_progress);
uint64_t toku_log_upgrade_get_footprint(void);
diff --git a/ft/log_upgrade.cc b/ft/log_upgrade.cc
index dfbe119a46b..0c0ed93d391 100644
--- a/ft/log_upgrade.cc
+++ b/ft/log_upgrade.cc
@@ -201,13 +201,13 @@ upgrade_log(const char *env_dir, const char *log_dir, LSN last_lsn, TXNID last_x
// If log on disk is old (environment is old) and clean shutdown, then create log of current version,
// which will make the environment of the current version (and delete the old logs).
int
-toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, BOOL * upgrade_in_progress) {
+toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_clean_shutdown, bool * upgrade_in_progress) {
int r;
int lockfd = -1;
FOOTPRINTSETUP(100000);
footprint = 0;
- *upgrade_in_progress = FALSE; // set TRUE only if all criteria are met and we're actually doing an upgrade
+ *upgrade_in_progress = false; // set true only if all criteria are met and we're actually doing an upgrade
FOOTPRINT(1);
r = toku_recover_lock(log_dir, &lockfd);
@@ -219,7 +219,7 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl
assert(env_dir);
uint32_t version_of_logs_on_disk;
- BOOL found_any_logs;
+ bool found_any_logs;
r = toku_get_version_of_logs_on_disk(log_dir, &found_any_logs, &version_of_logs_on_disk);
if (r != 0) {
goto cleanup;
@@ -243,7 +243,7 @@ toku_maybe_upgrade_log(const char *env_dir, const char *log_dir, LSN * lsn_of_cl
}
FOOTPRINT(5);
*lsn_of_clean_shutdown = last_lsn;
- *upgrade_in_progress = TRUE;
+ *upgrade_in_progress = true;
r = upgrade_log(env_dir, log_dir, last_lsn, last_xid);
}
cleanup:
diff --git a/ft/logcursor.cc b/ft/logcursor.cc
index 2d5a8bf84cb..4f4a023ed03 100644
--- a/ft/logcursor.cc
+++ b/ft/logcursor.cc
@@ -16,9 +16,9 @@ struct toku_logcursor {
FILE *cur_fp;
size_t buffer_size;
void *buffer;
- BOOL is_open;
+ bool is_open;
struct log_entry entry;
- BOOL entry_valid;
+ bool entry_valid;
LSN cur_lsn;
enum lc_direction last_direction;
};
@@ -44,7 +44,7 @@ static int lc_close_cur_logfile(TOKULOGCURSOR lc) {
if ( lc->is_open ) {
r = fclose(lc->cur_fp);
assert(0==r);
- lc->is_open = FALSE;
+ lc->is_open = false;
}
return 0;
}
@@ -94,7 +94,7 @@ static int lc_open_logfile(TOKULOGCURSOR lc, int index) {
return DB_BADFORMAT;
}
// mark as open
- lc->is_open = TRUE;
+ lc->is_open = true;
return r;
}
@@ -122,9 +122,9 @@ static int lc_create(TOKULOGCURSOR *lc, const char *log_dir) {
// malloc a cursor
TOKULOGCURSOR cursor = (TOKULOGCURSOR) toku_xmalloc(sizeof(struct toku_logcursor));
// find logfiles in logdir
- cursor->is_open = FALSE;
+ cursor->is_open = false;
cursor->cur_logfiles_index = 0;
- cursor->entry_valid = FALSE;
+ cursor->entry_valid = false;
cursor->buffer_size = 1<<20; // use a 1MB stream buffer (setvbuf)
cursor->buffer = toku_malloc(cursor->buffer_size); // it does not matter if it fails
// cursor->logdir must be an absolute path
@@ -189,7 +189,7 @@ int toku_logcursor_destroy(TOKULOGCURSOR *lc) {
if ( *lc ) {
if ( (*lc)->entry_valid ) {
toku_log_free_log_entry_resources(&((*lc)->entry));
- (*lc)->entry_valid = FALSE;
+ (*lc)->entry_valid = false;
}
r = lc_close_cur_logfile(*lc);
int lf;
@@ -267,7 +267,7 @@ int toku_logcursor_next(TOKULOGCURSOR lc, struct log_entry **le) {
int r=0;
if ( lc->entry_valid ) {
toku_log_free_log_entry_resources(&(lc->entry));
- lc->entry_valid = FALSE;
+ lc->entry_valid = false;
if (lc->last_direction == LC_BACKWARD) {
struct log_entry junk;
r = toku_log_fread(lc->cur_fp, &junk);
@@ -284,7 +284,7 @@ int toku_logcursor_next(TOKULOGCURSOR lc, struct log_entry **le) {
r = lc_check_lsn(lc, LC_FORWARD);
if (r!=0) return r;
lc->last_direction = LC_FORWARD;
- lc->entry_valid = TRUE;
+ lc->entry_valid = true;
*le = &(lc->entry);
return r;
}
@@ -293,7 +293,7 @@ int toku_logcursor_prev(TOKULOGCURSOR lc, struct log_entry **le) {
int r=0;
if ( lc->entry_valid ) {
toku_log_free_log_entry_resources(&(lc->entry));
- lc->entry_valid = FALSE;
+ lc->entry_valid = false;
if (lc->last_direction == LC_FORWARD) {
struct log_entry junk;
r = toku_log_fread_backward(lc->cur_fp, &junk);
@@ -310,7 +310,7 @@ int toku_logcursor_prev(TOKULOGCURSOR lc, struct log_entry **le) {
r = lc_check_lsn(lc, LC_BACKWARD);
if (r!=0) return r;
lc->last_direction = LC_BACKWARD;
- lc->entry_valid = TRUE;
+ lc->entry_valid = true;
*le = &(lc->entry);
return r;
}
@@ -319,7 +319,7 @@ int toku_logcursor_first(TOKULOGCURSOR lc, struct log_entry **le) {
int r=0;
if ( lc->entry_valid ) {
toku_log_free_log_entry_resources(&(lc->entry));
- lc->entry_valid = FALSE;
+ lc->entry_valid = false;
}
// close any but the first log file
if ( lc->cur_logfiles_index != 0 ) {
@@ -339,7 +339,7 @@ int toku_logcursor_first(TOKULOGCURSOR lc, struct log_entry **le) {
r = lc_check_lsn(lc, LC_FIRST);
if (r!=0) return r;
lc->last_direction = LC_FIRST;
- lc->entry_valid = TRUE;
+ lc->entry_valid = true;
*le = &(lc->entry);
return r;
}
@@ -349,7 +349,7 @@ int toku_logcursor_last(TOKULOGCURSOR lc, struct log_entry **le) {
int r=0;
if ( lc->entry_valid ) {
toku_log_free_log_entry_resources(&(lc->entry));
- lc->entry_valid = FALSE;
+ lc->entry_valid = false;
}
// close any but last log file
if ( lc->cur_logfiles_index != lc->n_logfiles-1 ) {
@@ -401,7 +401,7 @@ int toku_logcursor_last(TOKULOGCURSOR lc, struct log_entry **le) {
if (r!=0)
return r;
lc->last_direction = LC_LAST;
- lc->entry_valid = TRUE;
+ lc->entry_valid = true;
*le = &(lc->entry);
return r;
}
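
The junk reads in toku_logcursor_next/prev above handle a direction flip: after a backward read, the stream position sits at the start of the entry just returned, so moving forward again must first skip over it. The same mechanic on a plain file of fixed 16-byte records (everything here is illustrative):

#include <stdio.h>

static int next_after_backward(FILE *f, char rec[16]) {
    char junk[16];
    if (fread(junk, 16, 1, f) != 1) return -1;  // skip the entry already returned
    if (fread(rec, 16, 1, f) != 1) return -1;   // this is the real "next" entry
    return 0;
}
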
diff --git a/ft/logformat.cc b/ft/logformat.cc
index f8e6b12885e..9b5e9249188 100644
--- a/ft/logformat.cc
+++ b/ft/logformat.cc
@@ -17,7 +17,6 @@
#include <ctype.h>
#include <errno.h>
#include <stdarg.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -69,11 +68,11 @@ const struct logtype rollbacks[] = {
{"BYTESTRING", "key", 0},
NULLFIELD}, LOG_BEGIN_ACTION_NA},
{"rollinclude", 'r', FA{{"TXNID", "xid", 0},
- {"u_int64_t", "num_nodes", 0},
+ {"uint64_t", "num_nodes", 0},
{"BLOCKNUM", "spilled_head", 0},
- {"u_int32_t", "spilled_head_hash", 0},
+ {"uint32_t", "spilled_head_hash", 0},
{"BLOCKNUM", "spilled_tail", 0},
- {"u_int32_t", "spilled_tail_hash", 0},
+ {"uint32_t", "spilled_tail_hash", 0},
NULLFIELD}, LOG_BEGIN_ACTION_NA},
{"load", 'l', FA{{"FILENUM", "old_filenum", 0},
{"BYTESTRING", "new_iname", 0},
@@ -88,7 +87,7 @@ const struct logtype rollbacks[] = {
{"BYTESTRING", "key", 0},
NULLFIELD}, LOG_BEGIN_ACTION_NA},
{"cmdupdatebroadcast", 'B', FA{{"FILENUM", "filenum", 0},
- {"BOOL", "is_resetting_op", 0},
+ {"bool", "is_resetting_op", 0},
NULLFIELD}, LOG_BEGIN_ACTION_NA},
{"change_fdescriptor", 'D', FA{{"FILENUM", "filenum", 0},
{"BYTESTRING", "old_descriptor", 0},
@@ -101,27 +100,27 @@ const struct logtype logtypes[] = {
#if 0 // no longer used, but reserve the type
{"local_txn_checkpoint", 'c', FA{{"TXNID", "xid", 0}, NULLFIELD}},
#endif
- {"begin_checkpoint", 'x', FA{{"u_int64_t", "timestamp", 0}, {"TXNID", "last_xid", 0}, NULLFIELD}, IGNORE_LOG_BEGIN},
+ {"begin_checkpoint", 'x', FA{{"uint64_t", "timestamp", 0}, {"TXNID", "last_xid", 0}, NULLFIELD}, IGNORE_LOG_BEGIN},
{"end_checkpoint", 'X', FA{{"LSN", "lsn_begin_checkpoint", 0},
- {"u_int64_t", "timestamp", 0},
- {"u_int32_t", "num_fassociate_entries", 0}, // how many files were checkpointed
- {"u_int32_t", "num_xstillopen_entries", 0}, // how many txns were checkpointed
+ {"uint64_t", "timestamp", 0},
+ {"uint32_t", "num_fassociate_entries", 0}, // how many files were checkpointed
+ {"uint32_t", "num_xstillopen_entries", 0}, // how many txns were checkpointed
NULLFIELD}, IGNORE_LOG_BEGIN},
//TODO: #2037 Add dname
{"fassociate", 'f', FA{{"FILENUM", "filenum", 0},
- {"u_int32_t", "treeflags", 0},
+ {"uint32_t", "treeflags", 0},
{"BYTESTRING", "iname", 0}, // pathname of file
- {"u_int8_t", "unlink_on_close", 0},
+ {"uint8_t", "unlink_on_close", 0},
NULLFIELD}, IGNORE_LOG_BEGIN},
//We do not use a TXNINFO struct since recovery log has
//FILENUMS and TOKUTXN has FTs (for open_fts)
{"xstillopen", 's', FA{{"TXNID", "xid", 0},
{"TXNID", "parentxid", 0},
- {"u_int64_t", "rollentry_raw_count", 0},
+ {"uint64_t", "rollentry_raw_count", 0},
{"FILENUMS", "open_filenums", 0},
- {"u_int8_t", "force_fsync_on_commit", 0},
- {"u_int64_t", "num_rollback_nodes", 0},
- {"u_int64_t", "num_rollentries", 0},
+ {"uint8_t", "force_fsync_on_commit", 0},
+ {"uint64_t", "num_rollback_nodes", 0},
+ {"uint64_t", "num_rollentries", 0},
{"BLOCKNUM", "spilled_rollback_head", 0},
{"BLOCKNUM", "spilled_rollback_tail", 0},
{"BLOCKNUM", "current_rollback", 0},
@@ -129,11 +128,11 @@ const struct logtype logtypes[] = {
// prepared txns need a gid
{"xstillopenprepared", 'p', FA{{"TXNID", "xid", 0},
{"XIDP", "xa_xid", 0}, // prepared transactions need a gid, and have no parentxid.
- {"u_int64_t", "rollentry_raw_count", 0},
+ {"uint64_t", "rollentry_raw_count", 0},
{"FILENUMS", "open_filenums", 0},
- {"u_int8_t", "force_fsync_on_commit", 0},
- {"u_int64_t", "num_rollback_nodes", 0},
- {"u_int64_t", "num_rollentries", 0},
+ {"uint8_t", "force_fsync_on_commit", 0},
+ {"uint64_t", "num_rollback_nodes", 0},
+ {"uint64_t", "num_rollentries", 0},
{"BLOCKNUM", "spilled_rollback_head", 0},
{"BLOCKNUM", "spilled_rollback_tail", 0},
{"BLOCKNUM", "current_rollback", 0},
@@ -150,16 +149,16 @@ const struct logtype logtypes[] = {
{"fcreate", 'F', FA{{"TXNID", "xid", 0},
{"FILENUM", "filenum", 0},
{"BYTESTRING", "iname", 0},
- {"u_int32_t", "mode", "0%o"},
- {"u_int32_t", "treeflags", 0},
- {"u_int32_t", "nodesize", 0},
- {"u_int32_t", "basementnodesize", 0},
- {"u_int32_t", "compression_method", 0},
+ {"uint32_t", "mode", "0%o"},
+ {"uint32_t", "treeflags", 0},
+ {"uint32_t", "nodesize", 0},
+ {"uint32_t", "basementnodesize", 0},
+ {"uint32_t", "compression_method", 0},
NULLFIELD}, SHOULD_LOG_BEGIN},
//TODO: #2037 Add dname
{"fopen", 'O', FA{{"BYTESTRING", "iname", 0},
{"FILENUM", "filenum", 0},
- {"u_int32_t", "treeflags", 0},
+ {"uint32_t", "treeflags", 0},
NULLFIELD}, IGNORE_LOG_BEGIN},
//TODO: #2037 Add dname
{"fclose", 'e', FA{{"BYTESTRING", "iname", 0},
@@ -195,14 +194,14 @@ const struct logtype logtypes[] = {
{"BYTESTRING", "src_key", 0},
{"BYTESTRING", "src_val", 0},
NULLFIELD}, SHOULD_LOG_BEGIN},
- {"comment", 'T', FA{{"u_int64_t", "timestamp", 0},
+ {"comment", 'T', FA{{"uint64_t", "timestamp", 0},
{"BYTESTRING", "comment", 0},
NULLFIELD}, IGNORE_LOG_BEGIN},
// Note: shutdown_up_to_19 log entry is NOT ALLOWED TO BE CHANGED.
// Do not change the letter ('Q'), do not add fields,
// do not remove fields.
// TODO: Kill this logentry entirely once we no longer support version 19.
- {"shutdown_up_to_19", 'Q', FA{{"u_int64_t", "timestamp", 0},
+ {"shutdown_up_to_19", 'Q', FA{{"uint64_t", "timestamp", 0},
NULLFIELD}, IGNORE_LOG_BEGIN},
// Note: Shutdown log entry is NOT ALLOWED TO BE CHANGED.
// Do not change the letter ('0'), do not add fields,
@@ -212,7 +211,7 @@ const struct logtype logtypes[] = {
// This is how we detect clean shutdowns from OLDER VERSIONS.
// This log entry must always be readable for future versions.
// If you DO change it, you need to write a separate log upgrade mechanism.
- {"shutdown", '0', FA{{"u_int64_t", "timestamp", 0},
+ {"shutdown", '0', FA{{"uint64_t", "timestamp", 0},
{"TXNID", "last_xid", 0},
NULLFIELD}, IGNORE_LOG_BEGIN},
{"load", 'l', FA{{"TXNID", "xid", 0},
@@ -231,13 +230,13 @@ const struct logtype logtypes[] = {
{"enq_updatebroadcast", 'B', FA{{"FILENUM", "filenum", 0},
{"TXNID", "xid", 0},
{"BYTESTRING", "extra", 0},
- {"BOOL", "is_resetting_op", 0},
+ {"bool", "is_resetting_op", 0},
NULLFIELD}, SHOULD_LOG_BEGIN},
{"change_fdescriptor", 'D', FA{{"FILENUM", "filenum", 0},
{"TXNID", "xid", 0},
{"BYTESTRING", "old_descriptor", 0},
{"BYTESTRING", "new_descriptor", 0},
- {"BOOL", "update_cmp_descriptor", 0},
+ {"bool", "update_cmp_descriptor", 0},
NULLFIELD}, SHOULD_LOG_BEGIN},
{0,0,FA{NULLFIELD}, (enum log_begin_action) 0}
};
@@ -308,8 +307,8 @@ generate_log_struct (void) {
fprintf(hf, " %-16s lsn;\n", "LSN");
DO_FIELDS(field_type, lt,
fprintf(hf, " %-16s %s;\n", field_type->type, field_type->name));
- fprintf(hf, " %-16s crc;\n", "u_int32_t");
- fprintf(hf, " %-16s len;\n", "u_int32_t");
+ fprintf(hf, " %-16s crc;\n", "uint32_t");
+ fprintf(hf, " %-16s len;\n", "uint32_t");
fprintf(hf, "};\n");
//fprintf(hf, "void toku_recover_%s (LSN lsn", lt->name);
//DO_FIELDS(field_type, lt, fprintf(hf, ", %s %s", field_type->type, field_type->name));
@@ -380,7 +379,7 @@ generate_dispatch (void) {
static void
generate_get_timestamp(void) {
- fprintf(cf, "static u_int64_t toku_get_timestamp(void) {\n");
+ fprintf(cf, "static uint64_t toku_get_timestamp(void) {\n");
fprintf(cf, " struct timeval tv; int r = gettimeofday(&tv, NULL);\n");
fprintf(cf, " assert(r==0);\n");
fprintf(cf, " return tv.tv_sec * 1000000ULL + tv.tv_usec;\n");
@@ -473,16 +472,16 @@ generate_log_writer (void) {
static void
generate_log_reader (void) {
DO_LOGTYPES(lt, {
- fprintf(cf, "static int toku_log_fread_%s (FILE *infile, u_int32_t len1, struct logtype_%s *data, struct x1764 *checksum)", lt->name, lt->name);
+ fprintf(cf, "static int toku_log_fread_%s (FILE *infile, uint32_t len1, struct logtype_%s *data, struct x1764 *checksum)", lt->name, lt->name);
fprintf(cf, " {\n");
fprintf(cf, " int r=0;\n");
- fprintf(cf, " u_int32_t actual_len=5; // 1 for the command, 4 for the first len.\n");
+ fprintf(cf, " uint32_t actual_len=5; // 1 for the command, 4 for the first len.\n");
fprintf(cf, " r=toku_fread_%-16s(infile, &data->%-16s, checksum, &actual_len); if (r!=0) return r;\n", "LSN", "lsn");
DO_FIELDS(field_type, lt,
fprintf(cf, " r=toku_fread_%-16s(infile, &data->%-16s, checksum, &actual_len); if (r!=0) return r;\n", field_type->type, field_type->name));
- fprintf(cf, " u_int32_t checksum_in_file, len_in_file;\n");
- fprintf(cf, " r=toku_fread_u_int32_t_nocrclen(infile, &checksum_in_file); actual_len+=4; if (r!=0) return r;\n");
- fprintf(cf, " r=toku_fread_u_int32_t_nocrclen(infile, &len_in_file); actual_len+=4; if (r!=0) return r;\n");
+ fprintf(cf, " uint32_t checksum_in_file, len_in_file;\n");
+ fprintf(cf, " r=toku_fread_uint32_t_nocrclen(infile, &checksum_in_file); actual_len+=4; if (r!=0) return r;\n");
+ fprintf(cf, " r=toku_fread_uint32_t_nocrclen(infile, &len_in_file); actual_len+=4; if (r!=0) return r;\n");
fprintf(cf, " if (checksum_in_file!=x1764_finish(checksum) || len_in_file!=actual_len || len1 != len_in_file) return DB_BADFORMAT;\n");
fprintf(cf, " return 0;\n");
fprintf(cf, "}\n\n");
@@ -490,11 +489,11 @@ generate_log_reader (void) {
fprintf2(cf, hf, "int toku_log_fread (FILE *infile, struct log_entry *le)");
fprintf(hf, ";\n");
fprintf(cf, " {\n");
- fprintf(cf, " u_int32_t len1; int r;\n");
- fprintf(cf, " u_int32_t ignorelen=0;\n");
+ fprintf(cf, " uint32_t len1; int r;\n");
+ fprintf(cf, " uint32_t ignorelen=0;\n");
fprintf(cf, " struct x1764 checksum;\n");
fprintf(cf, " x1764_init(&checksum);\n");
- fprintf(cf, " r = toku_fread_u_int32_t(infile, &len1, &checksum, &ignorelen); if (r!=0) return r;\n");
+ fprintf(cf, " r = toku_fread_uint32_t(infile, &len1, &checksum, &ignorelen); if (r!=0) return r;\n");
fprintf(cf, " int cmd=fgetc(infile);\n");
fprintf(cf, " if (cmd==EOF) return EOF;\n");
fprintf(cf, " char cmdchar = (char)cmd;\n");
@@ -517,8 +516,8 @@ generate_log_reader (void) {
fprintf(cf, " {\n long pos = ftell(infile);\n if (pos<=12) return -1;\n }\n");
fprintf(cf, " int r = fseek(infile, -4, SEEK_CUR); \n");// assert(r==0);\n");
fprintf(cf, " if (r!=0) return get_error_errno();\n");
- fprintf(cf, " u_int32_t len;\n");
- fprintf(cf, " r = toku_fread_u_int32_t_nocrclen(infile, &len); \n");// assert(r==0);\n");
+ fprintf(cf, " uint32_t len;\n");
+ fprintf(cf, " r = toku_fread_uint32_t_nocrclen(infile, &len); \n");// assert(r==0);\n");
fprintf(cf, " if (r!=0) return 1;\n");
fprintf(cf, " r = fseek(infile, -(int)len, SEEK_CUR) ; \n");// assert(r==0);\n");
fprintf(cf, " if (r!=0) return get_error_errno();\n");
@@ -558,15 +557,15 @@ generate_logprint (void) {
fprintf(hf, ";\n");
fprintf(pf, " {\n");
fprintf(pf, " int cmd, r;\n");
- fprintf(pf, " u_int32_t len1, crc_in_file;\n");
- fprintf(pf, " u_int32_t ignorelen=0;\n");
+ fprintf(pf, " uint32_t len1, crc_in_file;\n");
+ fprintf(pf, " uint32_t ignorelen=0;\n");
fprintf(pf, " struct x1764 checksum;\n");
fprintf(pf, " x1764_init(&checksum);\n");
- fprintf(pf, " r=toku_fread_u_int32_t(f, &len1, &checksum, &ignorelen);\n");
+ fprintf(pf, " r=toku_fread_uint32_t(f, &len1, &checksum, &ignorelen);\n");
fprintf(pf, " if (r==EOF) return EOF;\n");
fprintf(pf, " cmd=fgetc(f);\n");
fprintf(pf, " if (cmd==EOF) return DB_BADFORMAT;\n");
- fprintf(pf, " u_int32_t len_in_file, len=1+4; // cmd + len1\n");
+ fprintf(pf, " uint32_t len_in_file, len=1+4; // cmd + len1\n");
fprintf(pf, " char charcmd = (char)cmd;\n");
fprintf(pf, " x1764_add(&checksum, &charcmd, 1);\n");
fprintf(pf, " switch ((enum lt_cmd)cmd) {\n");
@@ -586,11 +585,11 @@ generate_logprint (void) {
fprintf(pf, "); if (r!=0) return r;\n");
});
fprintf(pf, " {\n");
- fprintf(pf, " u_int32_t actual_murmur = x1764_finish(&checksum);\n");
- fprintf(pf, " r = toku_fread_u_int32_t_nocrclen (f, &crc_in_file); len+=4; if (r!=0) return r;\n");
+ fprintf(pf, " uint32_t actual_murmur = x1764_finish(&checksum);\n");
+ fprintf(pf, " r = toku_fread_uint32_t_nocrclen (f, &crc_in_file); len+=4; if (r!=0) return r;\n");
fprintf(pf, " fprintf(outf, \" crc=%%08x\", crc_in_file);\n");
fprintf(pf, " if (crc_in_file!=actual_murmur) fprintf(outf, \" checksum=%%08x\", actual_murmur);\n");
- fprintf(pf, " r = toku_fread_u_int32_t_nocrclen (f, &len_in_file); len+=4; if (r!=0) return r;\n");
+ fprintf(pf, " r = toku_fread_uint32_t_nocrclen (f, &len_in_file); len+=4; if (r!=0) return r;\n");
fprintf(pf, " fprintf(outf, \" len=%%u\", len_in_file);\n");
fprintf(pf, " if (len_in_file!=len) fprintf(outf, \" actual_len=%%u\", len);\n");
fprintf(pf, " if (len_in_file!=len || crc_in_file!=actual_murmur) return DB_BADFORMAT;\n");
@@ -644,7 +643,7 @@ generate_rollbacks (void) {
});
{
int count=0;
- fprintf(cf, " u_int32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
+ fprintf(cf, " uint32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
DO_FIELDS(field_type, lt, fprintf(cf, "%s%s", (count++>0)?", ":"", field_type->name));
fprintf(cf, ");\n");
}
@@ -660,7 +659,7 @@ generate_rollbacks (void) {
fprintf(cf, " log->rollentry_resident_bytecount += rollback_fsize;\n");
fprintf(cf, " txn->roll_info.rollentry_raw_count += rollback_fsize;\n");
fprintf(cf, " txn->roll_info.num_rollentries++;\n");
- fprintf(cf, " log->dirty = TRUE;\n");
+ fprintf(cf, " log->dirty = true;\n");
fprintf(cf, " // spill and unpin assert success internally\n");
fprintf(cf, " toku_maybe_spill_rollbacks(txn, log);\n");
fprintf(cf, " toku_rollback_log_unpin(txn, log);\n");
@@ -677,7 +676,7 @@ generate_rollbacks (void) {
{
int count=0;
- fprintf(cf, " u_int32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
+ fprintf(cf, " uint32_t rollback_fsize = toku_logger_rollback_fsize_%s(", lt->name);
DO_FIELDS(field_type, lt, fprintf(cf, "%s%s", (count++>0)?", ":"", field_type->name));
fprintf(cf, ");\n");
fprintf(cf, " wbuf_nocrc_int(wbuf, rollback_fsize);\n");
@@ -697,7 +696,7 @@ generate_rollbacks (void) {
fprintf(cf, " }\n assert(0);\n");
fprintf(cf, "}\n");
DO_ROLLBACKS(lt, {
- fprintf2(cf, hf, "u_int32_t toku_logger_rollback_fsize_%s (", lt->name);
+ fprintf2(cf, hf, "uint32_t toku_logger_rollback_fsize_%s (", lt->name);
int count=0;
DO_FIELDS(field_type, lt, fprintf2(cf, hf, "%s%s %s", (count++>0)?", ":"", field_type->type, field_type->name));
fprintf(hf, ");\n");
@@ -708,7 +707,7 @@ generate_rollbacks (void) {
fprintf(cf, "\n + toku_logsizeof_%s(%s)", field_type->type, field_type->name));
fprintf(cf, ";\n}\n");
});
- fprintf2(cf, hf, "u_int32_t toku_logger_rollback_fsize(struct roll_entry *item)");
+ fprintf2(cf, hf, "uint32_t toku_logger_rollback_fsize(struct roll_entry *item)");
fprintf(hf, ";\n");
fprintf(cf, "{\n switch(item->cmd) {\n");
DO_ROLLBACKS(lt, {
@@ -720,7 +719,7 @@ generate_rollbacks (void) {
fprintf(cf, " }\n assert(0);\n return 0;\n");
fprintf(cf, "}\n");
- fprintf2(cf, hf, "int toku_parse_rollback(unsigned char *buf, u_int32_t n_bytes, struct roll_entry **itemp, MEMARENA ma)");
+ fprintf2(cf, hf, "int toku_parse_rollback(unsigned char *buf, uint32_t n_bytes, struct roll_entry **itemp, MEMARENA ma)");
fprintf(hf, ";\n");
fprintf(cf, " {\n assert(n_bytes>0);\n struct roll_entry *item;\n enum rt_cmd cmd = (enum rt_cmd)(buf[0]);\n size_t mem_needed;\n");
fprintf(cf, " struct rbuf rc = {buf, n_bytes, 1};\n");
diff --git a/ft/logger.cc b/ft/logger.cc
index f7ce4fd9ff0..d3f9fdd5c8e 100644
--- a/ft/logger.cc
+++ b/ft/logger.cc
@@ -15,9 +15,9 @@ static int delete_logfile(TOKULOGGER logger, long long index, uint32_t version);
static void grab_output(TOKULOGGER logger, LSN *fsynced_lsn);
static void release_output(TOKULOGGER logger, LSN fsynced_lsn);
-static void toku_print_bytes (FILE *outf, u_int32_t len, char *data) {
+static void toku_print_bytes (FILE *outf, uint32_t len, char *data) {
fprintf(outf, "\"");
- u_int32_t i;
+ uint32_t i;
for (i=0; i<len; i++) {
switch (data[i]) {
case '"': fprintf(outf, "\\\""); break;
@@ -31,8 +31,8 @@ static void toku_print_bytes (FILE *outf, u_int32_t len, char *data) {
fprintf(outf, "\"");
}
-static BOOL is_a_logfile_any_version (const char *name, uint64_t *number_result, uint32_t *version_of_log) {
- BOOL rval = TRUE;
+static bool is_a_logfile_any_version (const char *name, uint64_t *number_result, uint32_t *version_of_log) {
+ bool rval = true;
uint64_t result;
int n;
int r;
@@ -43,7 +43,7 @@ static BOOL is_a_logfile_any_version (const char *name, uint64_t *number_result,
version = TOKU_LOG_VERSION_1;
r = sscanf(name, "log%" SCNu64 ".tokulog%n", &result, &n);
if (r!=1 || name[n]!='\0') {
- rval = FALSE;
+ rval = false;
}
}
if (rval) {
@@ -55,13 +55,13 @@ static BOOL is_a_logfile_any_version (const char *name, uint64_t *number_result,
}
// added for #2424, improved for #2521
-static BOOL is_a_logfile (const char *name, long long *number_result) {
- BOOL rval;
+static bool is_a_logfile (const char *name, long long *number_result) {
+ bool rval;
uint64_t result;
uint32_t version;
rval = is_a_logfile_any_version(name, &result, &version);
if (rval && version != TOKU_LOG_VERSION)
- rval = FALSE;
+ rval = false;
if (rval)
*number_result = result;
return rval;
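Concretely, the parse accepts two on-disk naming schemes; a sketch of the logic (the versioned format string is an assumption inferred from the version-1 fallback shown above):

    uint64_t num; uint32_t ver; int n;
    if (sscanf(name, "log%" SCNu64 ".tokulog%" SCNu32 "%n", &num, &ver, &n) == 2 && name[n] == '\0') {
        // e.g. "log000000000123.tokulog2": versioned log, num = 123, ver = 2
    } else if (sscanf(name, "log%" SCNu64 ".tokulog%n", &num, &n) == 1 && name[n] == '\0') {
        ver = TOKU_LOG_VERSION_1;  // bare ".tokulog" suffix: a version-1 log
    }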
@@ -71,11 +71,11 @@ static BOOL is_a_logfile (const char *name, long long *number_result) {
int toku_logger_create (TOKULOGGER *resultp) {
TOKULOGGER MALLOC(result);
if (result==0) return get_error_errno();
- result->is_open=FALSE;
- result->is_panicked=FALSE;
+ result->is_open=false;
+ result->is_panicked=false;
result->panic_errno = 0;
- result->write_log_files = TRUE;
- result->trim_log_files = TRUE;
+ result->write_log_files = true;
+ result->trim_log_files = true;
result->directory=0;
result->remove_finalize_callback = NULL;
// fd is uninitialized on purpose
@@ -99,7 +99,7 @@ int toku_logger_create (TOKULOGGER *resultp) {
result->output_condition_lock_ctr = 0;
result->swap_ctr = 0;
result->rollback_cachefile = NULL;
- result->output_is_available = TRUE;
+ result->output_is_available = true;
toku_txn_manager_init(&result->txn_manager);
return 0;
}
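For context, the flags initialized above belong to this create/open/close lifecycle (a sketch using functions declared in logger.h; log_dir is a hypothetical path):

    TOKULOGGER logger;
    int r = toku_logger_create(&logger);                 // sets the defaults shown above
    if (r == 0) r = toku_logger_open(log_dir, logger);   // flips is_open to true
    // ... append log entries, fsync, checkpoint ...
    if (r == 0) r = toku_logger_close(&logger);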
@@ -166,7 +166,7 @@ toku_logger_open_with_last_xid(const char *directory, TOKULOGGER logger, TXNID l
}
toku_txn_manager_set_last_xid_from_logger(logger->txn_manager, last_xid);
- logger->is_open = TRUE;
+ logger->is_open = true;
return 0;
}
@@ -179,7 +179,7 @@ bool toku_logger_rollback_is_open (TOKULOGGER logger) {
}
int
-toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, BOOL create) {
+toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create) {
assert(logger->is_open);
assert(!logger->is_panicked);
assert(!logger->rollback_cachefile);
@@ -196,7 +196,7 @@ toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, BOOL create)
assert(!t->ft->panic);
//Must have no data blocks (rollback logs or otherwise).
toku_block_verify_no_data_blocks_except_root_unlocked(t->ft->blocktable, t->ft->h->root_blocknum);
- BOOL is_empty;
+ bool is_empty;
is_empty = toku_ft_is_empty_fast(t);
assert(is_empty);
return r;
@@ -208,7 +208,7 @@ toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, BOOL create)
// Rollback log can only be closed when there are no open transactions,
// so it will always be empty (no data blocks) when about to be closed.
int
-toku_logger_close_rollback(TOKULOGGER logger, BOOL recovery_failed) {
+toku_logger_close_rollback(TOKULOGGER logger, bool recovery_failed) {
int r = 0;
CACHEFILE cf = logger->rollback_cachefile; // stored in logger at rollback cachefile open
if (!logger->is_panicked && cf) {
@@ -228,7 +228,7 @@ toku_logger_close_rollback(TOKULOGGER logger, BOOL recovery_failed) {
assert(!ft->h->dirty);
ft_to_close = toku_ft_get_only_existing_ft_handle(ft);
{
- BOOL is_empty;
+ bool is_empty;
is_empty = toku_ft_is_empty_fast(ft_to_close);
assert(is_empty);
}
@@ -272,7 +272,7 @@ int toku_logger_close(TOKULOGGER *loggerp) {
ml_destroy(&logger->input_lock);
toku_mutex_destroy(&logger->output_condition_lock);
toku_cond_destroy(&logger->output_condition);
- logger->is_panicked=TRUE; // Just in case this might help.
+ logger->is_panicked=true; // Just in case this might help.
toku_txn_manager_destroy(logger->txn_manager);
if (logger->directory) toku_free(logger->directory);
toku_logfilemgr_destroy(&logger->logfilemgr);
@@ -290,7 +290,7 @@ int toku_logger_shutdown(TOKULOGGER logger) {
TXN_MANAGER mgr = logger->txn_manager;
if (toku_txn_manager_num_live_txns(mgr) == 0) {
TXNID last_xid = toku_txn_manager_get_last_xid(mgr);
- r = toku_log_shutdown(logger, NULL, TRUE, 0, last_xid);
+ r = toku_log_shutdown(logger, NULL, true, 0, last_xid);
}
}
return r;
@@ -344,7 +344,7 @@ grab_output(TOKULOGGER logger, LSN *fsynced_lsn)
toku_mutex_lock(&logger->output_condition_lock);
logger->output_condition_lock_ctr++;
wait_till_output_available(logger);
- logger->output_is_available = FALSE;
+ logger->output_is_available = false;
if (fsynced_lsn) {
*fsynced_lsn = logger->fsynced_lsn;
}
@@ -352,7 +352,7 @@ grab_output(TOKULOGGER logger, LSN *fsynced_lsn)
toku_mutex_unlock(&logger->output_condition_lock);
}
-static BOOL
+static bool
wait_till_output_already_written_or_output_buffer_available (TOKULOGGER logger, LSN lsn, LSN *fsynced_lsn)
// Effect: Wait until either the output is available or the lsn has been written.
// Return true iff the lsn has been written.
@@ -361,17 +361,17 @@ wait_till_output_already_written_or_output_buffer_available (TOKULOGGER logger,
// Entry: Hold no locks.
// Exit: Hold the output permission if it returns false.
{
- BOOL result;
+ bool result;
toku_mutex_lock(&logger->output_condition_lock);
logger->output_condition_lock_ctr++;
while (1) {
if (logger->fsynced_lsn.lsn >= lsn.lsn) { // we can look at the fsynced lsn since we have the lock.
- result = TRUE;
+ result = true;
break;
}
if (logger->output_is_available) {
- logger->output_is_available = FALSE;
- result = FALSE;
+ logger->output_is_available = false;
+ result = false;
break;
}
// otherwise wait for a good time to look again.
@@ -391,7 +391,7 @@ release_output (TOKULOGGER logger, LSN fsynced_lsn)
{
toku_mutex_lock(&logger->output_condition_lock);
logger->output_condition_lock_ctr++;
- logger->output_is_available = TRUE;
+ logger->output_is_available = true;
if (logger->fsynced_lsn.lsn < fsynced_lsn.lsn) {
logger->fsynced_lsn = fsynced_lsn;
}
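Taken together, grab_output and release_output hand off a single output permission between threads; a writer follows this pattern (sketch):

    LSN fsynced_lsn;
    grab_output(logger, &fsynced_lsn);    // block until output_is_available, then claim it
    // ... swap the in/out buffers, write the output buffer, possibly fsync ...
    release_output(logger, fsynced_lsn);  // publish the newest fsynced LSN and wake waiters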
@@ -488,7 +488,7 @@ int toku_logger_fsync (TOKULOGGER logger)
if (logger->is_panicked) return EINVAL;
ml_lock(&logger->input_lock);
logger->input_lock_ctr++;
- r = toku_logger_maybe_fsync(logger, logger->inbuf.max_lsn_in_buf, TRUE);
+ r = toku_logger_maybe_fsync(logger, logger->inbuf.max_lsn_in_buf, true);
if (r!=0) {
toku_logger_panic(logger, r);
}
@@ -502,7 +502,7 @@ toku_logger_fsync_if_lsn_not_fsynced (TOKULOGGER logger, LSN lsn) {
else if (logger->write_log_files) {
ml_lock(&logger->input_lock);
logger->input_lock_ctr++;
- r = toku_logger_maybe_fsync(logger, lsn, TRUE);
+ r = toku_logger_maybe_fsync(logger, lsn, true);
if (r!=0) {
toku_logger_panic(logger, r);
}
@@ -512,7 +512,7 @@ toku_logger_fsync_if_lsn_not_fsynced (TOKULOGGER logger, LSN lsn) {
void toku_logger_panic (TOKULOGGER logger, int err) {
logger->panic_errno=err;
- logger->is_panicked=TRUE;
+ logger->is_panicked=true;
}
int toku_logger_panicked(TOKULOGGER logger) {
if (logger==0) return 0;
@@ -527,7 +527,7 @@ void toku_logger_set_cachetable (TOKULOGGER logger, CACHETABLE ct) {
logger->ct = ct;
}
-int toku_logger_set_lg_max(TOKULOGGER logger, u_int32_t lg_max) {
+int toku_logger_set_lg_max(TOKULOGGER logger, uint32_t lg_max) {
if (logger==0) return EINVAL; // no logger
if (logger->is_panicked) return EINVAL;
if (logger->is_open) return EINVAL;
@@ -535,14 +535,14 @@ int toku_logger_set_lg_max(TOKULOGGER logger, u_int32_t lg_max) {
logger->lg_max = lg_max;
return 0;
}
-int toku_logger_get_lg_max(TOKULOGGER logger, u_int32_t *lg_maxp) {
+int toku_logger_get_lg_max(TOKULOGGER logger, uint32_t *lg_maxp) {
if (logger==0) return EINVAL; // no logger
if (logger->is_panicked) return EINVAL;
*lg_maxp = logger->lg_max;
return 0;
}
-int toku_logger_set_lg_bsize(TOKULOGGER logger, u_int32_t bsize) {
+int toku_logger_set_lg_bsize(TOKULOGGER logger, uint32_t bsize) {
if (logger==0) return EINVAL; // no logger
if (logger->is_panicked) return EINVAL;
if (logger->is_open) return EINVAL;
@@ -589,7 +589,7 @@ static int logfilenamecompare (const void *ap, const void *bp) {
char *b=*(char**)bp;
char * b_leafname = fileleafname(b);
int rval;
- BOOL valid;
+ bool valid;
uint64_t num_a = 0; // placate compiler
uint64_t num_b = 0;
uint32_t ver_a = 0;
@@ -727,13 +727,13 @@ int toku_logger_maybe_trim_log(TOKULOGGER logger, LSN trim_lsn)
return r;
}
-void toku_logger_write_log_files (TOKULOGGER logger, BOOL write_log_files)
+void toku_logger_write_log_files (TOKULOGGER logger, bool write_log_files)
// Called only during initialization (or just after recovery), so no locks are needed.
{
logger->write_log_files = write_log_files;
}
-void toku_logger_trim_log_files (TOKULOGGER logger, BOOL trim_log_files)
+void toku_logger_trim_log_files (TOKULOGGER logger, bool trim_log_files)
// Called only during initialization, so no locks are needed.
{
logger->trim_log_files = trim_log_files;
@@ -751,7 +751,7 @@ int toku_logger_maybe_fsync (TOKULOGGER logger, LSN lsn, int do_fsync)
logger->input_lock_ctr++;
ml_unlock(&logger->input_lock);
LSN fsynced_lsn;
- BOOL already_done = wait_till_output_already_written_or_output_buffer_available(logger, lsn, &fsynced_lsn);
+ bool already_done = wait_till_output_already_written_or_output_buffer_available(logger, lsn, &fsynced_lsn);
if (already_done) return 0;
// otherwise we now own the output permission, and our lsn hasn't been written out yet.
@@ -833,8 +833,8 @@ int toku_logger_restart(TOKULOGGER logger, LSN lastlsn)
// reset the LSN's to the lastlsn when the logger was opened
logger->lsn = logger->written_lsn = logger->fsynced_lsn = lastlsn;
- logger->write_log_files = TRUE;
- logger->trim_log_files = TRUE;
+ logger->write_log_files = true;
+ logger->trim_log_files = true;
// open a new log file
r = open_logfile(logger);
@@ -843,7 +843,7 @@ int toku_logger_restart(TOKULOGGER logger, LSN lastlsn)
}
// fname is the iname
-int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, u_int32_t mode, u_int32_t treeflags, u_int32_t nodesize, u_int32_t basementnodesize, enum toku_compression_method compression_method) {
+int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, uint32_t mode, uint32_t treeflags, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method) {
if (txn==0) return 0;
if (txn->logger->is_panicked) return EINVAL;
BYTESTRING bs_fname = { .len = (uint32_t) strlen(fname), .data = (char *) fname };
@@ -874,106 +874,106 @@ int toku_logger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum, uin
return toku_log_fopen (txn->logger, (LSN*)0, 0, bs, filenum, treeflags);
}
-static int toku_fread_u_int8_t_nocrclen (FILE *f, u_int8_t *v) {
+static int toku_fread_uint8_t_nocrclen (FILE *f, uint8_t *v) {
int vi=fgetc(f);
if (vi==EOF) return -1;
- u_int8_t vc=(u_int8_t)vi;
+ uint8_t vc=(uint8_t)vi;
*v = vc;
return 0;
}
-int toku_fread_u_int8_t (FILE *f, u_int8_t *v, struct x1764 *mm, u_int32_t *len) {
+int toku_fread_uint8_t (FILE *f, uint8_t *v, struct x1764 *mm, uint32_t *len) {
int vi=fgetc(f);
if (vi==EOF) return -1;
- u_int8_t vc=(u_int8_t)vi;
+ uint8_t vc=(uint8_t)vi;
x1764_add(mm, &vc, 1);
(*len)++;
*v = vc;
return 0;
}
-int toku_fread_u_int32_t_nocrclen (FILE *f, u_int32_t *v) {
- u_int32_t result;
- u_int8_t *cp = (u_int8_t*)&result;
+int toku_fread_uint32_t_nocrclen (FILE *f, uint32_t *v) {
+ uint32_t result;
+ uint8_t *cp = (uint8_t*)&result;
int r;
- r = toku_fread_u_int8_t_nocrclen (f, cp+0); if (r!=0) return r;
- r = toku_fread_u_int8_t_nocrclen (f, cp+1); if (r!=0) return r;
- r = toku_fread_u_int8_t_nocrclen (f, cp+2); if (r!=0) return r;
- r = toku_fread_u_int8_t_nocrclen (f, cp+3); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+0); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+1); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+2); if (r!=0) return r;
+ r = toku_fread_uint8_t_nocrclen (f, cp+3); if (r!=0) return r;
*v = toku_dtoh32(result);
return 0;
}
-int toku_fread_u_int32_t (FILE *f, u_int32_t *v, struct x1764 *checksum, u_int32_t *len) {
- u_int32_t result;
- u_int8_t *cp = (u_int8_t*)&result;
+int toku_fread_uint32_t (FILE *f, uint32_t *v, struct x1764 *checksum, uint32_t *len) {
+ uint32_t result;
+ uint8_t *cp = (uint8_t*)&result;
int r;
- r = toku_fread_u_int8_t (f, cp+0, checksum, len); if(r!=0) return r;
- r = toku_fread_u_int8_t (f, cp+1, checksum, len); if(r!=0) return r;
- r = toku_fread_u_int8_t (f, cp+2, checksum, len); if(r!=0) return r;
- r = toku_fread_u_int8_t (f, cp+3, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+0, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+1, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+2, checksum, len); if(r!=0) return r;
+ r = toku_fread_uint8_t (f, cp+3, checksum, len); if(r!=0) return r;
*v = toku_dtoh32(result);
return 0;
}
-int toku_fread_u_int64_t (FILE *f, u_int64_t *v, struct x1764 *checksum, u_int32_t *len) {
- u_int32_t v1,v2;
+int toku_fread_uint64_t (FILE *f, uint64_t *v, struct x1764 *checksum, uint32_t *len) {
+ uint32_t v1,v2;
int r;
- r=toku_fread_u_int32_t(f, &v1, checksum, len); if (r!=0) return r;
- r=toku_fread_u_int32_t(f, &v2, checksum, len); if (r!=0) return r;
- *v = (((u_int64_t)v1)<<32 ) | ((u_int64_t)v2);
+ r=toku_fread_uint32_t(f, &v1, checksum, len); if (r!=0) return r;
+ r=toku_fread_uint32_t(f, &v2, checksum, len); if (r!=0) return r;
+ *v = (((uint64_t)v1)<<32 ) | ((uint64_t)v2);
return 0;
}
-int toku_fread_BOOL (FILE *f, BOOL *v, struct x1764 *mm, u_int32_t *len) {
- u_int8_t iv;
- int r = toku_fread_u_int8_t(f, &iv, mm, len);
+int toku_fread_bool (FILE *f, bool *v, struct x1764 *mm, uint32_t *len) {
+ uint8_t iv;
+ int r = toku_fread_uint8_t(f, &iv, mm, len);
if (r == 0) {
*v = (iv!=0);
}
return r;
}
-int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, u_int32_t *len) {
- return toku_fread_u_int64_t (f, &lsn->lsn, checksum, len);
+int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, &lsn->lsn, checksum, len);
}
-int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *b, struct x1764 *checksum, u_int32_t *len) {
- return toku_fread_u_int64_t (f, (u_int64_t*)&b->b, checksum, len);
+int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *b, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, (uint64_t*)&b->b, checksum, len);
}
-int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, u_int32_t *len) {
- return toku_fread_u_int32_t (f, &filenum->fileid, checksum, len);
+int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint32_t (f, &filenum->fileid, checksum, len);
}
-int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, u_int32_t *len) {
- return toku_fread_u_int64_t (f, txnid, checksum, len);
+int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, uint32_t *len) {
+ return toku_fread_uint64_t (f, txnid, checksum, len);
}
-int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, u_int32_t *len) {
+int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, uint32_t *len) {
// These reads are verbose because XA defined the fields as "long", but we use 4 bytes, 1 byte and 1 byte respectively.
TOKU_XA_XID *XMALLOC(xid);
{
- u_int32_t formatID;
- int r = toku_fread_u_int32_t(f, &formatID, checksum, len);
+ uint32_t formatID;
+ int r = toku_fread_uint32_t(f, &formatID, checksum, len);
if (r!=0) return r;
xid->formatID = formatID;
}
{
- u_int8_t gtrid_length;
- int r = toku_fread_u_int8_t (f, &gtrid_length, checksum, len);
+ uint8_t gtrid_length;
+ int r = toku_fread_uint8_t (f, &gtrid_length, checksum, len);
if (r!=0) return r;
xid->gtrid_length = gtrid_length;
}
{
- u_int8_t bqual_length;
- int r = toku_fread_u_int8_t (f, &bqual_length, checksum, len);
+ uint8_t bqual_length;
+ int r = toku_fread_uint8_t (f, &bqual_length, checksum, len);
if (r!=0) return r;
xid->bqual_length = bqual_length;
}
for (int i=0; i< xid->gtrid_length + xid->bqual_length; i++) {
- u_int8_t byte;
- int r = toku_fread_u_int8_t(f, &byte, checksum, len);
+ uint8_t byte;
+ int r = toku_fread_uint8_t(f, &byte, checksum, len);
if (r!=0) return r;
xid->data[i] = byte;
}
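These primitives compose: each multi-byte reader is built from toku_fread_uint8_t, so every byte feeds the same running checksum and length counter. A minimal caller (illustrative):

    struct x1764 cs;
    x1764_init(&cs);
    uint32_t len = 0;
    uint32_t value;
    int r = toku_fread_uint32_t(f, &value, &cs, &len);
    // on success len == 4 and cs has absorbed exactly the bytes behind value;
    // x1764_finish(&cs) can then be checked against a checksum stored in the file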
@@ -982,13 +982,13 @@ int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, u_int32_t *
}
// fills in the bs with malloced data.
-int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, u_int32_t *len) {
- int r=toku_fread_u_int32_t(f, (u_int32_t*)&bs->len, checksum, len);
+int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, uint32_t *len) {
+ int r=toku_fread_uint32_t(f, (uint32_t*)&bs->len, checksum, len);
if (r!=0) return r;
XMALLOC_N(bs->len, bs->data);
- u_int32_t i;
+ uint32_t i;
for (i=0; i<bs->len; i++) {
- r=toku_fread_u_int8_t(f, (u_int8_t*)&bs->data[i], checksum, len);
+ r=toku_fread_uint8_t(f, (uint8_t*)&bs->data[i], checksum, len);
if (r!=0) {
toku_free(bs->data);
bs->data=0;
@@ -999,11 +999,11 @@ int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, u_in
}
// fills in the fs with malloced data.
-int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, u_int32_t *len) {
- int r=toku_fread_u_int32_t(f, (u_int32_t*)&fs->num, checksum, len);
+int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, uint32_t *len) {
+ int r=toku_fread_uint32_t(f, (uint32_t*)&fs->num, checksum, len);
if (r!=0) return r;
XMALLOC_N(fs->num, fs->filenums);
- u_int32_t i;
+ uint32_t i;
for (i=0; i<fs->num; i++) {
r=toku_fread_FILENUM (f, &fs->filenums[i], checksum, len);
if (r!=0) {
@@ -1015,7 +1015,7 @@ int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, u_int32_
return 0;
}
-int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
+int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
LSN v;
int r = toku_fread_LSN(inf, &v, checksum, len);
if (r!=0) return r;
@@ -1023,7 +1023,7 @@ int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x176
return 0;
}
-int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
+int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
TXNID v;
int r = toku_fread_TXNID(inf, &v, checksum, len);
if (r!=0) return r;
@@ -1031,7 +1031,7 @@ int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1
return 0;
}
-int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
+int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
XIDP vp;
int r = toku_fread_XIDP(inf, &vp, checksum, len);
if (r!=0) return r;
@@ -1042,9 +1042,9 @@ int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x17
return 0;
}
-int toku_logprint_u_int8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format) {
- u_int8_t v;
- int r = toku_fread_u_int8_t(inf, &v, checksum, len);
+int toku_logprint_uint8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint8_t v;
+ int r = toku_fread_uint8_t(inf, &v, checksum, len);
if (r!=0) return r;
fprintf(outf, " %s=%d", fieldname, v);
if (format) fprintf(outf, format, v);
@@ -1054,41 +1054,41 @@ int toku_logprint_u_int8_t (FILE *outf, FILE *inf, const char *fieldname, struct
return 0;
}
-int toku_logprint_u_int32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format) {
- u_int32_t v;
- int r = toku_fread_u_int32_t(inf, &v, checksum, len);
+int toku_logprint_uint32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint32_t v;
+ int r = toku_fread_uint32_t(inf, &v, checksum, len);
if (r!=0) return r;
fprintf(outf, " %s=", fieldname);
fprintf(outf, format ? format : "%d", v);
return 0;
}
-int toku_logprint_u_int64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format) {
- u_int64_t v;
- int r = toku_fread_u_int64_t(inf, &v, checksum, len);
+int toku_logprint_uint64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ uint64_t v;
+ int r = toku_fread_uint64_t(inf, &v, checksum, len);
if (r!=0) return r;
fprintf(outf, " %s=", fieldname);
fprintf(outf, format ? format : "%" PRId64, v);
return 0;
}
-int toku_logprint_BOOL (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
- BOOL v;
- int r = toku_fread_BOOL(inf, &v, checksum, len);
+int toku_logprint_bool (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
+ bool v;
+ int r = toku_fread_bool(inf, &v, checksum, len);
if (r!=0) return r;
- fprintf(outf, " %s=%s", fieldname, v ? "TRUE" : "FALSE");
+ fprintf(outf, " %s=%s", fieldname, v ? "true" : "false");
return 0;
}
-void toku_print_BYTESTRING (FILE *outf, u_int32_t len, char *data) {
+void toku_print_BYTESTRING (FILE *outf, uint32_t len, char *data) {
fprintf(outf, "{len=%u data=", len);
toku_print_bytes(outf, len, data);
fprintf(outf, "}");
}
-int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
+int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
BYTESTRING bs;
int r = toku_fread_BYTESTRING(inf, &bs, checksum, len);
if (r!=0) return r;
@@ -1098,20 +1098,20 @@ int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, stru
return 0;
}
-int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format) {
- return toku_logprint_u_int64_t(outf, inf, fieldname, checksum, len, format);
+int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ return toku_logprint_uint64_t(outf, inf, fieldname, checksum, len, format);
}
-int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format) {
- return toku_logprint_u_int32_t(outf, inf, fieldname, checksum, len, format);
+int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format) {
+ return toku_logprint_uint32_t(outf, inf, fieldname, checksum, len, format);
}
static void
-toku_print_FILENUMS (FILE *outf, u_int32_t num, FILENUM *filenums) {
+toku_print_FILENUMS (FILE *outf, uint32_t num, FILENUM *filenums) {
fprintf(outf, "{num=%u filenums=\"", num);
- u_int32_t i;
+ uint32_t i;
for (i=0; i<num; i++) {
if (i>0)
fprintf(outf, ",");
@@ -1121,7 +1121,7 @@ toku_print_FILENUMS (FILE *outf, u_int32_t num, FILENUM *filenums) {
}
-int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__))) {
+int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__))) {
FILENUMS bs;
int r = toku_fread_FILENUMS(inf, &bs, checksum, len);
if (r!=0) return r;
@@ -1131,7 +1131,7 @@ int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct
return 0;
}
-int toku_read_and_print_logmagic (FILE *f, u_int32_t *versionp) {
+int toku_read_and_print_logmagic (FILE *f, uint32_t *versionp) {
{
char magic[8];
int r=fread(magic, 1, 8, f);
@@ -1155,7 +1155,7 @@ int toku_read_and_print_logmagic (FILE *f, u_int32_t *versionp) {
return 0;
}
-int toku_read_logmagic (FILE *f, u_int32_t *versionp) {
+int toku_read_logmagic (FILE *f, uint32_t *versionp) {
{
char magic[8];
int r=fread(magic, 1, 8, f);
@@ -1218,7 +1218,7 @@ static int peek_at_log (TOKULOGGER logger, char* filename, LSN *first_lsn) {
int r = read(fd, header, SKIP+8);
if (r!=SKIP+8) return 0; // cannot determine that it's archivable, so we'll assume no. If a later log is archivable then this one will be too.
- u_int64_t lsn;
+ uint64_t lsn;
{
struct rbuf rb;
rb.buf = header+SKIP;
@@ -1386,10 +1386,10 @@ toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS statp) {
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Used for upgrade:
// if any valid log files exist in log_dir, then
-// set *found_any_logs to TRUE and set *version_found to version number of latest log
+// set *found_any_logs to true and set *version_found to version number of latest log
int
-toku_get_version_of_logs_on_disk(const char *log_dir, BOOL *found_any_logs, uint32_t *version_found) {
- BOOL found = FALSE;
+toku_get_version_of_logs_on_disk(const char *log_dir, bool *found_any_logs, uint32_t *version_found) {
+ bool found = false;
uint32_t highest_version = 0;
int r = 0;
@@ -1403,10 +1403,10 @@ toku_get_version_of_logs_on_disk(const char *log_dir, BOOL *found_any_logs, uint
while ((de=readdir(d))) {
uint32_t this_log_version;
uint64_t this_log_number;
- BOOL is_log = is_a_logfile_any_version(de->d_name, &this_log_number, &this_log_version);
+ bool is_log = is_a_logfile_any_version(de->d_name, &this_log_number, &this_log_version);
if (is_log) {
if (!found) { // first log file found
- found = TRUE;
+ found = true;
highest_version = this_log_version;
}
else
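The probe above is intended to run before recovery so stale-format logs can be upgraded first; typical use (sketch; log_dir is a hypothetical path):

    bool found_any_logs;
    uint32_t version_found;
    int r = toku_get_version_of_logs_on_disk(log_dir, &found_any_logs, &version_found);
    if (r == 0 && found_any_logs && version_found != TOKU_LOG_VERSION) {
        // logs were written by an older layout; upgrade/convert before opening the logger
    }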
diff --git a/ft/logger.h b/ft/logger.h
index bc5bd817805..ed190af0520 100644
--- a/ft/logger.h
+++ b/ft/logger.h
@@ -25,8 +25,8 @@ int toku_logger_open (const char *directory, TOKULOGGER logger);
int toku_logger_open_with_last_xid(const char *directory, TOKULOGGER logger, TXNID last_xid);
int toku_logger_shutdown(TOKULOGGER logger);
int toku_logger_close(TOKULOGGER *loggerp);
-int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, BOOL create);
-int toku_logger_close_rollback(TOKULOGGER logger, BOOL recovery_failed);
+int toku_logger_open_rollback(TOKULOGGER logger, CACHETABLE cachetable, bool create);
+int toku_logger_close_rollback(TOKULOGGER logger, bool recovery_failed);
bool toku_logger_rollback_is_open (TOKULOGGER); // return true iff the rollback is open.
int toku_logger_fsync (TOKULOGGER logger);
@@ -35,15 +35,15 @@ void toku_logger_panic (TOKULOGGER logger, int err);
int toku_logger_panicked(TOKULOGGER logger);
int toku_logger_is_open(TOKULOGGER logger);
void toku_logger_set_cachetable (TOKULOGGER logger, CACHETABLE ct);
-int toku_logger_set_lg_max(TOKULOGGER logger, u_int32_t lg_max);
-int toku_logger_get_lg_max(TOKULOGGER logger, u_int32_t *lg_maxp);
-int toku_logger_set_lg_bsize(TOKULOGGER logger, u_int32_t bsize);
+int toku_logger_set_lg_max(TOKULOGGER logger, uint32_t lg_max);
+int toku_logger_get_lg_max(TOKULOGGER logger, uint32_t *lg_maxp);
+int toku_logger_set_lg_bsize(TOKULOGGER logger, uint32_t bsize);
int toku_logger_lock_init(void);
int toku_logger_lock_destroy(void);
-void toku_logger_write_log_files (TOKULOGGER logger, BOOL write_log_files);
-void toku_logger_trim_log_files(TOKULOGGER logger, BOOL trim_log_files);
+void toku_logger_write_log_files (TOKULOGGER logger, bool write_log_files);
+void toku_logger_trim_log_files(TOKULOGGER logger, bool trim_log_files);
// Restart the logger. This function is used by recovery to really start
// logging.
@@ -58,37 +58,37 @@ int toku_logger_restart(TOKULOGGER logger, LSN lastlsn);
// Returns: 0 if success
int toku_logger_maybe_trim_log(TOKULOGGER logger, LSN oldest_open_lsn);
-int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, u_int32_t mode, u_int32_t flags, u_int32_t nodesize, u_int32_t basementnodesize, enum toku_compression_method compression_method);
+int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, FILENUM filenum, uint32_t mode, uint32_t flags, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method);
int toku_logger_log_fdelete (TOKUTXN txn, FILENUM filenum);
int toku_logger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum, uint32_t treeflags);
-int toku_fread_u_int8_t (FILE *f, u_int8_t *v, struct x1764 *mm, u_int32_t *len);
-int toku_fread_u_int32_t_nocrclen (FILE *f, u_int32_t *v);
-int toku_fread_u_int32_t (FILE *f, u_int32_t *v, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_u_int64_t (FILE *f, u_int64_t *v, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_BOOL (FILE *f, BOOL *v, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *lsn, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, u_int32_t *len);
-int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, u_int32_t *len);
-
-int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__)));
-int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__)));
-int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__)));
-int toku_logprint_u_int8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_logprint_u_int32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_logprint_u_int64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_logprint_BOOL (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__)));
-void toku_print_BYTESTRING (FILE *outf, u_int32_t len, char *data);
-int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format __attribute__((__unused__)));
-int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, u_int32_t *len, const char *format);
-int toku_read_and_print_logmagic (FILE *f, u_int32_t *versionp);
-int toku_read_logmagic (FILE *f, u_int32_t *versionp);
+int toku_fread_uint8_t (FILE *f, uint8_t *v, struct x1764 *mm, uint32_t *len);
+int toku_fread_uint32_t_nocrclen (FILE *f, uint32_t *v);
+int toku_fread_uint32_t (FILE *f, uint32_t *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_uint64_t (FILE *f, uint64_t *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_bool (FILE *f, bool *v, struct x1764 *checksum, uint32_t *len);
+int toku_fread_LSN (FILE *f, LSN *lsn, struct x1764 *checksum, uint32_t *len);
+int toku_fread_BLOCKNUM (FILE *f, BLOCKNUM *lsn, struct x1764 *checksum, uint32_t *len);
+int toku_fread_FILENUM (FILE *f, FILENUM *filenum, struct x1764 *checksum, uint32_t *len);
+int toku_fread_TXNID (FILE *f, TXNID *txnid, struct x1764 *checksum, uint32_t *len);
+int toku_fread_XIDP (FILE *f, XIDP *xidp, struct x1764 *checksum, uint32_t *len);
+int toku_fread_BYTESTRING (FILE *f, BYTESTRING *bs, struct x1764 *checksum, uint32_t *len);
+int toku_fread_FILENUMS (FILE *f, FILENUMS *fs, struct x1764 *checksum, uint32_t *len);
+
+int toku_logprint_LSN (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_TXNID (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_XIDP (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_uint8_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_uint32_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_BLOCKNUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_uint64_t (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_bool (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+void toku_print_BYTESTRING (FILE *outf, uint32_t len, char *data);
+int toku_logprint_BYTESTRING (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format __attribute__((__unused__)));
+int toku_logprint_FILENUM (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_logprint_FILENUMS (FILE *outf, FILE *inf, const char *fieldname, struct x1764 *checksum, uint32_t *len, const char *format);
+int toku_read_and_print_logmagic (FILE *f, uint32_t *versionp);
+int toku_read_logmagic (FILE *f, uint32_t *versionp);
TXNID toku_txn_get_txnid (TOKUTXN txn);
TXNID toku_txn_get_root_txnid (TOKUTXN txn);
@@ -177,14 +177,14 @@ typedef enum {
} logger_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[LOGGER_STATUS_NUM_ROWS];
} LOGGER_STATUS_S, *LOGGER_STATUS;
void toku_logger_get_status(TOKULOGGER logger, LOGGER_STATUS s);
-int toku_get_version_of_logs_on_disk(const char *log_dir, BOOL *found_any_logs, uint32_t *version_found);
+int toku_get_version_of_logs_on_disk(const char *log_dir, bool *found_any_logs, uint32_t *version_found);
int toku_delete_all_logs_of_version(const char *log_dir, uint32_t version_to_delete);
TXN_MANAGER toku_logger_get_txn_manager(TOKULOGGER logger);
diff --git a/ft/minicron.cc b/ft/minicron.cc
index e2cd84a6e94..1f06fafac95 100644
--- a/ft/minicron.cc
+++ b/ft/minicron.cc
@@ -84,14 +84,14 @@ minicron_do (void *pv)
}
int
-toku_minicron_setup(struct minicron *p, u_int32_t period_in_seconds, int(*f)(void *), void *arg)
+toku_minicron_setup(struct minicron *p, uint32_t period_in_seconds, int(*f)(void *), void *arg)
{
p->f = f;
p->arg = arg;
toku_gettime(&p->time_of_last_call_to_f);
//printf("now=%.6f", p->time_of_last_call_to_f.tv_sec + p->time_of_last_call_to_f.tv_nsec*1e-9);
p->period_in_seconds = period_in_seconds;
- p->do_shutdown = FALSE;
+ p->do_shutdown = false;
toku_mutex_init(&p->mutex, 0);
toku_cond_init (&p->condvar, 0);
//printf("%s:%d setup period=%d\n", __FILE__, __LINE__, period_in_seconds);
@@ -99,7 +99,7 @@ toku_minicron_setup(struct minicron *p, u_int32_t period_in_seconds, int(*f)(voi
}
int
-toku_minicron_change_period(struct minicron *p, u_int32_t new_period)
+toku_minicron_change_period(struct minicron *p, uint32_t new_period)
{
toku_mutex_lock(&p->mutex);
p->period_in_seconds = new_period;
@@ -108,20 +108,20 @@ toku_minicron_change_period(struct minicron *p, u_int32_t new_period)
return 0;
}
-u_int32_t
+uint32_t
toku_minicron_get_period(struct minicron *p)
{
toku_mutex_lock(&p->mutex);
- u_int32_t retval = toku_minicron_get_period_unlocked(p);
+ uint32_t retval = toku_minicron_get_period_unlocked(p);
toku_mutex_unlock(&p->mutex);
return retval;
}
/* unlocked function for use by engine status which takes no locks */
-u_int32_t
+uint32_t
toku_minicron_get_period_unlocked(struct minicron *p)
{
- u_int32_t retval = p->period_in_seconds;
+ uint32_t retval = p->period_in_seconds;
return retval;
}
@@ -129,7 +129,7 @@ int
toku_minicron_shutdown(struct minicron *p) {
toku_mutex_lock(&p->mutex);
assert(!p->do_shutdown);
- p->do_shutdown = TRUE;
+ p->do_shutdown = true;
//printf("%s:%d signalling\n", __FILE__, __LINE__);
toku_cond_signal(&p->condvar);
toku_mutex_unlock(&p->mutex);
@@ -144,7 +144,7 @@ toku_minicron_shutdown(struct minicron *p) {
return 0;
}
-BOOL
+bool
toku_minicron_has_been_shutdown(struct minicron *p) {
return p->do_shutdown;
}
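The minicron converted here is a small run-a-callback-every-N-seconds helper; a usage sketch (the callback and its argument are hypothetical):

    static int flush_cb(void *arg) {
        // periodic work goes here; return 0 on success
        return 0;
    }

    struct minicron mc;
    toku_minicron_setup(&mc, 60, flush_cb, NULL);  // call flush_cb every 60 seconds
    toku_minicron_change_period(&mc, 300);         // stretch the period to 5 minutes
    toku_minicron_shutdown(&mc);                   // sets do_shutdown and signals the thread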
diff --git a/ft/minicron.h b/ft/minicron.h
index c93a3e49297..815d8ec76c5 100644
--- a/ft/minicron.h
+++ b/ft/minicron.h
@@ -32,16 +32,16 @@ struct minicron {
toku_cond_t condvar;
int (*f)(void*);
void *arg;
- u_int32_t period_in_seconds;
- BOOL do_shutdown;
+ uint32_t period_in_seconds;
+ bool do_shutdown;
};
-int toku_minicron_setup (struct minicron *s, u_int32_t period_in_seconds, int(*f)(void *), void *arg);
-int toku_minicron_change_period(struct minicron *p, u_int32_t new_period);
-u_int32_t toku_minicron_get_period(struct minicron *p);
-u_int32_t toku_minicron_get_period_unlocked(struct minicron *p);
+int toku_minicron_setup (struct minicron *s, uint32_t period_in_seconds, int(*f)(void *), void *arg);
+int toku_minicron_change_period(struct minicron *p, uint32_t new_period);
+uint32_t toku_minicron_get_period(struct minicron *p);
+uint32_t toku_minicron_get_period_unlocked(struct minicron *p);
int toku_minicron_shutdown(struct minicron *p);
-BOOL toku_minicron_has_been_shutdown(struct minicron *p);
+bool toku_minicron_has_been_shutdown(struct minicron *p);
#endif
diff --git a/ft/omt-tmpl.cc b/ft/omt-tmpl.cc
index 9e614dffd2e..13d535604d0 100644
--- a/ft/omt-tmpl.cc
+++ b/ft/omt-tmpl.cc
@@ -9,7 +9,6 @@
#include <toku_assert.h>
#include <memory.h>
#include <stdint.h>
-#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <db.h>
diff --git a/ft/omt-tmpl.h b/ft/omt-tmpl.h
index f25f1f066c0..012f43df744 100644
--- a/ft/omt-tmpl.h
+++ b/ft/omt-tmpl.h
@@ -9,7 +9,6 @@
#include <toku_portability.h>
#include <stdint.h>
-#include <stdbool.h>
namespace toku {
diff --git a/ft/omt.cc b/ft/omt.cc
index 9495be9438b..7a50393f7ac 100644
--- a/ft/omt.cc
+++ b/ft/omt.cc
@@ -21,12 +21,12 @@
#include "omt.h"
#include "fttypes.h"
-typedef u_int32_t node_idx;
+typedef uint32_t node_idx;
static const node_idx NODE_NULL = UINT32_MAX;
typedef struct omt_node *OMT_NODE;
struct omt_node {
- u_int32_t weight; /* Size of subtree rooted at this node (including this one). */
+ uint32_t weight; /* Size of subtree rooted at this node (including this one). */
node_idx left; /* Index of left subtree. */
node_idx right; /* Index of right subtree. */
OMTVALUE value; /* The value stored in the node. */
@@ -34,8 +34,8 @@ struct omt_node {
struct omt_array {
- u_int32_t start_idx;
- u_int32_t num_values;
+ uint32_t start_idx;
+ uint32_t num_values;
OMTVALUE *values;
};
@@ -47,8 +47,8 @@ struct omt_tree {
};
struct omt {
- BOOL is_array;
- u_int32_t capacity;
+ bool is_array;
+ uint32_t capacity;
union {
struct omt_array a;
struct omt_tree t;
@@ -59,14 +59,14 @@ static inline int
omt_create_no_array(OMT *omtp) {
OMT XMALLOC(result);
if (result==NULL) return ENOMEM;
- result->is_array = TRUE;
+ result->is_array = true;
result->i.a.num_values = 0;
result->i.a.start_idx = 0;
*omtp = result;
return 0;
}
-static int omt_create_internal(OMT *omtp, u_int32_t num_starting_nodes) {
+static int omt_create_internal(OMT *omtp, uint32_t num_starting_nodes) {
OMT result;
int r = omt_create_no_array(&result);
if (r) return r;
@@ -78,7 +78,7 @@ static int omt_create_internal(OMT *omtp, u_int32_t num_starting_nodes) {
}
int
-toku_omt_create_steal_sorted_array(OMT *omtp, OMTVALUE **valuesp, u_int32_t numvalues, u_int32_t capacity) {
+toku_omt_create_steal_sorted_array(OMT *omtp, OMTVALUE **valuesp, uint32_t numvalues, uint32_t capacity) {
if (numvalues>capacity || !*valuesp) return EINVAL;
int r = omt_create_no_array(omtp);
if (r) return r;
@@ -90,12 +90,12 @@ toku_omt_create_steal_sorted_array(OMT *omtp, OMTVALUE **valuesp, u_int32_t numv
return 0;
}
-static inline u_int32_t nweight(OMT omt, node_idx idx) {
+static inline uint32_t nweight(OMT omt, node_idx idx) {
if (idx==NODE_NULL) return 0;
else return (omt->i.t.nodes+idx)->weight;
}
-static inline u_int32_t omt_size(OMT omt) {
+static inline uint32_t omt_size(OMT omt) {
return omt->is_array ? omt->i.a.num_values : nweight(omt, omt->i.t.root);
}
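Both representations sit behind one public API, so callers never see the array/tree switch; a minimal round-trip (v0..v2 are hypothetical, pre-sorted OMTVALUEs):

    OMTVALUE vals[3] = { v0, v1, v2 };
    OMT omt;
    int r = toku_omt_create_from_sorted_array(&omt, vals, 3);  // starts in array form
    OMTVALUE out;
    if (r == 0) r = toku_omt_fetch(omt, 1, &out);              // out == v1
    if (r == 0) r = toku_omt_insert_at(omt, v0, 3);            // may resize or convert internally
    toku_omt_destroy(&omt);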
@@ -125,11 +125,11 @@ static inline void fill_array_with_subtree_values(OMT omt, OMTVALUE *array, node
// numvalues=1, halfway=0, left side is values of size 0
// right side is values of size 0.
static inline void rebuild_from_sorted_array(OMT omt, node_idx *n_idxp,
- OMTVALUE *values, u_int32_t numvalues) {
+ OMTVALUE *values, uint32_t numvalues) {
if (numvalues==0) {
*n_idxp = NODE_NULL;
} else {
- u_int32_t halfway = numvalues/2;
+ uint32_t halfway = numvalues/2;
node_idx newidx = omt_node_malloc(omt);
OMT_NODE newnode = omt->i.t.nodes+newidx;
newnode->weight = numvalues;
@@ -140,9 +140,9 @@ static inline void rebuild_from_sorted_array(OMT omt, node_idx *n_idxp,
}
}
-static inline int maybe_resize_array(OMT omt, u_int32_t n) {
- u_int32_t new_size = n<=2 ? 4 : 2*n;
- u_int32_t room = omt->capacity - omt->i.a.start_idx;
+static inline int maybe_resize_array(OMT omt, uint32_t n) {
+ uint32_t new_size = n<=2 ? 4 : 2*n;
+ uint32_t room = omt->capacity - omt->i.a.start_idx;
if (room<n || omt->capacity/2>=new_size) {
OMTVALUE *XMALLOC_N(new_size, tmp_values);
@@ -159,15 +159,15 @@ static inline int maybe_resize_array(OMT omt, u_int32_t n) {
static int omt_convert_to_tree(OMT omt) {
if (!omt->is_array) return 0;
- u_int32_t num_nodes = omt_size(omt);
- u_int32_t new_size = num_nodes*2;
+ uint32_t num_nodes = omt_size(omt);
+ uint32_t new_size = num_nodes*2;
new_size = new_size < 4 ? 4 : new_size;
OMT_NODE XMALLOC_N(new_size, new_nodes);
if (new_nodes==NULL) return get_error_errno();
OMTVALUE *values = omt->i.a.values;
OMTVALUE *tmp_values = values + omt->i.a.start_idx;
- omt->is_array = FALSE;
+ omt->is_array = false;
omt->i.t.nodes = new_nodes;
omt->capacity = new_size;
omt->i.t.free_idx = 0;
@@ -179,15 +179,15 @@ static int omt_convert_to_tree(OMT omt) {
static int omt_convert_to_array(OMT omt) {
if (omt->is_array) return 0;
- u_int32_t num_values = omt_size(omt);
- u_int32_t new_size = 2*num_values;
+ uint32_t num_values = omt_size(omt);
+ uint32_t new_size = 2*num_values;
new_size = new_size < 4 ? 4 : new_size;
OMTVALUE *XMALLOC_N(new_size, tmp_values);
if (tmp_values==NULL) return get_error_errno();
fill_array_with_subtree_values(omt, tmp_values, omt->i.t.root);
toku_free(omt->i.t.nodes);
- omt->is_array = TRUE;
+ omt->is_array = true;
omt->capacity = new_size;
omt->i.a.num_values = num_values;
omt->i.a.values = tmp_values;
@@ -195,10 +195,10 @@ static int omt_convert_to_array(OMT omt) {
return 0;
}
-static inline int maybe_resize_or_convert(OMT omt, u_int32_t n) {
+static inline int maybe_resize_or_convert(OMT omt, uint32_t n) {
if (omt->is_array) return maybe_resize_array(omt, n);
- u_int32_t new_size = n<=2 ? 4 : 2*n;
+ uint32_t new_size = n<=2 ? 4 : 2*n;
/* Rebuild/realloc the nodes array iff any of the following:
* The array is smaller than the number of elements we want.
@@ -206,7 +206,7 @@ static inline int maybe_resize_or_convert(OMT omt, u_int32_t n) {
* The array is too large. */
//Rebuilding means we first turn it into an array.
//Let's pause at the array form.
- u_int32_t num_nodes = nweight(omt, omt->i.t.root);
+ uint32_t num_nodes = nweight(omt, omt->i.t.root);
if ((omt->capacity/2 >= new_size) ||
(omt->i.t.free_idx>=omt->capacity && num_nodes<n) ||
(omt->capacity<n)) {
@@ -225,11 +225,11 @@ static inline void fill_array_with_subtree_idxs(OMT omt, node_idx *array, node_i
/* Reuses existing OMT_NODE structures (used for rebalancing). */
static inline void rebuild_subtree_from_idxs(OMT omt, node_idx *n_idxp, node_idx *idxs,
- u_int32_t numvalues) {
+ uint32_t numvalues) {
if (numvalues==0) {
*n_idxp=NODE_NULL;
} else {
- u_int32_t halfway = numvalues/2;
+ uint32_t halfway = numvalues/2;
node_idx newidx = idxs[halfway];
OMT_NODE newnode = omt->i.t.nodes+newidx;
newnode->weight = numvalues;
@@ -254,15 +254,15 @@ static inline void rebalance(OMT omt, node_idx *n_idxp) {
node_idx *tmp_array;
size_t mem_needed = n->weight*sizeof(*tmp_array);
size_t mem_free = (omt->capacity-omt->i.t.free_idx)*sizeof(*omt->i.t.nodes);
- BOOL malloced;
+ bool malloced;
if (mem_needed<=mem_free) {
//There is sufficient free space at the end of the nodes array
//to hold enough node indexes to rebalance.
- malloced = FALSE;
+ malloced = false;
tmp_array = (node_idx*)(omt->i.t.nodes+omt->i.t.free_idx);
}
else {
- malloced = TRUE;
+ malloced = true;
XMALLOC_N(n->weight, tmp_array);
if (tmp_array==NULL) return; //Don't rebalance. Still a working tree.
}
@@ -271,19 +271,19 @@ static inline void rebalance(OMT omt, node_idx *n_idxp) {
if (malloced) toku_free(tmp_array);
}
-static inline BOOL will_need_rebalance(OMT omt, node_idx n_idx, int leftmod, int rightmod) {
- if (n_idx==NODE_NULL) return FALSE;
+static inline bool will_need_rebalance(OMT omt, node_idx n_idx, int leftmod, int rightmod) {
+ if (n_idx==NODE_NULL) return false;
OMT_NODE n = omt->i.t.nodes+n_idx;
// one of the 1's is for the root.
// the other is to take ceil(n/2)
- u_int32_t weight_left = nweight(omt, n->left) + leftmod;
- u_int32_t weight_right = nweight(omt, n->right) + rightmod;
- return (BOOL)((1+weight_left < (1+1+weight_right)/2)
+ uint32_t weight_left = nweight(omt, n->left) + leftmod;
+ uint32_t weight_right = nweight(omt, n->right) + rightmod;
+ return (bool)((1+weight_left < (1+1+weight_right)/2)
||
(1+weight_right < (1+1+weight_left)/2));
}
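Worked example of the test above (leftmod = rightmod = 0): weights 1 and 5 trip it, since 1+1 = 2 < (1+1+5)/2 = 3 under integer division; weights 2 and 4 do not, since 1+2 = 3 < (1+1+4)/2 = 3 fails and so does the mirrored check, so that subtree is left alone.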
-static inline void insert_internal(OMT omt, node_idx *n_idxp, OMTVALUE value, u_int32_t index, node_idx **rebalance_idx) {
+static inline void insert_internal(OMT omt, node_idx *n_idxp, OMTVALUE value, uint32_t index, node_idx **rebalance_idx) {
if (*n_idxp==NODE_NULL) {
assert(index==0);
node_idx newidx = omt_node_malloc(omt);
@@ -306,17 +306,17 @@ static inline void insert_internal(OMT omt, node_idx *n_idxp, OMTVALUE value, u_
if (*rebalance_idx==NULL && will_need_rebalance(omt, idx, 0, 1)) {
*rebalance_idx = n_idxp;
}
- u_int32_t sub_index = index-nweight(omt, n->left)-1;
+ uint32_t sub_index = index-nweight(omt, n->left)-1;
insert_internal(omt, &n->right, value, sub_index, rebalance_idx);
}
}
}
-static inline void set_at_internal_array(OMT omt, OMTVALUE v, u_int32_t index) {
+static inline void set_at_internal_array(OMT omt, OMTVALUE v, uint32_t index) {
omt->i.a.values[omt->i.a.start_idx+index] = v;
}
-static inline void set_at_internal(OMT omt, node_idx n_idx, OMTVALUE v, u_int32_t index) {
+static inline void set_at_internal(OMT omt, node_idx n_idx, OMTVALUE v, uint32_t index) {
assert(n_idx!=NODE_NULL);
OMT_NODE n = omt->i.t.nodes+n_idx;
if (index<nweight(omt, n->left))
@@ -328,7 +328,7 @@ static inline void set_at_internal(OMT omt, node_idx n_idx, OMTVALUE v, u_int32_
}
}
-static inline void delete_internal(OMT omt, node_idx *n_idxp, u_int32_t index, OMTVALUE *vp, node_idx **rebalance_idx) {
+static inline void delete_internal(OMT omt, node_idx *n_idxp, uint32_t index, OMTVALUE *vp, node_idx **rebalance_idx) {
assert(*n_idxp!=NODE_NULL);
OMT_NODE n = omt->i.t.nodes+*n_idxp;
if (index < nweight(omt, n->left)) {
@@ -339,12 +339,12 @@ static inline void delete_internal(OMT omt, node_idx *n_idxp, u_int32_t index, O
delete_internal(omt, &n->left, index, vp, rebalance_idx);
} else if (index == nweight(omt, n->left)) {
if (n->left==NODE_NULL) {
- u_int32_t idx = *n_idxp;
+ uint32_t idx = *n_idxp;
*n_idxp = n->right;
*vp = n->value;
omt_node_free(omt, idx);
} else if (n->right==NODE_NULL) {
- u_int32_t idx = *n_idxp;
+ uint32_t idx = *n_idxp;
*n_idxp = n->left;
*vp = n->value;
omt_node_free(omt, idx);
@@ -367,11 +367,11 @@ static inline void delete_internal(OMT omt, node_idx *n_idxp, u_int32_t index, O
}
}
-static inline void fetch_internal_array(OMT V, u_int32_t i, OMTVALUE *v) {
+static inline void fetch_internal_array(OMT V, uint32_t i, OMTVALUE *v) {
*v = V->i.a.values[V->i.a.start_idx+i];
}
-static inline void fetch_internal(OMT V, node_idx idx, u_int32_t i, OMTVALUE *v) {
+static inline void fetch_internal(OMT V, node_idx idx, uint32_t i, OMTVALUE *v) {
OMT_NODE n = V->i.t.nodes+idx;
if (i < nweight(V, n->left)) {
fetch_internal(V, n->left, i, v);
@@ -383,10 +383,10 @@ static inline void fetch_internal(OMT V, node_idx idx, u_int32_t i, OMTVALUE *v)
}
static inline int iterate_internal_array(OMT omt,
- u_int32_t left, u_int32_t right,
- int (*f)(OMTVALUE, u_int32_t, void*), void*v) {
+ uint32_t left, uint32_t right,
+ int (*f)(OMTVALUE, uint32_t, void*), void*v) {
int r;
- u_int32_t i;
+ uint32_t i;
for (i = left; i < right; i++) {
r = f(omt->i.a.values[i+omt->i.a.start_idx], i, v);
@@ -395,27 +395,27 @@ static inline int iterate_internal_array(OMT omt,
return 0;
}
-static inline int iterate_internal(OMT omt, u_int32_t left, u_int32_t right,
- node_idx n_idx, u_int32_t idx,
- int (*f)(OMTVALUE, u_int32_t, void*), void*v) {
+static inline int iterate_internal(OMT omt, uint32_t left, uint32_t right,
+ node_idx n_idx, uint32_t idx,
+ int (*f)(OMTVALUE, uint32_t, void*), void*v) {
int r;
if (n_idx==NODE_NULL) return 0;
OMT_NODE n = omt->i.t.nodes+n_idx;
- u_int32_t idx_root = idx+nweight(omt,n->left);
+ uint32_t idx_root = idx+nweight(omt,n->left);
if (left< idx_root && (r=iterate_internal(omt, left, right, n->left, idx, f, v))) return r;
if (left<=idx_root && idx_root<right && (r=f(n->value, idx_root, v))) return r;
if (idx_root+1<right) return iterate_internal(omt, left, right, n->right, idx_root+1, f, v);
return 0;
}
-static inline int find_internal_zero_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index) {
- u_int32_t min = omt->i.a.start_idx;
- u_int32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
- u_int32_t best_pos = NODE_NULL;
- u_int32_t best_zero = NODE_NULL;
+static inline int find_internal_zero_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index) {
+ uint32_t min = omt->i.a.start_idx;
+ uint32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
+ uint32_t best_pos = NODE_NULL;
+ uint32_t best_zero = NODE_NULL;
while (min!=limit) {
- u_int32_t mid = (min + limit) / 2;
+ uint32_t mid = (min + limit) / 2;
int hv = h(omt->i.a.values[mid], extra);
if (hv<0) {
min = mid+1;
@@ -440,7 +440,7 @@ static inline int find_internal_zero_array(OMT omt, int (*h)(OMTVALUE, void*extr
return DB_NOTFOUND;
}
-static inline int find_internal_zero(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index)
+static inline int find_internal_zero(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index)
// requires: index!=NULL
{
if (n_idx==NODE_NULL) {
@@ -467,13 +467,13 @@ static inline int find_internal_zero(OMT omt, node_idx n_idx, int (*h)(OMTVALUE,
}
-static inline int find_internal_plus_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index) {
- u_int32_t min = omt->i.a.start_idx;
- u_int32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
- u_int32_t best = NODE_NULL;
+static inline int find_internal_plus_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index) {
+ uint32_t min = omt->i.a.start_idx;
+ uint32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
+ uint32_t best = NODE_NULL;
while (min!=limit) {
- u_int32_t mid = (min + limit) / 2;
+ uint32_t mid = (min + limit) / 2;
int hv = h(omt->i.a.values[mid], extra);
if (hv>0) {
best = mid;
@@ -489,13 +489,13 @@ static inline int find_internal_plus_array(OMT omt, int (*h)(OMTVALUE, void*extr
return 0;
}
-static inline int find_internal_minus_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index) {
- u_int32_t min = omt->i.a.start_idx;
- u_int32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
- u_int32_t best = NODE_NULL;
+static inline int find_internal_minus_array(OMT omt, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index) {
+ uint32_t min = omt->i.a.start_idx;
+ uint32_t limit = omt->i.a.start_idx + omt->i.a.num_values;
+ uint32_t best = NODE_NULL;
while (min!=limit) {
- u_int32_t mid = (min + limit) / 2;
+ uint32_t mid = (min + limit) / 2;
int hv = h(omt->i.a.values[mid], extra);
if (hv<0) {
best = mid;
@@ -512,7 +512,7 @@ static inline int find_internal_minus_array(OMT omt, int (*h)(OMTVALUE, void*ext
}
// If direction <0 then find the largest i such that h(V_i,extra)<0.
-static inline int find_internal_minus(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index)
+static inline int find_internal_minus(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index)
// requires: index!=NULL
{
if (n_idx==NODE_NULL) return DB_NOTFOUND;
@@ -533,7 +533,7 @@ static inline int find_internal_minus(OMT omt, node_idx n_idx, int (*h)(OMTVALUE
}
// If direction >0 then find the smallest i such that h(V_i,extra)>0.
-static inline int find_internal_plus(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index)
+static inline int find_internal_plus(OMT omt, node_idx n_idx, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index)
// requires: index!=NULL
{
if (n_idx==NODE_NULL) return DB_NOTFOUND;
@@ -567,11 +567,11 @@ void toku_omt_destroy(OMT *omtp) {
*omtp=NULL;
}
-u_int32_t toku_omt_size(OMT V) {
+uint32_t toku_omt_size(OMT V) {
return omt_size(V);
}
-int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, u_int32_t numvalues) {
+int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, uint32_t numvalues) {
OMT omt = NULL;
int r;
if ((r = omt_create_internal(&omt, numvalues))) return r;
@@ -581,7 +581,7 @@ int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, u_int32_t num
return 0;
}
-int toku_omt_insert_at(OMT omt, OMTVALUE value, u_int32_t index) {
+int toku_omt_insert_at(OMT omt, OMTVALUE value, uint32_t index) {
int r;
if (index>omt_size(omt)) return EINVAL;
if ((r=maybe_resize_or_convert(omt, 1+omt_size(omt)))) return r;
@@ -606,7 +606,7 @@ int toku_omt_insert_at(OMT omt, OMTVALUE value, u_int32_t index) {
return 0;
}
-int toku_omt_set_at (OMT omt, OMTVALUE value, u_int32_t index) {
+int toku_omt_set_at (OMT omt, OMTVALUE value, uint32_t index) {
if (index>=omt_size(omt)) return EINVAL;
if (omt->is_array) {
set_at_internal_array(omt, value, index);
@@ -617,7 +617,7 @@ int toku_omt_set_at (OMT omt, OMTVALUE value, u_int32_t index) {
return 0;
}
-int toku_omt_delete_at(OMT omt, u_int32_t index) {
+int toku_omt_delete_at(OMT omt, uint32_t index) {
OMTVALUE v;
int r;
if (index>=omt_size(omt)) return EINVAL;
@@ -639,7 +639,7 @@ int toku_omt_delete_at(OMT omt, u_int32_t index) {
return 0;
}
-int toku_omt_fetch(OMT V, u_int32_t i, OMTVALUE *v) {
+int toku_omt_fetch(OMT V, uint32_t i, OMTVALUE *v) {
if (i>=omt_size(V)) return EINVAL;
if (V->is_array) {
fetch_internal_array(V, i, v);
@@ -651,7 +651,7 @@ int toku_omt_fetch(OMT V, u_int32_t i, OMTVALUE *v) {
}
static int
-free_item (OMTVALUE lev, u_int32_t UU(idx), void *vsi) {
+free_item (OMTVALUE lev, uint32_t UU(idx), void *vsi) {
assert(vsi == NULL);
toku_free(lev);
return 0;
@@ -663,14 +663,14 @@ void toku_omt_free_items(OMT omt) {
lazy_assert_zero(r);
}
-int toku_omt_iterate(OMT omt, int (*f)(OMTVALUE, u_int32_t, void*), void*v) {
+int toku_omt_iterate(OMT omt, int (*f)(OMTVALUE, uint32_t, void*), void*v) {
if (omt->is_array) {
return iterate_internal_array(omt, 0, omt_size(omt), f, v);
}
return iterate_internal(omt, 0, nweight(omt, omt->i.t.root), omt->i.t.root, 0, f, v);
}
-int toku_omt_iterate_on_range(OMT omt, u_int32_t left, u_int32_t right, int (*f)(OMTVALUE, u_int32_t, void*), void*v) {
+int toku_omt_iterate_on_range(OMT omt, uint32_t left, uint32_t right, int (*f)(OMTVALUE, uint32_t, void*), void*v) {
if (right>omt_size(omt)) return EINVAL;
if (omt->is_array) {
return iterate_internal_array(omt, left, right, f, v);
@@ -678,9 +678,9 @@ int toku_omt_iterate_on_range(OMT omt, u_int32_t left, u_int32_t right, int (*f)
return iterate_internal(omt, left, right, omt->i.t.root, 0, f, v);
}
-int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v, u_int32_t *index) {
+int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v, uint32_t *index) {
int r;
- u_int32_t idx;
+ uint32_t idx;
r = toku_omt_find_zero(omt, h, v, NULL, &idx);
if (r==0) {
@@ -695,8 +695,8 @@ int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v,
return 0;
}
-int toku_omt_find_zero(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *index) {
- u_int32_t tmp_index;
+int toku_omt_find_zero(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *index) {
+ uint32_t tmp_index;
if (index==NULL) index=&tmp_index;
int r;
if (V->is_array) {
@@ -708,8 +708,8 @@ int toku_omt_find_zero(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVAL
return r;
}
-int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int direction, OMTVALUE *value, u_int32_t *index) {
- u_int32_t tmp_index;
+int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int direction, OMTVALUE *value, uint32_t *index) {
+ uint32_t tmp_index;
int r;
if (index==NULL) index=&tmp_index;
if (direction==0) {
@@ -732,13 +732,13 @@ int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int directi
return r;
}
-int toku_omt_split_at(OMT omt, OMT *newomtp, u_int32_t index) {
+int toku_omt_split_at(OMT omt, OMT *newomtp, uint32_t index) {
int r;
OMT newomt;
if (index>omt_size(omt)) return EINVAL;
if ((r=omt_convert_to_array(omt))) return r;
- u_int32_t newsize = omt_size(omt)-index;
+ uint32_t newsize = omt_size(omt)-index;
if ((r=toku_omt_create_from_sorted_array(&newomt,
omt->i.a.values+omt->i.a.start_idx+index,
newsize))) return r;
@@ -756,7 +756,7 @@ int toku_omt_split_at(OMT omt, OMT *newomtp, u_int32_t index) {
int toku_omt_merge(OMT leftomt, OMT rightomt, OMT *newomtp) {
int r;
OMT newomt = 0;
- u_int32_t newsize = omt_size(leftomt)+omt_size(rightomt);
+ uint32_t newsize = omt_size(leftomt)+omt_size(rightomt);
if ((r = omt_create_internal(&newomt, newsize))) return r;
if (leftomt->is_array) {
@@ -784,16 +784,16 @@ int toku_omt_merge(OMT leftomt, OMT rightomt, OMT *newomtp) {
struct copy_data_extra {
OMTVALUE *a;
- u_int32_t eltsize;
+ uint32_t eltsize;
};
-static int copy_data_iter(OMTVALUE v, u_int32_t idx, void *ve) {
+static int copy_data_iter(OMTVALUE v, uint32_t idx, void *ve) {
struct copy_data_extra *CAST_FROM_VOIDP(e, ve);
memcpy(e->a[idx], v, e->eltsize);
return 0;
}
-static int omt_copy_data(OMTVALUE *a, OMT omt, u_int32_t eltsize) {
+static int omt_copy_data(OMTVALUE *a, OMT omt, uint32_t eltsize) {
struct copy_data_extra extra = { .a = a, .eltsize = eltsize };
if (omt->is_array) {
return iterate_internal_array(omt, 0, omt_size(omt), copy_data_iter, &extra);
@@ -801,14 +801,14 @@ static int omt_copy_data(OMTVALUE *a, OMT omt, u_int32_t eltsize) {
return iterate_internal(omt, 0, nweight(omt, omt->i.t.root), omt->i.t.root, 0, copy_data_iter, &extra);
}
-int toku_omt_clone(OMT *dest, OMT src, u_int32_t eltsize) {
- u_int32_t size = omt_size(src);
+int toku_omt_clone(OMT *dest, OMT src, uint32_t eltsize) {
+ uint32_t size = omt_size(src);
if (size == 0) {
toku_omt_create(dest);
return 0;
}
OMTVALUE *XMALLOC_N(size, a);
- for (u_int32_t i = 0; i < size; ++i) {
+ for (uint32_t i = 0; i < size; ++i) {
CAST_FROM_VOIDP(a[i], toku_xmalloc(eltsize));
}
int r = omt_copy_data(a, src, eltsize);
@@ -821,15 +821,15 @@ err:
return r;
}
-int toku_omt_clone_pool(OMT *dest, OMT src, u_int32_t eltsize) {
- u_int32_t size = omt_size(src);
+int toku_omt_clone_pool(OMT *dest, OMT src, uint32_t eltsize) {
+ uint32_t size = omt_size(src);
if (size == 0) {
toku_omt_create(dest);
return 0;
}
OMTVALUE *XMALLOC_N(size, a);
unsigned char *XMALLOC_N(eltsize * size, data);
- for (u_int32_t i = 0; i < size; ++i) {
+ for (uint32_t i = 0; i < size; ++i) {
a[i] = &data[eltsize * i];
}
int r = omt_copy_data(a, src, eltsize);
@@ -855,7 +855,7 @@ void toku_omt_free_items_pool(OMT omt) {
}
int toku_omt_clone_noptr(OMT *dest, OMT src) {
- u_int32_t size = omt_size(src);
+ uint32_t size = omt_size(src);
if (size == 0) {
toku_omt_create(dest);
return 0;
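
The find_internal_*_array hunks above all share one shape: a binary search over the array slice, driven by a caller-supplied sign function h, tracking the best candidate index seen so far. A minimal standalone sketch of the find-zero variant, with a plain sorted int array and a hypothetical cmp_target standing in for OMTVALUE and h:

    #include <stdint.h>
    #include <stdio.h>

    #define NOTFOUND (-1)  // stands in for DB_NOTFOUND

    // Sign function standing in for h(OMTVALUE, extra): <0, 0, or >0
    // depending on where the element sits relative to the target.
    static int cmp_target(int value, int target) {
        return (value > target) - (value < target);
    }

    // Mirror of find_internal_zero_array: find the smallest i with
    // cmp_target(a[i], target) >= 0; return 0 on an exact zero, else NOTFOUND
    // with *index set to the smallest strictly-positive position (or n).
    static int find_zero(const int *a, uint32_t n, int target, uint32_t *index) {
        uint32_t min = 0, limit = n;
        uint32_t best_pos = UINT32_MAX, best_zero = UINT32_MAX;
        while (min != limit) {
            uint32_t mid = (min + limit) / 2;
            int hv = cmp_target(a[mid], target);
            if (hv < 0)      { min = mid + 1; }
            else if (hv > 0) { best_pos = mid;  limit = mid; }
            else             { best_zero = mid; limit = mid; }
        }
        if (best_zero != UINT32_MAX) { *index = best_zero; return 0; }
        *index = (best_pos != UINT32_MAX) ? best_pos : n;
        return NOTFOUND;
    }

    int main(void) {
        int a[] = {1, 3, 3, 7, 9};
        uint32_t i;
        int r = find_zero(a, 5, 3, &i);
        printf("find 3: r=%d i=%u\n", r, i);  // r=0  i=1 (smallest zero)
        r = find_zero(a, 5, 4, &i);
        printf("find 4: r=%d i=%u\n", r, i);  // r=-1 i=3 (first element > 4)
        return 0;
    }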
diff --git a/ft/omt.h b/ft/omt.h
index 531a7fe468b..15332f42209 100644
--- a/ft/omt.h
+++ b/ft/omt.h
@@ -69,7 +69,7 @@ int toku_omt_create (OMT *omtp);
// ENOMEM out of memory (and doesn't modify *omtp)
// Performance: constant time.
-int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, u_int32_t numvalues);
+int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, uint32_t numvalues);
// Effect: Create an OMT containing values. The number of values is in numvalues.
// Stores the new OMT in *omtp.
// Requires: omtp != NULL
@@ -83,7 +83,7 @@ int toku_omt_create_from_sorted_array(OMT *omtp, OMTVALUE *values, u_int32_t num
// If the N values are known in advance, are sorted, and
// the structure is empty, we can batch insert them much faster.
-int toku_omt_create_steal_sorted_array(OMT *omtp, OMTVALUE **valuesp, u_int32_t numvalues, u_int32_t steal_capacity);
+int toku_omt_create_steal_sorted_array(OMT *omtp, OMTVALUE **valuesp, uint32_t numvalues, uint32_t steal_capacity);
// Effect: Create an OMT containing values. The number of values is in numvalues.
// On success the OMT takes ownership of *valuesp array, and sets valuesp=NULL.
// Requires: omtp != NULL
@@ -115,12 +115,12 @@ void toku_omt_destroy(OMT *omtp);
// Rationale: Does not free the OMTVALUEs to reduce complexity.
// Performance: time=O(toku_omt_size(*omtp))
-u_int32_t toku_omt_size(OMT V);
+uint32_t toku_omt_size(OMT V);
// Effect: return |V|.
// Requires: V != NULL
// Performance: time=O(1)
-int toku_omt_iterate_on_range(OMT omt, u_int32_t left, u_int32_t right, int (*f)(OMTVALUE, u_int32_t, void*), void*v);
+int toku_omt_iterate_on_range(OMT omt, uint32_t left, uint32_t right, int (*f)(OMTVALUE, uint32_t, void*), void*v);
// Effect: Iterate over the values of the omt, from left to right, calling f on each value.
// The second argument passed to f is the index of the value.
// The third argument passed to f is v.
@@ -144,7 +144,7 @@ void toku_omt_free_items(OMT omt);
// destroy the OMT. However, destroying the OMT requires invalidating cursors. This cannot be done if the values of the OMT
// have been already freed. So, this function is written to invalidate cursors and free items.
-int toku_omt_iterate(OMT omt, int (*f)(OMTVALUE, u_int32_t, void*), void*v);
+int toku_omt_iterate(OMT omt, int (*f)(OMTVALUE, uint32_t, void*), void*v);
// Effect: Iterate over the values of the omt, from left to right, calling f on each value.
// The second argument passed to f is the index of the value.
// The third argument passed to f is v.
@@ -158,7 +158,7 @@ int toku_omt_iterate(OMT omt, int (*f)(OMTVALUE, u_int32_t, void*), void*v);
// Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in omt.
// Rationale: Although the functional iterator requires defining another function (as opposed to C++ style iterator), it is much easier to read.
-int toku_omt_insert_at(OMT omt, OMTVALUE value, u_int32_t idx);
+int toku_omt_insert_at(OMT omt, OMTVALUE value, uint32_t idx);
// Effect: Increases indexes of all items at slot >= index by 1.
// Insert value into the position at index.
//
@@ -170,7 +170,7 @@ int toku_omt_insert_at(OMT omt, OMTVALUE value, u_int32_t idx);
// Performance: time=O(\log N) amortized time.
// Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
-int toku_omt_set_at (OMT omt, OMTVALUE value, u_int32_t idx);
+int toku_omt_set_at (OMT omt, OMTVALUE value, uint32_t idx);
// Effect: Replaces the item at index with value.
// Returns:
// 0 success
@@ -179,7 +179,7 @@ int toku_omt_set_at (OMT omt, OMTVALUE value, u_int32_t idx);
// Performance: time=O(\log N)
// Rationale: The BRT needs to be able to replace a value with another copy of the same value (allocated in a different location)
-int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v, u_int32_t *idx);
+int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v, uint32_t *idx);
// Effect: Insert value into the OMT.
// If there is some i such that $h(V_i, v)=0$ then returns DB_KEYEXIST.
// Otherwise, let i be the minimum value such that $h(V_i, v)>0$.
@@ -197,7 +197,7 @@ int toku_omt_insert(OMT omt, OMTVALUE value, int(*h)(OMTVALUE, void*v), void *v,
// Performance: time=O(\log N) amortized.
// Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
-int toku_omt_delete_at(OMT omt, u_int32_t idx);
+int toku_omt_delete_at(OMT omt, uint32_t idx);
// Effect: Delete the item in slot index.
// Decreases indexes of all items at slot >= index by 1.
// Returns
@@ -207,7 +207,7 @@ int toku_omt_delete_at(OMT omt, u_int32_t idx);
// Rationale: To delete an item, first find its index using toku_omt_find, then delete it.
// Performance: time=O(\log N) amortized.
-int toku_omt_fetch (OMT V, u_int32_t i, OMTVALUE *v);
+int toku_omt_fetch (OMT V, uint32_t i, OMTVALUE *v);
// Effect: Set *v=V_i
// If c!=NULL then set c's abstract offset to i.
// Requires: v != NULL
@@ -222,14 +222,14 @@ int toku_omt_fetch (OMT V, u_int32_t i, OMTVALUE *v);
// function, the function must remove c's association with the old
// OMT, and associate it with the new OMT.
-int toku_omt_find_zero(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, u_int32_t *idx);
+int toku_omt_find_zero(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, OMTVALUE *value, uint32_t *idx);
// Effect: Find the smallest i such that h(V_i, extra)>=0
// If there is such an i and h(V_i,extra)==0 then set *index=i and return 0.
// If there is such an i and h(V_i,extra)>0 then set *index=i and return DB_NOTFOUND.
// If there is no such i then set *index=toku_omt_size(V) and return DB_NOTFOUND.
// Requires: index!=NULL
-int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int direction, OMTVALUE *value, u_int32_t *idx);
+int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int direction, OMTVALUE *value, uint32_t *idx);
// Effect:
// If direction >0 then find the smallest i such that h(V_i,extra)>0.
// If direction <0 then find the largest i such that h(V_i,extra)<0.
@@ -290,7 +290,7 @@ int toku_omt_find(OMT V, int (*h)(OMTVALUE, void*extra), void*extra, int directi
// -...-0...0+...+
// AC B
-int toku_omt_split_at(OMT omt, OMT *newomt, u_int32_t idx);
+int toku_omt_split_at(OMT omt, OMT *newomt, uint32_t idx);
// Effect: Create a new OMT, storing it in *newomt.
// The values to the right of index (starting at index) are moved to *newomt.
// Requires: omt != NULL
@@ -313,7 +313,7 @@ int toku_omt_merge(OMT leftomt, OMT rightomt, OMT *newomt);
// On error, nothing is modified.
// Performance: time=O(n) is acceptable, but one can imagine implementations that are O(\log n) worst-case.
-int toku_omt_clone(OMT *dest, OMT src, u_int32_t eltsize);
+int toku_omt_clone(OMT *dest, OMT src, uint32_t eltsize);
// Effect: Creates a copy of an omt.
// Sets *dest to the clone
// Each element is allocated separately with toku_xmalloc and is assumed to be eltsize big.
@@ -323,7 +323,7 @@ int toku_omt_clone(OMT *dest, OMT src, u_int32_t eltsize);
// Performance: time between O(n) and O(n log n), depending how long it
// takes to traverse src.
-int toku_omt_clone_pool(OMT *dest, OMT src, u_int32_t eltsize);
+int toku_omt_clone_pool(OMT *dest, OMT src, uint32_t eltsize);
// Effect: Creates a copy of an omt.
// Sets *dest to the clone
// Each element is copied to a contiguous buffer allocated with toku_xmalloc and each element is assumed to be eltsize big.
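
Taken together, the declarations above cover the common OMT call sequence. A hedged usage sketch, assuming this omt.h is on the include path and OMTVALUE is the usual void* handle; cmp_int and print_one are illustrative helpers, not part of the API:

    #include <stdint.h>
    #include <stdio.h>
    #include "omt.h"   // in-tree header, assumed on the include path

    // Order OMTVALUEs that are really int* payloads, relative to the probe *vp.
    static int cmp_int(OMTVALUE stored, void *vp) {
        int a = *(int *) stored;
        int b = *(int *) vp;
        return (a > b) - (a < b);
    }

    static int print_one(OMTVALUE v, uint32_t idx, void *extra) {
        (void) extra;
        printf("[%u]=%d\n", idx, *(int *) v);
        return 0;  // a nonzero return would stop the iteration, as documented above
    }

    int main(void) {
        OMT omt;
        int r = toku_omt_create(&omt);
        if (r) return r;
        static int vals[] = {30, 10, 20};
        for (int i = 0; i < 3; i++) {
            uint32_t idx;
            r = toku_omt_insert(omt, &vals[i], cmp_int, &vals[i], &idx);
            if (r) return r;                    // DB_KEYEXIST on duplicates
        }
        printf("size=%u\n", toku_omt_size(omt)); // 3
        r = toku_omt_iterate(omt, print_one, NULL); // prints 10, 20, 30 in order
        toku_omt_destroy(&omt);                  // frees the OMT, not the values
        return r;
    }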
diff --git a/ft/queue.cc b/ft/queue.cc
index 2209da25c7e..1e78151232e 100644
--- a/ft/queue.cc
+++ b/ft/queue.cc
@@ -17,15 +17,15 @@ struct qitem;
struct qitem {
void *item;
struct qitem *next;
- u_int64_t weight;
+ uint64_t weight;
};
struct queue {
- u_int64_t contents_weight; // how much stuff is in there?
- u_int64_t weight_limit; // Block enqueueing when the contents gets to be bigger than the weight.
+ uint64_t contents_weight; // how much stuff is in there?
+ uint64_t weight_limit; // Block enqueueing when the contents gets to be bigger than the weight.
struct qitem *head, *tail;
- BOOL eof;
+ bool eof;
toku_mutex_t mutex;
toku_cond_t cond;
@@ -43,7 +43,7 @@ struct queue {
// q->mutex and q->cond are used as condition variables.
-int queue_create (QUEUE *q, u_int64_t weight_limit)
+int queue_create (QUEUE *q, uint64_t weight_limit)
{
QUEUE MALLOC(result);
if (result==NULL) return get_error_errno();
@@ -51,7 +51,7 @@ int queue_create (QUEUE *q, u_int64_t weight_limit)
result->weight_limit = weight_limit;
result->head = NULL;
result->tail = NULL;
- result->eof = FALSE;
+ result->eof = false;
toku_mutex_init(&result->mutex, NULL);
toku_cond_init(&result->cond, NULL);
*q = result;
@@ -68,7 +68,7 @@ int queue_destroy (QUEUE q)
return 0;
}
-int queue_enq (QUEUE q, void *item, u_int64_t weight, u_int64_t *total_weight_after_enq)
+int queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq)
{
toku_mutex_lock(&q->mutex);
assert(!q->eof);
@@ -108,13 +108,13 @@ int queue_eof (QUEUE q)
{
toku_mutex_lock(&q->mutex);
assert(!q->eof);
- q->eof = TRUE;
+ q->eof = true;
toku_cond_signal(&q->cond);
toku_mutex_unlock(&q->mutex);
return 0;
}
-int queue_deq (QUEUE q, void **item, u_int64_t *weight, u_int64_t *total_weight_after_deq)
+int queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq)
{
toku_mutex_lock(&q->mutex);
int result;
diff --git a/ft/queue.h b/ft/queue.h
index 3b9d408edbc..5644d6d47a9 100644
--- a/ft/queue.h
+++ b/ft/queue.h
@@ -25,10 +25,10 @@
typedef struct queue *QUEUE;
-int queue_create (QUEUE *q, u_int64_t weight_limit);
+int queue_create (QUEUE *q, uint64_t weight_limit);
// Effect: Create a queue with a given weight limit. The queue is initially empty.
-int queue_enq (QUEUE q, void *item, u_int64_t weight, u_int64_t *total_weight_after_enq);
+int queue_enq (QUEUE q, void *item, uint64_t weight, uint64_t *total_weight_after_enq);
// Effect: Insert ITEM of weight WEIGHT into queue. If the resulting contents weigh too much then block (don't return) until the total weight is low enough.
// If total_weight_after_enq!=NULL then return the current weight of the items in the queue (after finishing blocking on overweight, and after enqueueing the item).
// If successful return 0.
@@ -39,7 +39,7 @@ int queue_eof (QUEUE q);
// Effect: Inform the queue that no more values will be inserted. After all the values that have been inserted are dequeued, further dequeue operations will return EOF.
// Returns 0 on success. On failure, things are pretty bad (likely to be some sort of mutex failure).
-int queue_deq (QUEUE q, void **item, u_int64_t *weight, u_int64_t *total_weight_after_deq);
+int queue_deq (QUEUE q, void **item, uint64_t *weight, uint64_t *total_weight_after_deq);
// Effect: Wait until the queue becomes nonempty. Then dequeue and return the oldest item. The item is returned in *ITEM.
// If weight!=NULL then return the item's weight in *weight.
// If total_weight_after_deq!=NULL then return the current weight of the items in the queue (after dequeuing the item).
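
The contract above (enqueue blocks on weight, dequeue returns EOF after the queue drains) in caller-sized form. A hedged sketch assuming this queue.h is on the include path; a real producer would run in its own thread, since queue_enq blocks once the weight limit is exceeded:

    #include <stdint.h>
    #include <stdio.h>
    #include "queue.h"   // in-tree header, assumed on the include path

    int main(void) {
        QUEUE q;
        int r = queue_create(&q, 1 << 20);  // weight limit big enough not to block
        if (r) return r;
        static int items[] = {1, 2, 3};
        for (int i = 0; i < 3; i++) {
            r = queue_enq(q, &items[i], sizeof items[i], NULL);
            if (r) return r;
        }
        queue_eof(q);                       // no more values will be inserted
        void *item;
        uint64_t w;
        while ((r = queue_deq(q, &item, &w, NULL)) == 0)
            printf("got %d (weight %llu)\n", *(int *) item, (unsigned long long) w);
        queue_destroy(q);
        return r == EOF ? 0 : r;            // EOF (as documented above) is a clean drain
    }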
diff --git a/ft/rbuf.h b/ft/rbuf.h
index 630bc5d2ad5..baf9cefcfb6 100644
--- a/ft/rbuf.h
+++ b/ft/rbuf.h
@@ -37,12 +37,12 @@ static inline unsigned char rbuf_char (struct rbuf *r) {
return r->buf[r->ndone++];
}
-static inline void rbuf_ma_u_int8_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), u_int8_t *num) {
+static inline void rbuf_ma_uint8_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint8_t *num) {
*num = rbuf_char(r);
}
-static inline void rbuf_ma_BOOL (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), BOOL *b) {
- u_int8_t n = rbuf_char(r);
+static inline void rbuf_ma_bool (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), bool *b) {
+ uint8_t n = rbuf_char(r);
*b = (n!=0);
}
@@ -50,7 +50,7 @@ static inline void rbuf_ma_BOOL (struct rbuf *r, MEMARENA ma __attribute__((__un
static unsigned int rbuf_network_int (struct rbuf *r) __attribute__((__unused__));
static unsigned int rbuf_network_int (struct rbuf *r) {
assert(r->ndone+4 <= r->size);
- u_int32_t result = toku_ntohl(*(u_int32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
+ uint32_t result = toku_ntohl(*(uint32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
r->ndone+=4;
return result;
}
@@ -58,7 +58,7 @@ static unsigned int rbuf_network_int (struct rbuf *r) {
static unsigned int rbuf_int (struct rbuf *r) {
#if 1
assert(r->ndone+4 <= r->size);
- u_int32_t result = toku_dtoh32(*(u_int32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
+ uint32_t result = toku_dtoh32(*(uint32_t*)(r->buf+r->ndone)); // This only works on machines where unaligned loads are OK.
r->ndone+=4;
return result;
#else
@@ -118,11 +118,11 @@ static inline void rbuf_ma_BLOCKNUM (struct rbuf *r, MEMARENA ma __attribute__((
*blocknum = rbuf_blocknum(r);
}
-static inline void rbuf_ma_u_int32_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), u_int32_t *num) {
+static inline void rbuf_ma_uint32_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint32_t *num) {
*num = rbuf_int(r);
}
-static inline void rbuf_ma_u_int64_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), u_int64_t *num) {
+static inline void rbuf_ma_uint64_t (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), uint64_t *num) {
*num = rbuf_ulonglong(r);
}
@@ -147,17 +147,17 @@ static inline void rbuf_FILENUMS(struct rbuf *r, FILENUMS *filenums) {
filenums->num = rbuf_int(r);
filenums->filenums = (FILENUM *) toku_malloc( filenums->num * sizeof(FILENUM) );
assert(filenums->filenums != NULL);
- for (u_int32_t i=0; i < filenums->num; i++) {
+ for (uint32_t i=0; i < filenums->num; i++) {
rbuf_FILENUM(r, &(filenums->filenums[i]));
}
}
// 2954
static inline void rbuf_ma_FILENUMS (struct rbuf *r, MEMARENA ma __attribute__((__unused__)), FILENUMS *filenums) {
- rbuf_ma_u_int32_t(r, ma, &(filenums->num));
+ rbuf_ma_uint32_t(r, ma, &(filenums->num));
filenums->filenums = (FILENUM *) malloc_in_memarena(ma, filenums->num * sizeof(FILENUM) );
assert(filenums->filenums != NULL);
- for (u_int32_t i=0; i < filenums->num; i++) {
+ for (uint32_t i=0; i < filenums->num; i++) {
rbuf_ma_FILENUM(r, ma, &(filenums->filenums[i]));
}
}
@@ -165,7 +165,7 @@ static inline void rbuf_ma_FILENUMS (struct rbuf *r, MEMARENA ma __attribute__((
// Don't try to use the same space, malloc it
static inline void rbuf_BYTESTRING (struct rbuf *r, BYTESTRING *bs) {
bs->len = rbuf_int(r);
- u_int32_t newndone = r->ndone + bs->len;
+ uint32_t newndone = r->ndone + bs->len;
assert(newndone <= r->size);
bs->data = (char *) toku_memdup(&r->buf[r->ndone], (size_t)bs->len);
assert(bs->data);
@@ -174,7 +174,7 @@ static inline void rbuf_BYTESTRING (struct rbuf *r, BYTESTRING *bs) {
static inline void rbuf_ma_BYTESTRING (struct rbuf *r, MEMARENA ma, BYTESTRING *bs) {
bs->len = rbuf_int(r);
- u_int32_t newndone = r->ndone + bs->len;
+ uint32_t newndone = r->ndone + bs->len;
assert(newndone <= r->size);
bs->data = (char *) memarena_memdup(ma, &r->buf[r->ndone], (size_t)bs->len);
assert(bs->data);
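
Both rbuf_int variants above dereference a cast pointer into the byte buffer, which their inline comments flag as relying on unaligned loads being OK. A standalone sketch of the alignment-safe alternative; memcpy typically compiles down to the same single load where that load is legal:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    // Alignment-safe stand-in for *(uint32_t*)(r->buf + r->ndone).
    static uint32_t read_u32(const unsigned char *buf, uint32_t *ndone) {
        uint32_t v;
        memcpy(&v, buf + *ndone, sizeof v);
        *ndone += 4;
        return v;  // raw bytes; toku_dtoh32/toku_ntohl would still apply on top
    }

    int main(void) {
        unsigned char buf[8] = {1, 0, 0, 0, 2, 0, 0, 0};
        uint32_t ndone = 0;
        uint32_t a = read_u32(buf, &ndone);
        uint32_t b = read_u32(buf, &ndone);
        printf("%u %u\n", a, b);  // "1 2" on a little-endian host
        return 0;
    }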
diff --git a/ft/recover.cc b/ft/recover.cc
index 49c8db09acb..afb11ed4c60 100644
--- a/ft/recover.cc
+++ b/ft/recover.cc
@@ -15,7 +15,7 @@ int tokudb_recovery_trace = 0; // turn on recovery tracing, d
//#define DO_VERIFY_COUNTS
#ifdef DO_VERIFY_COUNTS
-#define VERIFY_COUNTS(n) toku_verify_or_set_counts(n, FALSE)
+#define VERIFY_COUNTS(n) toku_verify_or_set_counts(n, false)
#else
#define VERIFY_COUNTS(n) ((void)0)
#endif
@@ -102,7 +102,7 @@ struct recover_env {
generate_row_for_del_func generate_row_for_del;
struct scan_state ss;
struct file_map fmap;
- BOOL goforward;
+ bool goforward;
    bool destroy_logger_at_end; // If true then destroy the logger when we are done. If false then set the logger into write-files mode when we are done with recovery.
};
typedef struct recover_env *RECOVER_ENV;
@@ -121,11 +121,11 @@ static uint32_t file_map_get_num_dictionaries(struct file_map *fmap) {
return toku_omt_size(fmap->filenums);
}
-static void file_map_close_dictionaries(struct file_map *fmap, BOOL recovery_succeeded, LSN oplsn) {
+static void file_map_close_dictionaries(struct file_map *fmap, bool recovery_succeeded, LSN oplsn) {
int r;
while (1) {
- u_int32_t n = toku_omt_size(fmap->filenums);
+ uint32_t n = toku_omt_size(fmap->filenums);
if (n == 0)
break;
OMTVALUE v;
@@ -167,7 +167,7 @@ static int file_map_insert (struct file_map *fmap, FILENUM fnum, FT_HANDLE brt,
}
static void file_map_remove(struct file_map *fmap, FILENUM fnum) {
- OMTVALUE v; u_int32_t idx;
+ OMTVALUE v; uint32_t idx;
int r = toku_omt_find_zero(fmap->filenums, file_map_h, &fnum, &v, &idx);
if (r == 0) {
struct file_map_tuple *CAST_FROM_VOIDP(tuple, v);
@@ -179,7 +179,7 @@ static void file_map_remove(struct file_map *fmap, FILENUM fnum) {
// Look up file info: given FILENUM, return file_map_tuple (or DB_NOTFOUND)
static int file_map_find(struct file_map *fmap, FILENUM fnum, struct file_map_tuple **file_map_tuple) {
- OMTVALUE v; u_int32_t idx;
+ OMTVALUE v; uint32_t idx;
int r = toku_omt_find_zero(fmap->filenums, file_map_h, &fnum, &v, &idx);
if (r == 0) {
struct file_map_tuple *CAST_FROM_VOIDP(tuple, v);
@@ -211,7 +211,7 @@ static int recover_env_init (RECOVER_ENV renv,
r = toku_logger_create(&renv->logger);
assert(r == 0);
}
- toku_logger_write_log_files(renv->logger, FALSE);
+ toku_logger_write_log_files(renv->logger, false);
r = toku_create_cachetable(&renv->ct, cachetable_size ? cachetable_size : 1<<25, (LSN){0}, renv->logger);
assert(r == 0);
toku_cachetable_set_env_dir(renv->ct, env_dir);
@@ -225,7 +225,7 @@ static int recover_env_init (RECOVER_ENV renv,
renv->generate_row_for_put = generate_row_for_put;
renv->generate_row_for_del = generate_row_for_del;
file_map_init(&renv->fmap);
- renv->goforward = FALSE;
+ renv->goforward = false;
if (tokudb_recovery_trace)
fprintf(stderr, "%s:%d\n", __FUNCTION__, __LINE__);
@@ -264,7 +264,7 @@ static const char *recover_state(RECOVER_ENV renv) {
}
// Open the file if it is not already open. If it is already open, then do nothing.
-static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_create, int UU(mode), BYTESTRING *bs_iname, FILENUM filenum, u_int32_t treeflags,
+static int internal_recover_fopen_or_fcreate (RECOVER_ENV renv, bool must_create, int UU(mode), BYTESTRING *bs_iname, FILENUM filenum, uint32_t treeflags,
TOKUTXN txn, uint32_t nodesize, uint32_t basementnodesize, enum toku_compression_method compression_method, LSN max_acceptable_lsn) {
int r;
FT_HANDLE brt = NULL;
@@ -359,7 +359,7 @@ static int toku_recover_backward_begin_checkpoint (struct logtype_begin_checkpoi
assert(l->lsn.lsn == renv->ss.checkpoint_begin_lsn.lsn);
renv->ss.ss = FORWARD_BETWEEN_CHECKPOINT_BEGIN_END;
renv->ss.checkpoint_begin_timestamp = l->timestamp;
- renv->goforward = TRUE;
+ renv->goforward = true;
tnow = time(NULL);
fprintf(stderr, "%.24s Tokudb recovery turning around at begin checkpoint %" PRIu64 " time %" PRIu64 "\n",
ctime(&tnow), l->lsn.lsn,
@@ -427,7 +427,7 @@ static int toku_recover_fassociate (struct logtype_fassociate *l, RECOVER_ENV re
// If rollback file, specify which checkpointed version of file we need (not just the latest)
// because we cannot use a rollback log that is later than the last complete checkpoint. See #3113.
{
- BOOL rollback_file = (0==strcmp(fname, ROLLBACK_CACHEFILE_NAME));
+ bool rollback_file = (0==strcmp(fname, ROLLBACK_CACHEFILE_NAME));
LSN max_acceptable_lsn = MAX_LSN;
if (rollback_file) {
max_acceptable_lsn = renv->ss.checkpoint_begin_lsn;
@@ -437,7 +437,7 @@ static int toku_recover_fassociate (struct logtype_fassociate *l, RECOVER_ENV re
r = toku_ft_handle_open_recovery(t, ROLLBACK_CACHEFILE_NAME, false, false, renv->ct, (TOKUTXN)NULL, l->filenum, max_acceptable_lsn);
renv->logger->rollback_cachefile = t->ft->cf;
} else {
- r = internal_recover_fopen_or_fcreate(renv, FALSE, 0, &l->iname, l->filenum, l->treeflags, NULL, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, max_acceptable_lsn);
+ r = internal_recover_fopen_or_fcreate(renv, false, 0, &l->iname, l->filenum, l->treeflags, NULL, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, max_acceptable_lsn);
assert(r==0);
}
}
@@ -503,16 +503,16 @@ static int recover_xstillopen_internal (TOKUTXN *txnp,
LSN UU(lsn),
TXNID xid,
TXNID parentxid,
- u_int64_t rollentry_raw_count,
+ uint64_t rollentry_raw_count,
FILENUMS open_filenums,
bool force_fsync_on_commit,
- u_int64_t num_rollback_nodes,
- u_int64_t num_rollentries,
+ uint64_t num_rollback_nodes,
+ uint64_t num_rollentries,
BLOCKNUM spilled_rollback_head,
BLOCKNUM spilled_rollback_tail,
BLOCKNUM current_rollback,
- u_int32_t UU(crc),
- u_int32_t UU(len),
+ uint32_t UU(crc),
+ uint32_t UU(len),
RECOVER_ENV renv) {
int r;
*txnp = NULL;
@@ -685,7 +685,7 @@ static int toku_recover_xcommit (struct logtype_xcommit *l, RECOVER_ENV renv) {
assert(txn!=NULL);
// commit the transaction
- r = toku_txn_commit_with_lsn(txn, TRUE, l->lsn,
+ r = toku_txn_commit_with_lsn(txn, true, l->lsn,
NULL, NULL);
assert(r == 0);
@@ -778,7 +778,7 @@ static int toku_recover_fcreate (struct logtype_fcreate *l, RECOVER_ENV renv) {
toku_free(iname_in_cwd);
toku_free(iname);
- BOOL must_create = TRUE;
+ bool must_create = true;
r = internal_recover_fopen_or_fcreate(renv, must_create, l->mode, &l->iname, l->filenum, l->treeflags, txn, l->nodesize, l->basementnodesize, (enum toku_compression_method) l->compression_method, MAX_LSN);
return r;
}
@@ -798,7 +798,7 @@ static int toku_recover_fopen (struct logtype_fopen *l, RECOVER_ENV renv) {
r = file_map_find(&renv->fmap, l->filenum, &tuple);
assert(r==DB_NOTFOUND);
- BOOL must_create = FALSE;
+ bool must_create = false;
TOKUTXN txn = NULL;
char *fname = fixup_fname(&l->iname);
@@ -838,7 +838,7 @@ static int toku_recover_change_fdescriptor (struct logtype_change_fdescriptor *l
tuple->ft_handle,
&old_descriptor,
&new_descriptor,
- FALSE,
+ false,
txn,
l->update_cmp_descriptor
);
@@ -912,7 +912,7 @@ static int toku_recover_enq_insert (struct logtype_enq_insert *l, RECOVER_ENV re
DBT keydbt, valdbt;
toku_fill_dbt(&keydbt, l->key.data, l->key.len);
toku_fill_dbt(&valdbt, l->value.data, l->value.len);
- r = toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, TRUE, l->lsn, FALSE, FT_INSERT);
+ r = toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, true, l->lsn, false, FT_INSERT);
assert(r == 0);
toku_txn_maybe_note_ft(txn, tuple->ft_handle->ft);
}
@@ -937,7 +937,7 @@ static int toku_recover_enq_insert_no_overwrite (struct logtype_enq_insert_no_ov
DBT keydbt, valdbt;
toku_fill_dbt(&keydbt, l->key.data, l->key.len);
toku_fill_dbt(&valdbt, l->value.data, l->value.len);
- r = toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, TRUE, l->lsn, FALSE, FT_INSERT_NO_OVERWRITE);
+ r = toku_ft_maybe_insert(tuple->ft_handle, &keydbt, &valdbt, txn, true, l->lsn, false, FT_INSERT_NO_OVERWRITE);
assert(r == 0);
}
return 0;
@@ -960,7 +960,7 @@ static int toku_recover_enq_delete_any (struct logtype_enq_delete_any *l, RECOVE
//Maybe do the deletion if we found the cachefile.
DBT keydbt;
toku_fill_dbt(&keydbt, l->key.data, l->key.len);
- r = toku_ft_maybe_delete(tuple->ft_handle, &keydbt, txn, TRUE, l->lsn, FALSE);
+ r = toku_ft_maybe_delete(tuple->ft_handle, &keydbt, txn, true, l->lsn, false);
assert(r == 0);
}
return 0;
@@ -978,7 +978,7 @@ static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple
assert(r == 0);
assert(txn!=NULL);
DB *src_db = NULL;
- BOOL do_inserts = TRUE;
+ bool do_inserts = true;
{
struct file_map_tuple *tuple = NULL;
r = file_map_find(&renv->fmap, l->src_filenum, &tuple);
@@ -988,7 +988,7 @@ static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple
if (r == 0)
src_db = &tuple->fake_db;
else
- do_inserts = FALSE; // src file was probably deleted, #3129
+ do_inserts = false; // src file was probably deleted, #3129
}
}
@@ -1007,7 +1007,7 @@ static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple
DB *db = &tuple->fake_db;
r = renv->generate_row_for_put(db, src_db, &dest_key, &dest_val, &src_key, &src_val);
assert(r==0);
- r = toku_ft_maybe_insert(tuple->ft_handle, &dest_key, &dest_val, txn, TRUE, l->lsn, FALSE, FT_INSERT);
+ r = toku_ft_maybe_insert(tuple->ft_handle, &dest_key, &dest_val, txn, true, l->lsn, false, FT_INSERT);
assert(r == 0);
//flags==0 means generate_row_for_put callback changed it
@@ -1040,7 +1040,7 @@ static int toku_recover_enq_delete_multiple (struct logtype_enq_delete_multiple
assert(r == 0);
assert(txn!=NULL);
DB *src_db = NULL;
- BOOL do_deletes = TRUE;
+ bool do_deletes = true;
{
struct file_map_tuple *tuple = NULL;
r = file_map_find(&renv->fmap, l->src_filenum, &tuple);
@@ -1050,7 +1050,7 @@ static int toku_recover_enq_delete_multiple (struct logtype_enq_delete_multiple
if (r == 0)
src_db = &tuple->fake_db;
else
- do_deletes = FALSE; // src file was probably deleted, #3129
+ do_deletes = false; // src file was probably deleted, #3129
}
}
@@ -1068,7 +1068,7 @@ static int toku_recover_enq_delete_multiple (struct logtype_enq_delete_multiple
DB *db = &tuple->fake_db;
r = renv->generate_row_for_del(db, src_db, &dest_key, &src_key, &src_val);
assert(r==0);
- r = toku_ft_maybe_delete(tuple->ft_handle, &dest_key, txn, TRUE, l->lsn, FALSE);
+ r = toku_ft_maybe_delete(tuple->ft_handle, &dest_key, txn, true, l->lsn, false);
assert(r == 0);
//flags==0 indicates the return values are stored in temporary memory that does
@@ -1102,8 +1102,8 @@ static int toku_recover_enq_update(struct logtype_enq_update *l, RECOVER_ENV ren
DBT key, extra;
toku_fill_dbt(&key, l->key.data, l->key.len);
toku_fill_dbt(&extra, l->extra.data, l->extra.len);
- r = toku_ft_maybe_update(tuple->ft_handle, &key, &extra, txn, TRUE, l->lsn,
- FALSE);
+ r = toku_ft_maybe_update(tuple->ft_handle, &key, &extra, txn, true, l->lsn,
+ false);
assert(r == 0);
}
return 0;
@@ -1121,8 +1121,8 @@ static int toku_recover_enq_updatebroadcast(struct logtype_enq_updatebroadcast *
// Maybe do the update broadcast if we found the cachefile.
DBT extra;
toku_fill_dbt(&extra, l->extra.data, l->extra.len);
- r = toku_ft_maybe_update_broadcast(tuple->ft_handle, &extra, txn, TRUE,
- l->lsn, FALSE, l->is_resetting_op);
+ r = toku_ft_maybe_update_broadcast(tuple->ft_handle, &extra, txn, true,
+ l->lsn, false, l->is_resetting_op);
assert(r == 0);
}
return 0;
@@ -1210,15 +1210,15 @@ static int toku_recover_backward_hot_index(struct logtype_hot_index *UU(l), RECO
// Effects: If there are no log files, or if there is a clean "shutdown" at
// the end of the log, then we don't need recovery to run.
-// Returns: TRUE if we need recovery, otherwise FALSE.
-int tokudb_needs_recovery(const char *log_dir, BOOL ignore_log_empty) {
+// Returns: true if we need recovery, otherwise false.
+int tokudb_needs_recovery(const char *log_dir, bool ignore_log_empty) {
int needs_recovery;
int r;
TOKULOGCURSOR logcursor = NULL;
r = toku_logcursor_create(&logcursor, log_dir);
if (r != 0) {
- needs_recovery = TRUE; goto exit;
+ needs_recovery = true; goto exit;
}
struct log_entry *le;
@@ -1499,12 +1499,12 @@ static int do_recovery(RECOVER_ENV renv, const char *env_dir, const char *log_di
tnow = time(NULL);
fprintf(stderr, "%.24s Tokudb recovery closing %" PRIu32 " dictionar%s\n", ctime(&tnow), n, n > 1 ? "ies" : "y");
}
- file_map_close_dictionaries(&renv->fmap, TRUE, lastlsn);
+ file_map_close_dictionaries(&renv->fmap, true, lastlsn);
{
// write a recovery log entry
BYTESTRING recover_comment = { strlen("recover"), (char *) "recover" };
- r = toku_log_comment(renv->logger, NULL, TRUE, 0, recover_comment);
+ r = toku_log_comment(renv->logger, NULL, true, 0, recover_comment);
assert(r == 0);
}
@@ -1577,7 +1577,7 @@ int tokudb_recover(DB_ENV *env,
return r;
int rr = 0;
- if (tokudb_needs_recovery(log_dir, FALSE)) {
+ if (tokudb_needs_recovery(log_dir, false)) {
struct recover_env renv;
r = recover_env_init(&renv,
env_dir,
@@ -1594,7 +1594,7 @@ int tokudb_recover(DB_ENV *env,
rr = do_recovery(&renv, env_dir, log_dir);
- recover_env_cleanup(&renv, (BOOL)(rr == 0));
+ recover_env_cleanup(&renv, (bool)(rr == 0));
}
r = toku_recover_unlock(lockfd);
diff --git a/ft/recover.h b/ft/recover.h
index c800aa1155c..d65cec73486 100644
--- a/ft/recover.h
+++ b/ft/recover.h
@@ -35,8 +35,8 @@ int tokudb_recover (DB_ENV *env,
// Effect: Check the tokudb logs to determine whether or not we need to run recovery.
// If the log is empty or if there is a clean shutdown at the end of the log, then we
// don't need to run recovery.
-// Returns: TRUE if we need recovery, otherwise FALSE.
-int tokudb_needs_recovery(const char *logdir, BOOL ignore_empty_log);
+// Returns: true if we need recovery, otherwise false.
+int tokudb_needs_recovery(const char *logdir, bool ignore_empty_log);
// Return 0 if recovery log exists, ENOENT if log is missing
int tokudb_recover_log_exists(const char * log_dir);
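
The probe above implies the caller-side pattern: test the log directory first and pay for recovery only when the shutdown was not clean. A hedged sketch assuming this recover.h is on the include path; the full tokudb_recover argument list is elided:

    #include <stdio.h>
    #include "recover.h"   // in-tree header, assumed on the include path

    static int maybe_recover(const char *log_dir) {
        if (!tokudb_needs_recovery(log_dir, false)) {
            printf("clean shutdown in %s, skipping recovery\n", log_dir);
            return 0;
        }
        printf("%s needs recovery\n", log_dir);
        // ... call tokudb_recover(...) here with the environment's callbacks ...
        return 1;
    }

    int main(void) {
        return maybe_recover("./logs");
    }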
diff --git a/ft/roll.cc b/ft/roll.cc
index e851dd7ad74..4df42e8c458 100644
--- a/ft/roll.cc
+++ b/ft/roll.cc
@@ -157,12 +157,12 @@ static int find_ft_from_filenum (OMTVALUE v, void *filenumvp) {
return 0;
}
-// Input arg reset_root_xid_that_created TRUE means that this operation has changed the definition of this dictionary.
+// Input arg reset_root_xid_that_created==true means that this operation has changed the definition of this dictionary.
// (Example use is for schema change committed with txn that inserted cmdupdatebroadcast message.)
// The oplsn argument is ZERO_LSN for normal operation. When this function is called for recovery, it has the LSN of
// the operation (insert, delete, update, etc).
static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key, BYTESTRING *data, TOKUTXN txn, LSN oplsn,
- BOOL reset_root_xid_that_created) {
+ bool reset_root_xid_that_created) {
CACHEFILE cf;
// 2954 - ignore messages for aborted hot-index
int r = 0;
@@ -220,7 +220,7 @@ static int do_nothing_with_filenum(TOKUTXN UU(txn), FILENUM UU(filenum)) {
int toku_commit_cmdinsert (FILENUM filenum, BYTESTRING UU(key), TOKUTXN txn, LSN UU(oplsn)) {
#if TOKU_DO_COMMIT_CMD_INSERT
- return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
#else
return do_nothing_with_filenum(txn, filenum);
#endif
@@ -232,7 +232,7 @@ toku_rollback_cmdinsert (FILENUM filenum,
TOKUTXN txn,
LSN oplsn)
{
- return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
}
int
@@ -241,7 +241,7 @@ toku_commit_cmdupdate(FILENUM filenum,
TOKUTXN txn,
LSN oplsn)
{
- return do_insertion(FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion(FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
}
int
@@ -250,18 +250,18 @@ toku_rollback_cmdupdate(FILENUM filenum,
TOKUTXN txn,
LSN oplsn)
{
- return do_insertion(FT_ABORT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion(FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
}
int
toku_commit_cmdupdatebroadcast(FILENUM filenum,
- BOOL is_resetting_op,
+ bool is_resetting_op,
TOKUTXN txn,
LSN oplsn)
{
// if is_resetting_op, reset root_xid_that_created in
// relevant ft.
- BOOL reset_root_xid_that_created = (is_resetting_op ? TRUE : FALSE);
+ bool reset_root_xid_that_created = (is_resetting_op ? true : false);
const enum ft_msg_type msg_type = (is_resetting_op
? FT_COMMIT_BROADCAST_ALL
: FT_COMMIT_BROADCAST_TXN);
@@ -271,12 +271,12 @@ toku_commit_cmdupdatebroadcast(FILENUM filenum,
int
toku_rollback_cmdupdatebroadcast(FILENUM filenum,
- BOOL UU(is_resetting_op),
+ bool UU(is_resetting_op),
TOKUTXN txn,
LSN oplsn)
{
BYTESTRING nullkey = { 0, NULL };
- return do_insertion(FT_ABORT_BROADCAST_TXN, filenum, nullkey, 0, txn, oplsn, FALSE);
+ return do_insertion(FT_ABORT_BROADCAST_TXN, filenum, nullkey, 0, txn, oplsn, false);
}
int
@@ -286,7 +286,7 @@ toku_commit_cmddelete (FILENUM filenum,
LSN oplsn)
{
#if TOKU_DO_COMMIT_CMD_DELETE
- return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion (FT_COMMIT_ANY, filenum, key, 0, txn, oplsn, false);
#else
key = key; oplsn = oplsn;
return do_nothing_with_filenum(txn, filenum);
@@ -299,7 +299,7 @@ toku_rollback_cmddelete (FILENUM filenum,
TOKUTXN txn,
LSN oplsn)
{
- return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, FALSE);
+ return do_insertion (FT_ABORT_ANY, filenum, key, 0, txn, oplsn, false);
}
static int
@@ -319,7 +319,7 @@ toku_apply_rollinclude (TXNID xid,
uint32_t next_log_hash = spilled_tail_hash;
uint64_t last_sequence = num_nodes;
- BOOL found_head = FALSE;
+ bool found_head = false;
assert(next_log.b != ROLLBACK_NONE.b);
while (next_log.b != ROLLBACK_NONE.b) {
//pin log
@@ -337,7 +337,7 @@ toku_apply_rollinclude (TXNID xid,
}
if (next_log.b == spilled_head.b) {
assert(!found_head);
- found_head = TRUE;
+ found_head = true;
assert(log->sequence == 0);
}
next_log = log->previous;
diff --git a/ft/rollback-apply.cc b/ft/rollback-apply.cc
index 00924f8bb92..fa03c21d888 100644
--- a/ft/rollback-apply.cc
+++ b/ft/rollback-apply.cc
@@ -25,7 +25,7 @@ int toku_commit_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn) {
rolltype_dispatch_assign(item, toku_commit_, r, txn, lsn);
txn->roll_info.num_rollentries_processed++;
if (txn->roll_info.num_rollentries_processed % 1024 == 0) {
- poll_txn_progress_function(txn, TRUE, FALSE);
+ poll_txn_progress_function(txn, true, false);
}
return r;
}
@@ -35,13 +35,13 @@ int toku_abort_rollback_item (TOKUTXN txn, struct roll_entry *item, LSN lsn) {
rolltype_dispatch_assign(item, toku_rollback_, r, txn, lsn);
txn->roll_info.num_rollentries_processed++;
if (txn->roll_info.num_rollentries_processed % 1024 == 0) {
- poll_txn_progress_function(txn, FALSE, FALSE);
+ poll_txn_progress_function(txn, false, false);
}
return r;
}
static int
-note_ft_used_in_txns_parent(OMTVALUE ftv, u_int32_t UU(index), void *txnv) {
+note_ft_used_in_txns_parent(OMTVALUE ftv, uint32_t UU(index), void *txnv) {
TOKUTXN CAST_FROM_VOIDP(child, txnv);
TOKUTXN parent = child->parent;
FT CAST_FROM_VOIDP(ft, ftv);
@@ -68,11 +68,11 @@ apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) {
BLOCKNUM next_log = ROLLBACK_NONE;
uint32_t next_log_hash = 0;
- BOOL is_current = FALSE;
+ bool is_current = false;
if (txn_has_current_rollback_log(txn)) {
next_log = txn->roll_info.current_rollback;
next_log_hash = txn->roll_info.current_rollback_hash;
- is_current = TRUE;
+ is_current = true;
}
else if (txn_has_spilled_rollback_logs(txn)) {
next_log = txn->roll_info.spilled_rollback_tail;
@@ -80,7 +80,7 @@ apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) {
}
uint64_t last_sequence = txn->roll_info.num_rollback_nodes;
- BOOL found_head = FALSE;
+ bool found_head = false;
while (next_log.b != ROLLBACK_NONE.b) {
ROLLBACK_LOG_NODE log;
//pin log
@@ -99,7 +99,7 @@ apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) {
}
if (next_log.b == txn->roll_info.spilled_rollback_head.b) {
assert(!found_head);
- found_head = TRUE;
+ found_head = true;
assert(log->sequence == 0);
}
next_log = log->previous;
@@ -110,7 +110,7 @@ apply_txn(TOKUTXN txn, LSN lsn, apply_rollback_item func) {
if (is_current) {
txn->roll_info.current_rollback = ROLLBACK_NONE;
txn->roll_info.current_rollback_hash = 0;
- is_current = FALSE;
+ is_current = false;
}
else {
txn->roll_info.spilled_rollback_tail = next_log;
@@ -199,7 +199,7 @@ int toku_rollback_commit(TOKUTXN txn, LSN lsn) {
// Merge the list of headers that must be checkpointed before commit
if (txn->checkpoint_needed_before_commit) {
- txn->parent->checkpoint_needed_before_commit = TRUE;
+ txn->parent->checkpoint_needed_before_commit = true;
}
//If this transaction needs an fsync (if it commits)
diff --git a/ft/rollback-ct-callbacks.cc b/ft/rollback-ct-callbacks.cc
index dbb588ef1cd..4e0444de74b 100644
--- a/ft/rollback-ct-callbacks.cc
+++ b/ft/rollback-ct-callbacks.cc
@@ -26,7 +26,7 @@ rollback_log_destroy(ROLLBACK_LOG_NODE log) {
// On success return nbytes.
void toku_rollback_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname,
void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size,
- BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL UU(is_clone)) {
+ bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone)) {
int r;
ROLLBACK_LOG_NODE CAST_FROM_VOIDP(log, rollback_v);
FT CAST_FROM_VOIDP(h, extraargs);
@@ -53,7 +53,7 @@ void toku_rollback_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname
}
}
-int toku_rollback_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname, u_int32_t fullhash,
+int toku_rollback_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname, uint32_t fullhash,
void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs) {
int r;
FT CAST_FROM_VOIDP(h, extraargs);
@@ -94,15 +94,15 @@ int toku_rollback_pe_callback (
}
// partial fetch is never required for a rollback log node
-BOOL toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return FALSE;
+bool toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
}
// a rollback node should never be partial fetched,
// because we always say it is not required.
// (pf req callback always returns false)
int toku_rollback_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
- assert(FALSE);
+ assert(false);
return 0;
}
@@ -110,11 +110,11 @@ int toku_rollback_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* U
int toku_rollback_cleaner_callback (
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
)
{
- assert(FALSE);
+ assert(false);
return 0;
}
diff --git a/ft/rollback-ct-callbacks.h b/ft/rollback-ct-callbacks.h
index 6b6b16f935c..a1224be8f48 100644
--- a/ft/rollback-ct-callbacks.h
+++ b/ft/rollback-ct-callbacks.h
@@ -11,8 +11,8 @@
#include "cachetable.h"
#include "fttypes.h"
-void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL UU(is_clone));
-int toku_rollback_fetch_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, u_int32_t fullhash, void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs);
+void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone));
+int toku_rollback_fetch_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, uint32_t fullhash, void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs);
void toku_rollback_pe_est_callback(
void* rollback_v,
void* UU(disk_data),
@@ -26,12 +26,12 @@ int toku_rollback_pe_callback (
PAIR_ATTR* new_attr,
void* UU(extraargs)
) ;
-BOOL toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) ;
+bool toku_rollback_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) ;
int toku_rollback_pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep));
int toku_rollback_cleaner_callback (
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
);
diff --git a/ft/rollback.cc b/ft/rollback.cc
index 5165f1fef1d..eebbefb209c 100644
--- a/ft/rollback.cc
+++ b/ft/rollback.cc
@@ -7,7 +7,7 @@
#include "includes.h"
#include "rollback-ct-callbacks.h"
-static void rollback_unpin_remove_callback(CACHEKEY* cachekey, BOOL for_checkpoint, void* extra) {
+static void rollback_unpin_remove_callback(CACHEKEY* cachekey, bool for_checkpoint, void* extra) {
FT CAST_FROM_VOIDP(h, extra);
toku_free_blocknum(
h->blocktable,
@@ -50,7 +50,7 @@ static inline PAIR_ATTR make_rollback_pair_attr(long size) {
.leaf_size = 0,
.rollback_size = size,
.cache_pressure_size = 0,
- .is_valid = TRUE
+ .is_valid = true
};
return result;
}
@@ -76,7 +76,7 @@ static void rollback_log_create (TOKUTXN txn, BLOCKNUM previous, uint32_t previo
log->layout_version = FT_LAYOUT_VERSION;
log->layout_version_original = FT_LAYOUT_VERSION;
log->layout_version_read_from_disk = FT_LAYOUT_VERSION;
- log->dirty = TRUE;
+ log->dirty = true;
log->txnid = txn->txnid64;
log->sequence = txn->roll_info.num_rollback_nodes++;
toku_allocate_blocknum(h->blocktable, &log->blocknum, h);
@@ -158,7 +158,7 @@ exit:
}
// Return the number of bytes that went into the rollback data structure (the uncompressed count if there is compression)
-int toku_logger_txn_rollback_raw_count(TOKUTXN txn, u_int64_t *raw_count)
+int toku_logger_txn_rollback_raw_count(TOKUTXN txn, uint64_t *raw_count)
{
toku_txn_lock(txn);
*raw_count = txn->roll_info.rollentry_raw_count;
@@ -175,7 +175,7 @@ void toku_maybe_prefetch_previous_rollback_log(TOKUTXN txn, ROLLBACK_LOG_NODE lo
uint32_t hash = log->previous_hash;
CACHEFILE cf = txn->logger->rollback_cachefile;
FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf));
- BOOL doing_prefetch = FALSE;
+ bool doing_prefetch = false;
r = toku_cachefile_prefetch(cf, name, hash,
get_write_callbacks_for_rollback_log(h),
toku_rollback_fetch_callback,
@@ -204,7 +204,7 @@ void toku_get_and_pin_rollback_log(TOKUTXN txn, BLOCKNUM blocknum, uint32_t hash
toku_rollback_fetch_callback,
toku_rollback_pf_req_callback,
toku_rollback_pf_callback,
- TRUE, // may_modify_value
+ true, // may_modify_value
h
);
assert(r == 0);
diff --git a/ft/rollback.h b/ft/rollback.h
index 7c22d923268..fce795f4c8e 100644
--- a/ft/rollback.h
+++ b/ft/rollback.h
@@ -47,7 +47,7 @@ void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len);
void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log);
void toku_txn_maybe_note_ft (TOKUTXN txn, FT h);
-int toku_logger_txn_rollback_raw_count(TOKUTXN txn, u_int64_t *raw_count);
+int toku_logger_txn_rollback_raw_count(TOKUTXN txn, uint64_t *raw_count);
int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind);
diff --git a/ft/rwlock.cc b/ft/rwlock.cc
index 7b9ad937370..fe2241418f4 100644
--- a/ft/rwlock.cc
+++ b/ft/rwlock.cc
@@ -74,7 +74,7 @@
* c) release mutex
* d) wait on the semaphore
* e) return success when the semaphore releases
- * 3) Otherwise set writer=TRUE, release mutex and return success.
+ * 3) Otherwise set writer=true, release mutex and return success.
*
* To unlock a read rwlock:
* 1) Acquire mutex
@@ -90,7 +90,7 @@
* ii) return success
* b) Else if the first element is a writer
* i) pop the list
- * ii) set writer to TRUE
+ * ii) set writer to true
* iii) increment the semaphore
* iv) return success
*/
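
The block comment above describes the queued, semaphore-per-waiter design. A much smaller standalone sketch keeps only the state that comment tracks, a reader count plus a writer flag behind one mutex/condvar pair; it is correct but not FIFO-fair like the queued version, and it is not the ft implementation:

    #include <pthread.h>
    #include <stdbool.h>

    struct simple_rwlock {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        int  readers;   // active readers
        bool writer;    // true while a writer holds the lock
    };

    static void rw_init(struct simple_rwlock *rw) {
        pthread_mutex_init(&rw->mutex, NULL);
        pthread_cond_init(&rw->cond, NULL);
        rw->readers = 0;
        rw->writer = false;
    }

    static void read_lock(struct simple_rwlock *rw) {
        pthread_mutex_lock(&rw->mutex);
        while (rw->writer)                      // a writer is active: wait
            pthread_cond_wait(&rw->cond, &rw->mutex);
        rw->readers++;
        pthread_mutex_unlock(&rw->mutex);
    }

    static void read_unlock(struct simple_rwlock *rw) {
        pthread_mutex_lock(&rw->mutex);
        if (--rw->readers == 0)
            pthread_cond_broadcast(&rw->cond);  // last reader out: wake writers
        pthread_mutex_unlock(&rw->mutex);
    }

    static void write_lock(struct simple_rwlock *rw) {
        pthread_mutex_lock(&rw->mutex);
        while (rw->writer || rw->readers > 0)   // lock busy: wait
            pthread_cond_wait(&rw->cond, &rw->mutex);
        rw->writer = true;                      // step 3 above: set writer=true
        pthread_mutex_unlock(&rw->mutex);
    }

    static void write_unlock(struct simple_rwlock *rw) {
        pthread_mutex_lock(&rw->mutex);
        rw->writer = false;
        pthread_cond_broadcast(&rw->cond);
        pthread_mutex_unlock(&rw->mutex);
    }

    int main(void) {
        struct simple_rwlock rw;
        rw_init(&rw);
        read_lock(&rw);  read_unlock(&rw);      // single-threaded smoke test
        write_lock(&rw); write_unlock(&rw);
        return 0;
    }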
diff --git a/ft/sub_block.cc b/ft/sub_block.cc
index ab5662e64d0..5668764030a 100644
--- a/ft/sub_block.cc
+++ b/ft/sub_block.cc
@@ -36,7 +36,7 @@ void sub_block_init(SUB_BLOCK sub_block) {
// get the size of the compression header
size_t
sub_block_header_size(int n_sub_blocks) {
- return sizeof (u_int32_t) + n_sub_blocks * sizeof (struct stored_sub_block);
+ return sizeof (uint32_t) + n_sub_blocks * sizeof (struct stored_sub_block);
}
void
@@ -152,11 +152,11 @@ compress_work_init(struct compress_work *w, enum toku_compression_method method,
// cs_bound is the compressed size bound
// Returns the size of the compressed data
//
-u_int32_t
+uint32_t
compress_nocrc_sub_block(
struct sub_block *sub_block,
void* sb_compressed_ptr,
- u_int32_t cs_bound,
+ uint32_t cs_bound,
enum toku_compression_method method
)
{
@@ -257,9 +257,9 @@ compress_all_sub_blocks(int n_sub_blocks, struct sub_block sub_block[], char *un
// initialize the decompression work
void
decompress_work_init(struct decompress_work *dw,
- void *compress_ptr, u_int32_t compress_size,
- void *uncompress_ptr, u_int32_t uncompress_size,
- u_int32_t xsum) {
+ void *compress_ptr, uint32_t compress_size,
+ void *uncompress_ptr, uint32_t uncompress_size,
+ uint32_t xsum) {
dw->compress_ptr = compress_ptr;
dw->compress_size = compress_size;
dw->uncompress_ptr = uncompress_ptr;
@@ -272,11 +272,11 @@ int verbose_decompress_sub_block = 1;
// decompress one block
int
-decompress_sub_block(void *compress_ptr, u_int32_t compress_size, void *uncompress_ptr, u_int32_t uncompress_size, u_int32_t expected_xsum) {
+decompress_sub_block(void *compress_ptr, uint32_t compress_size, void *uncompress_ptr, uint32_t uncompress_size, uint32_t expected_xsum) {
int result = 0;
// verify checksum
- u_int32_t xsum = x1764_memory(compress_ptr, compress_size);
+ uint32_t xsum = x1764_memory(compress_ptr, compress_size);
if (xsum != expected_xsum) {
if (verbose_decompress_sub_block) fprintf(stderr, "%s:%d xsum %u expected %u\n", __FUNCTION__, __LINE__, xsum, expected_xsum);
result = EINVAL;
diff --git a/ft/sub_block.h b/ft/sub_block.h
index 83e48fd1521..6051d5d617a 100644
--- a/ft/sub_block.h
+++ b/ft/sub_block.h
@@ -19,19 +19,19 @@ static const int max_basement_node_compressed_size = 64*1024;
struct sub_block {
void *uncompressed_ptr;
- u_int32_t uncompressed_size;
+ uint32_t uncompressed_size;
void *compressed_ptr;
- u_int32_t compressed_size; // real compressed size
- u_int32_t compressed_size_bound; // estimated compressed size
+ uint32_t compressed_size; // real compressed size
+ uint32_t compressed_size_bound; // estimated compressed size
- u_int32_t xsum; // sub block checksum
+ uint32_t xsum; // sub block checksum
};
struct stored_sub_block {
- u_int32_t uncompressed_size;
- u_int32_t compressed_size;
- u_int32_t xsum;
+ uint32_t uncompressed_size;
+ uint32_t compressed_size;
+ uint32_t xsum;
};
void sub_block_init(SUB_BLOCK);
@@ -79,11 +79,11 @@ struct compress_work {
void
compress_work_init(struct compress_work *w, enum toku_compression_method method, struct sub_block *sub_block);
-u_int32_t
+uint32_t
compress_nocrc_sub_block(
struct sub_block *sub_block,
void* sb_compressed_ptr,
- u_int32_t cs_bound,
+ uint32_t cs_bound,
enum toku_compression_method method
);
@@ -100,22 +100,22 @@ struct decompress_work {
struct work base;
void *compress_ptr;
void *uncompress_ptr;
- u_int32_t compress_size;
- u_int32_t uncompress_size;
- u_int32_t xsum;
+ uint32_t compress_size;
+ uint32_t uncompress_size;
+ uint32_t xsum;
int error;
};
// initialize the decompression work
void
decompress_work_init(struct decompress_work *dw,
- void *compress_ptr, u_int32_t compress_size,
- void *uncompress_ptr, u_int32_t uncompress_size,
- u_int32_t xsum);
+ void *compress_ptr, uint32_t compress_size,
+ void *uncompress_ptr, uint32_t uncompress_size,
+ uint32_t xsum);
// decompress one block
int
-decompress_sub_block(void *compress_ptr, u_int32_t compress_size, void *uncompress_ptr, u_int32_t uncompress_size, u_int32_t expected_xsum);
+decompress_sub_block(void *compress_ptr, uint32_t compress_size, void *uncompress_ptr, uint32_t uncompress_size, uint32_t expected_xsum);
// decompress blocks until there is no more work to do
void *
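
sub_block_header_size above is the 4-byte sub-block count plus one stored_sub_block per block. The arithmetic checked standalone, assuming the struct packs with no padding (three uint32_t fields, 12 bytes):

    #include <stdint.h>
    #include <stdio.h>

    struct stored_sub_block {  // mirrors the on-disk entry declared above
        uint32_t uncompressed_size;
        uint32_t compressed_size;
        uint32_t xsum;
    };

    static size_t sub_block_header_size(int n_sub_blocks) {
        return sizeof(uint32_t) + n_sub_blocks * sizeof(struct stored_sub_block);
    }

    int main(void) {
        // 4 + 12n bytes: n=1 -> 16, n=2 -> 28, n=4 -> 52, n=8 -> 100
        for (int n = 1; n <= 8; n *= 2)
            printf("n=%d -> %zu bytes\n", n, sub_block_header_size(n));
        return 0;
    }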
diff --git a/ft/sub_block_map.h b/ft/sub_block_map.h
index dd2fd451d57..70a00659590 100644
--- a/ft/sub_block_map.h
+++ b/ft/sub_block_map.h
@@ -9,9 +9,9 @@
// Map objects to a sequence of sub block
struct sub_block_map {
- u_int32_t idx;
- u_int32_t offset;
- u_int32_t size;
+ uint32_t idx;
+ uint32_t offset;
+ uint32_t size;
};
enum {
@@ -19,7 +19,7 @@ enum {
};
static inline void
-sub_block_map_init(struct sub_block_map *sbmap, u_int32_t idx, u_int32_t offset, u_int32_t size) {
+sub_block_map_init(struct sub_block_map *sbmap, uint32_t idx, uint32_t offset, uint32_t size) {
sbmap->idx = idx;
sbmap->offset = offset;
sbmap->size = size;
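
sub_block_map_init above just records the triple (idx, offset, size). A hedged sketch of how a caller might carve one object across fixed-size sub blocks with it (map_object is a hypothetical helper, not from the real callers):

    #include <stdint.h>

    struct sub_block_map { uint32_t idx, offset, size; };

    static inline void
    sub_block_map_init(struct sub_block_map *sbmap, uint32_t idx, uint32_t offset, uint32_t size) {
        sbmap->idx = idx;
        sbmap->offset = offset;
        sbmap->size = size;
    }

    // Hypothetical helper: split a total-byte object into sb_size-byte pieces,
    // recording where each piece lands. sb_size must be nonzero and maps must
    // have room for ceil(total / sb_size) entries. Returns sub blocks used.
    static uint32_t map_object(struct sub_block_map *maps, uint32_t total, uint32_t sb_size) {
        uint32_t n = 0;
        for (uint32_t off = 0; off < total; off += sb_size, n++) {
            uint32_t len = (total - off < sb_size) ? (total - off) : sb_size;
            sub_block_map_init(&maps[n], n, off, len);
        }
        return n;
    }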
diff --git a/ft/tdb_logprint.cc b/ft/tdb_logprint.cc
index c1f2f5a210f..5edac51eaf1 100644
--- a/ft/tdb_logprint.cc
+++ b/ft/tdb_logprint.cc
@@ -9,8 +9,8 @@
#include "includes.h"
#if 0
-static u_int32_t crc=0;
-static u_int32_t actual_len=0;
+static uint32_t crc=0;
+static uint32_t actual_len=0;
static int get_char(void) {
int v = getchar();
@@ -21,17 +21,17 @@ static int get_char(void) {
return v;
}
-static u_int32_t get_uint32 (void) {
- u_int32_t a = get_char();
- u_int32_t b = get_char();
- u_int32_t c = get_char();
- u_int32_t d = get_char();
+static uint32_t get_uint32 (void) {
+ uint32_t a = get_char();
+ uint32_t b = get_char();
+ uint32_t c = get_char();
+ uint32_t d = get_char();
return (a<<24)|(b<<16)|(c<<8)|d;
}
-static u_int64_t get_uint64 (void) {
- u_int32_t hi = get_uint32();
- u_int32_t lo = get_uint32();
+static uint64_t get_uint64 (void) {
+ uint32_t hi = get_uint32();
+ uint32_t lo = get_uint32();
return ((((long long)hi) << 32)
|
lo);
@@ -48,7 +48,7 @@ static void transcribe_txnid (void) {
}
static void transcribe_fileid (void) {
- u_int32_t value = get_uint32();
+ uint32_t value = get_uint32();
printf(" fileid=%d", value);
}
@@ -59,30 +59,30 @@ static void transcribe_diskoff (void) {
}
static void transcribe_crc32 (void) {
- u_int32_t oldcrc=crc;
- u_int32_t l = get_uint32();
+ uint32_t oldcrc=crc;
+ uint32_t l = get_uint32();
printf(" crc=%08x", l);
assert(l==oldcrc);
}
static void transcribe_mode (void) {
- u_int32_t value = get_uint32();
+ uint32_t value = get_uint32();
printf(" mode=0%o", value);
}
static void transcribe_filenum(void) {
- u_int32_t value = get_uint32();
+ uint32_t value = get_uint32();
printf(" filenum=%d", value);
}
-static u_int32_t len1;
+static uint32_t len1;
static void transcribe_len1 (void) {
len1 = get_uint32();
//printf(" len=%d", len1);
}
static void transcribe_len (void) {
- u_int32_t l = get_uint32();
+ uint32_t l = get_uint32();
printf(" len=%d", l);
if (l!=actual_len) printf(" actual_len=%d", actual_len);
assert(l==actual_len);
@@ -90,11 +90,11 @@ static void transcribe_len (void) {
}
static void transcribe_key_or_data (char *what) {
- u_int32_t l = get_uint32();
+ uint32_t l = get_uint32();
unsigned int i;
printf(" %s(%d):\"", what, l);
for (i=0; i<l; i++) {
- u_int32_t c = get_char();
+ uint32_t c = get_char();
if (c=='\\') printf("\\\\");
else if (c=='\n') printf("\\n");
else if (c==' ') printf("\\ ");
@@ -124,7 +124,7 @@ static void transcribe_header (void) {
static void newmain (int count) {
int i;
- u_int32_t version;
+ uint32_t version;
int r = toku_read_and_print_logmagic(stdin, &version);
for (i=0; i!=count; i++) {
r = toku_logprint_one_record(stdout, stdin);
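
The get_uint32/get_uint64 pair above assembles big-endian integers a byte at a time; note that the original widens hi to long long before shifting, since shifting a 32-bit value left by 32 is undefined. A self-contained sketch of the same idea (EOF checks omitted for brevity):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_be32(FILE *f) {
        uint32_t a = (uint32_t) fgetc(f);
        uint32_t b = (uint32_t) fgetc(f);
        uint32_t c = (uint32_t) fgetc(f);
        uint32_t d = (uint32_t) fgetc(f);
        return (a << 24) | (b << 16) | (c << 8) | d;   // network byte order
    }

    static uint64_t read_be64(FILE *f) {
        uint64_t hi = read_be32(f);   // widen *before* shifting by 32
        uint64_t lo = read_be32(f);
        return (hi << 32) | lo;
    }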
diff --git a/ft/tests/block_allocator_test.cc b/ft/tests/block_allocator_test.cc
index 404dda4d1cf..4a3b81d4ea5 100644
--- a/ft/tests/block_allocator_test.cc
+++ b/ft/tests/block_allocator_test.cc
@@ -7,28 +7,28 @@
#include "test.h"
#include "includes.h"
-static void ba_alloc_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset) {
+static void ba_alloc_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) {
block_allocator_validate(ba);
block_allocator_alloc_block_at(ba, size, offset);
block_allocator_validate(ba);
}
-static void ba_alloc (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *answer) {
+static void ba_alloc (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *answer) {
block_allocator_validate(ba);
block_allocator_alloc_block(ba, size, answer);
block_allocator_validate(ba);
}
-static void ba_free (BLOCK_ALLOCATOR ba, u_int64_t offset) {
+static void ba_free (BLOCK_ALLOCATOR ba, uint64_t offset) {
block_allocator_validate(ba);
block_allocator_free_block(ba, offset);
block_allocator_validate(ba);
}
static void
-ba_check_l (BLOCK_ALLOCATOR ba, u_int64_t blocknum_in_layout_order, u_int64_t expected_offset, u_int64_t expected_size)
+ba_check_l (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order, uint64_t expected_offset, uint64_t expected_size)
{
- u_int64_t actual_offset, actual_size;
+ uint64_t actual_offset, actual_size;
int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size);
assert(r==0);
assert(expected_offset == actual_offset);
@@ -36,9 +36,9 @@ ba_check_l (BLOCK_ALLOCATOR ba, u_int64_t blocknum_in_layout_order, u_int64_t ex
}
static void
-ba_check_none (BLOCK_ALLOCATOR ba, u_int64_t blocknum_in_layout_order)
+ba_check_none (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order)
{
- u_int64_t actual_offset, actual_size;
+ uint64_t actual_offset, actual_size;
int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size);
assert(r==-1);
}
@@ -48,7 +48,7 @@ ba_check_none (BLOCK_ALLOCATOR ba, u_int64_t blocknum_in_layout_order)
static void
test_ba0 (void) {
BLOCK_ALLOCATOR ba;
- u_int64_t b0, b1;
+ uint64_t b0, b1;
create_block_allocator(&ba, 100, 1);
assert(block_allocator_allocated_limit(ba)==100);
ba_alloc_at(ba, 50, 100);
@@ -71,7 +71,7 @@ test_ba0 (void) {
assert(10==block_allocator_block_size(ba, b0));
assert(50==block_allocator_block_size(ba, 100));
- u_int64_t b2, b3, b4, b5, b6, b7;
+ uint64_t b2, b3, b4, b5, b6, b7;
ba_alloc(ba, 100, &b2);
ba_alloc(ba, 100, &b3);
ba_alloc(ba, 100, &b4);
@@ -82,7 +82,7 @@ test_ba0 (void) {
ba_alloc(ba, 100, &b2);
ba_free(ba, b4);
ba_free(ba, b6);
- u_int64_t b8, b9;
+ uint64_t b8, b9;
ba_alloc(ba, 100, &b4);
ba_free(ba, b2);
ba_alloc(ba, 100, &b6);
@@ -107,7 +107,7 @@ test_ba1 (int n_initial) {
create_block_allocator(&ba, 0, 1);
int i;
int n_blocks=0;
- u_int64_t blocks[1000];
+ uint64_t blocks[1000];
for (i=0; i<1000; i++) {
if (i<n_initial || random()%2 == 0) {
if (n_blocks<1000) {
@@ -135,7 +135,7 @@ static void
test_ba2 (void)
{
BLOCK_ALLOCATOR ba;
- u_int64_t b[6];
+ uint64_t b[6];
enum { BSIZE = 1024 };
create_block_allocator(&ba, 100, BSIZE);
assert(block_allocator_allocated_limit(ba)==100);
@@ -181,7 +181,7 @@ test_ba2 (void)
ba_check_l (ba, 5, 7*BSIZE, 100);
ba_check_none (ba, 6);
- u_int64_t b2;
+ uint64_t b2;
ba_alloc(ba, 100, &b2);
assert(b2==4*BSIZE);
ba_check_l (ba, 0, 0, 100);
@@ -203,13 +203,13 @@ test_ba2 (void)
ba_check_none (ba, 5);
// This alloc will allocate the first block after the reserve space in the case of first fit.
- u_int64_t b3;
+ uint64_t b3;
ba_alloc(ba, 100, &b3);
assert(b3== BSIZE); // First fit.
// if (b3==5*BSIZE) then it is next fit.
// Now 5*BSIZE is free
- u_int64_t b5;
+ uint64_t b5;
ba_alloc(ba, 100, &b5);
assert(b5==5*BSIZE);
ba_check_l (ba, 0, 0, 100);
@@ -222,7 +222,7 @@ test_ba2 (void)
ba_check_none (ba, 7);
// Now all blocks are busy
- u_int64_t b6, b7, b8;
+ uint64_t b6, b7, b8;
ba_alloc(ba, 100, &b6);
ba_alloc(ba, 100, &b7);
ba_alloc(ba, 100, &b8);
@@ -243,13 +243,13 @@ test_ba2 (void)
ba_free(ba, 9*BSIZE);
ba_free(ba, 7*BSIZE);
- u_int64_t b9;
+ uint64_t b9;
ba_alloc(ba, 100, &b9);
assert(b9==7*BSIZE);
ba_free(ba, 5*BSIZE);
ba_free(ba, 2*BSIZE);
- u_int64_t b10, b11;
+ uint64_t b10, b11;
ba_alloc(ba, 100, &b10);
assert(b10==2*BSIZE);
ba_alloc(ba, 100, &b11);
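
The comments in test_ba2 pin down the allocator's placement policy: first fit, scanning blocks in layout order and taking the first gap that is large enough, as opposed to next fit, which would resume from the previous allocation. A compact sketch of first fit under those assumptions (blocks sorted by offset and lying at or after the reserved prefix — the 100 bytes create_block_allocator reserves in these tests; alignment handling is omitted):

    #include <stdint.h>

    struct blockpair { uint64_t offset, size; };

    // bp[] is sorted by offset; reserve is the prefix never handed out.
    static uint64_t first_fit(const struct blockpair *bp, int n,
                              uint64_t reserve, uint64_t size) {
        uint64_t prev_end = reserve;
        for (int i = 0; i < n; i++) {
            if (bp[i].offset - prev_end >= size)
                return prev_end;                 // first gap that fits wins
            prev_end = bp[i].offset + bp[i].size;
        }
        return prev_end;                         // no gap: append after the last block
    }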
diff --git a/ft/tests/cachetable-3969.cc b/ft/tests/cachetable-3969.cc
index a4893512204..5bbc927914a 100644
--- a/ft/tests/cachetable-3969.cc
+++ b/ft/tests/cachetable-3969.cc
@@ -33,11 +33,11 @@ run_test (void) {
long s1;
long s2;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
for (int i = 0; i < 20; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
}
@@ -49,16 +49,16 @@ run_test (void) {
// pin 1 and 2
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_begin_checkpoint(ct, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
// now we try to pin 1, and it should get evicted out from under us
struct unlockers foo;
foo.extra = NULL;
- foo.locked = TRUE;
+ foo.locked = true;
foo.f = unlock_test_fun;
foo.next = NULL;
r = toku_cachetable_get_and_pin_nonblocking(
@@ -71,7 +71,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL,
&foo
);
@@ -86,7 +86,7 @@ run_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
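
cachetable-3969 exercises get_and_pin_nonblocking against a pair that is pending a checkpoint: instead of blocking, the call runs the unlockers chain and returns TOKUDB_TRY_AGAIN, and the caller is expected to reacquire its locks and retry. A toy sketch of that contract (everything here is illustrative except the try-again idea itself):

    #include <stdbool.h>

    #define TRY_AGAIN (-1)   // stands in for TOKUDB_TRY_AGAIN

    struct toy_pair { bool pending_checkpoint; };

    // Nonblocking pin: if the pair would force a wait, release the caller's
    // locks (the unlockers chain in the real API) and bounce instead.
    static int pin_nonblocking(struct toy_pair *p) {
        if (p->pending_checkpoint) {
            // unlockers->f(unlockers) would run here
            return TRY_AGAIN;
        }
        return 0;
    }

    static int pin_with_retry(struct toy_pair *p) {
        int r;
        do {
            r = pin_nonblocking(p);
            // a real caller re-takes its locks before retrying; the checkpoint
            // writer makes progress meanwhile and clears the pending state
            if (r == TRY_AGAIN) p->pending_checkpoint = false;
        } while (r == TRY_AGAIN);
        return r;
    }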
diff --git a/ft/tests/cachetable-4302.cc b/ft/tests/cachetable-4302.cc
index be0f5eea9d1..4bb62c24ce8 100644
--- a/ft/tests/cachetable-4302.cc
+++ b/ft/tests/cachetable-4302.cc
@@ -17,10 +17,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -30,7 +30,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -90,7 +90,7 @@ pe_callback (
static void
test_get_key_and_fullhash(
CACHEKEY* cachekey,
- u_int32_t* fullhash,
+ uint32_t* fullhash,
void* UU(extra))
{
CACHEKEY name;
@@ -111,8 +111,8 @@ cachetable_test (void) {
void* v1;
long s1;
- u_int64_t val1 = 0;
- u_int64_t val2 = 0;
+ uint64_t val1 = 0;
+ uint64_t val2 = 0;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
@@ -126,12 +126,12 @@ cachetable_test (void) {
wc,
fetch,
def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
&val1
);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
CACHEKEY key;
- u_int32_t fullhash;
+ uint32_t fullhash;
checkpoint_began = false;
r = toku_cachetable_put_with_dep_pairs(
f1,
@@ -160,7 +160,7 @@ cachetable_test (void) {
);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-4357.cc b/ft/tests/cachetable-4357.cc
index 0a47c5ae7e6..2240d4a4dc1 100644
--- a/ft/tests/cachetable-4357.cc
+++ b/ft/tests/cachetable-4357.cc
@@ -17,7 +17,7 @@ static void *pin_nonblocking(void *arg) {
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
NULL
);
@@ -45,7 +45,7 @@ cachetable_test (void) {
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL
);
toku_pthread_t pin_nonblocking_tid;
@@ -61,7 +61,7 @@ cachetable_test (void) {
assert_zero(r);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-4365.cc b/ft/tests/cachetable-4365.cc
index 9fc854650e2..311c090b37f 100644
--- a/ft/tests/cachetable-4365.cc
+++ b/ft/tests/cachetable-4365.cc
@@ -17,7 +17,7 @@ static void *pin_nonblocking(void *arg) {
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
NULL
);
@@ -41,7 +41,7 @@ static void *put_same_key(void *arg) {
toku_pthread_t put_tid;
-static void test_remove_key(CACHEKEY* UU(cachekey), BOOL UU(for_checkpoint), void* UU(extra)) {
+static void test_remove_key(CACHEKEY* UU(cachekey), bool UU(for_checkpoint), void* UU(extra)) {
int r = toku_pthread_create(&put_tid, NULL, put_same_key, NULL);
assert_zero(r);
}
@@ -66,7 +66,7 @@ cachetable_test (void) {
&v1,
&s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL
);
toku_pthread_t pin_nonblocking_tid;
@@ -86,7 +86,7 @@ cachetable_test (void) {
r = toku_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(2));
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-4545.cc b/ft/tests/cachetable-4545.cc
index 6e73a8906df..075d4b895b2 100644
--- a/ft/tests/cachetable-4545.cc
+++ b/ft/tests/cachetable-4545.cc
@@ -5,9 +5,9 @@
#include "includes.h"
#include "test.h"
-BOOL flush_called;
-BOOL pf_req_called;
-BOOL pf_called;
+bool flush_called;
+bool pf_req_called;
+bool pf_called;
static UU() void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -18,25 +18,25 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
- flush_called = TRUE;
+ flush_called = true;
*new_size = make_pair_attr(8);
}
-static BOOL pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- pf_req_called = TRUE;
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ pf_req_called = true;
assert(flush_called);
- return TRUE;
+ return true;
}
static int pf_callback(void* UU(ftnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(pf_req_called);
assert(flush_called);
- pf_called = TRUE;
+ pf_called = true;
*sizep = make_pair_attr(8);
return 0;
}
@@ -56,15 +56,15 @@ cachetable_test (void) {
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
- flush_called = FALSE;
- pf_req_called = FALSE;
- pf_called = FALSE;
+ flush_called = false;
+ pf_req_called = false;
+ pf_called = false;
r = toku_cachetable_begin_checkpoint(ct, NULL);
assert_zero(r);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
@@ -80,7 +80,7 @@ cachetable_test (void) {
assert(flush_called);
assert(pf_called);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-5097.cc b/ft/tests/cachetable-5097.cc
index dbf98bf27a5..411b0f427d2 100644
--- a/ft/tests/cachetable-5097.cc
+++ b/ft/tests/cachetable-5097.cc
@@ -8,10 +8,10 @@
CACHEFILE f1;
CACHEFILE f2;
-BOOL check_flush;
-BOOL dirty_flush_called;
-BOOL check_pe_callback;
-BOOL pe_callback_called;
+bool check_flush;
+bool dirty_flush_called;
+bool check_pe_callback;
+bool pe_callback_called;
static int
pe_callback (
@@ -23,7 +23,7 @@ pe_callback (
{
*bytes_freed = make_pair_attr(1);
if (check_pe_callback) {
- pe_callback_called = TRUE;
+ pe_callback_called = true;
}
usleep(4*1024*1024);
return 0;
@@ -38,13 +38,13 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (check_flush && w) {
- dirty_flush_called = TRUE;
+ dirty_flush_called = true;
}
}
@@ -60,13 +60,13 @@ static void *f2_pin(void *arg) {
// and we expect that to be enough so that the unpin does not invoke a partial eviction
// This is just to ensure that the bug is being exercised
//
- check_pe_callback = TRUE;
- r = toku_cachetable_get_and_pin(f2, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ check_pe_callback = true;
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0);
assert(pe_callback_called);
- pe_callback_called = FALSE;
+ pe_callback_called = false;
r = toku_cachetable_unpin(f2, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
- check_pe_callback = FALSE;
+ check_pe_callback = false;
assert(!pe_callback_called);
assert(r == 0);
@@ -77,8 +77,8 @@ static void
cachetable_test (void) {
const int test_limit = 12;
int r;
- check_flush = FALSE;
- dirty_flush_called = FALSE;
+ check_flush = false;
+ dirty_flush_called = false;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
@@ -99,7 +99,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
// pin and unpin a node 20 times, just to get clock count up
for (int i = 0; i < 20; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert(r == 0);
@@ -112,11 +112,11 @@ cachetable_test (void) {
assert_zero(r);
usleep(2*1024*1024);
- check_flush = TRUE;
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN);
+ check_flush = true;
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN);
assert(r == 0);
assert(dirty_flush_called);
- check_flush = FALSE;
+ check_flush = false;
void *ret;
r = toku_pthread_join(tid, &ret);
@@ -124,7 +124,7 @@ cachetable_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f2, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f2, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-all-write.cc b/ft/tests/cachetable-all-write.cc
index 6dbb341dc07..2d6776945ce 100644
--- a/ft/tests/cachetable-all-write.cc
+++ b/ft/tests/cachetable-all-write.cc
@@ -14,10 +14,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); }
@@ -43,16 +43,16 @@ cachetable_test (void) {
long s1, s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
//r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, 8);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-checkpoint-pending.cc b/ft/tests/cachetable-checkpoint-pending.cc
index fa011573e64..63db470c2bb 100644
--- a/ft/tests/cachetable-checkpoint-pending.cc
+++ b/ft/tests/cachetable-checkpoint-pending.cc
@@ -42,10 +42,10 @@ flush (
void *UU(extraargs),
PAIR_ATTR size,
PAIR_ATTR* UU(new_size),
- BOOL write_me,
- BOOL keep_me,
- BOOL UU(for_checkpoint),
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
)
{
// printf("f");
@@ -64,7 +64,7 @@ fetch (
CACHEFILE UU(thiscf),
int UU(fd),
CACHEKEY UU(key),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void **UU(value),
void **UU(dd),
PAIR_ATTR *UU(sizep),
@@ -84,12 +84,12 @@ do_update (void *UU(ignore))
int i;
for (i=0; i<N; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(cf, key);
+ uint32_t hi = toku_cachetable_hash(cf, key);
void *vv;
long size;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, &size, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, &size, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
//printf("g");
assert(r==0);
assert(size==sizeof(int));
@@ -136,7 +136,7 @@ static void checkpoint_pending(void) {
int i;
for (i=0; i<N; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(cf, key);
+ uint32_t hi = toku_cachetable_hash(cf, key);
values[i] = 42;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
@@ -174,7 +174,7 @@ static void checkpoint_pending(void) {
assert(r == 0);
assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
- r = toku_cachefile_close(&cf, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&cf, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-checkpoint-pinned-nodes.cc b/ft/tests/cachetable-checkpoint-pinned-nodes.cc
index 0b5ce66818e..21cc4b5a28d 100644
--- a/ft/tests/cachetable-checkpoint-pinned-nodes.cc
+++ b/ft/tests/cachetable-checkpoint-pinned-nodes.cc
@@ -5,11 +5,11 @@
#include "includes.h"
#include "test.h"
-u_int64_t clean_val = 0;
-u_int64_t dirty_val = 0;
+uint64_t clean_val = 0;
+uint64_t dirty_val = 0;
-BOOL check_me;
-BOOL flush_called;
+bool check_me;
+bool flush_called;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -20,17 +20,17 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
//usleep (5*1024*1024);
// if the checkpoint is pending, assert that it is of what we made dirty
if (check_me) {
- flush_called = TRUE;
+ flush_called = true;
assert(c);
assert(e == &dirty_val);
assert(v == &dirty_val);
@@ -43,7 +43,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -78,9 +78,9 @@ cachetable_test (void) {
long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &dirty_val);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &dirty_val);
wc.write_extraargs = NULL;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
//
// Here is the test, we have two pairs, v1 is dirty, v2 is clean, but both are currently pinned
@@ -94,8 +94,8 @@ cachetable_test (void) {
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
- check_me = TRUE;
- flush_called = FALSE;
+ check_me = true;
+ flush_called = false;
r = toku_cachetable_end_checkpoint(
ct,
NULL,
@@ -104,10 +104,10 @@ cachetable_test (void) {
);
assert(r==0);
assert(flush_called);
- check_me = FALSE;
+ check_me = false;
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
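
The shape these checkpoint tests rely on is a begin/end bracket: begin_checkpoint marks the currently dirty pairs as pending, and end_checkpoint writes exactly those pairs, pinned or not, leaving clean pairs untouched. A toy model of that bookkeeping, not the real cachetable, just the invariant the test asserts:

    #include <assert.h>
    #include <stdbool.h>

    struct toy_pair { bool dirty; bool pending; };

    static void begin_checkpoint(struct toy_pair *pairs, int n) {
        for (int i = 0; i < n; i++)
            pairs[i].pending = pairs[i].dirty;   // snapshot: dirty pairs owe a write
    }

    static int end_checkpoint(struct toy_pair *pairs, int n) {
        int writes = 0;
        for (int i = 0; i < n; i++) {
            if (pairs[i].pending) {              // only pending pairs are written
                pairs[i].pending = false;
                pairs[i].dirty = false;
                writes++;
            }
        }
        return writes;
    }

    int main(void) {
        struct toy_pair p[2] = { { .dirty = true }, { .dirty = false } };
        begin_checkpoint(p, 2);
        assert(end_checkpoint(p, 2) == 1);       // only the dirty pair is flushed
        return 0;
    }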
diff --git a/ft/tests/cachetable-checkpoint-prefetched-nodes.cc b/ft/tests/cachetable-checkpoint-prefetched-nodes.cc
index 70d040b9f90..becb025476c 100644
--- a/ft/tests/cachetable-checkpoint-prefetched-nodes.cc
+++ b/ft/tests/cachetable-checkpoint-prefetched-nodes.cc
@@ -5,11 +5,11 @@
#include "includes.h"
#include "test.h"
-u_int64_t clean_val = 0;
-u_int64_t dirty_val = 0;
+uint64_t clean_val = 0;
+uint64_t dirty_val = 0;
-BOOL check_me;
-BOOL flush_called;
+bool check_me;
+bool flush_called;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -20,17 +20,17 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
//usleep (5*1024*1024);
// if the checkpoint is pending, assert that it is of what we made dirty
if (check_me) {
- flush_called = TRUE;
+ flush_called = true;
assert(c);
assert(e == &dirty_val);
assert(v == &dirty_val);
@@ -43,7 +43,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -53,11 +53,11 @@ fetch (CACHEFILE f __attribute__((__unused__)),
*dirtyp = 0;
if (extraargs) {
*value = &dirty_val;
- *dirtyp = TRUE;
+ *dirtyp = true;
}
else {
*value = &clean_val;
- *dirtyp = FALSE;
+ *dirtyp = false;
}
*sizep = make_pair_attr(8);
return 0;
@@ -74,12 +74,12 @@ cachetable_test (void) {
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
- BOOL doing_prefetch = FALSE;
+ bool doing_prefetch = false;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
wc.flush_callback = flush;
r = toku_cachefile_prefetch(f1, make_blocknum(1), 1, wc, fetch, def_pf_req_callback, def_pf_callback, &dirty_val, &doing_prefetch);
assert(doing_prefetch);
- doing_prefetch = FALSE;
+ doing_prefetch = false;
wc.write_extraargs = NULL;
r = toku_cachefile_prefetch(f1, make_blocknum(2), 2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL, &doing_prefetch);
assert(doing_prefetch);
@@ -93,8 +93,8 @@ cachetable_test (void) {
r = toku_cachetable_begin_checkpoint(ct, NULL); assert(r == 0);
- check_me = TRUE;
- flush_called = FALSE;
+ check_me = true;
+ flush_called = false;
r = toku_cachetable_end_checkpoint(
ct,
NULL,
@@ -103,12 +103,12 @@ cachetable_test (void) {
);
assert(r==0);
assert(flush_called);
- check_me = FALSE;
+ check_me = false;
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-checkpoint-test.cc b/ft/tests/cachetable-checkpoint-test.cc
index 6ecb28ca952..7b08befa631 100644
--- a/ft/tests/cachetable-checkpoint-test.cc
+++ b/ft/tests/cachetable-checkpoint-test.cc
@@ -22,10 +22,10 @@ static void flush(
void *extraargs,
PAIR_ATTR size,
PAIR_ATTR* UU(new_size),
- BOOL write_me,
- BOOL keep_me,
- BOOL UU(for_checkpoint),
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
)
{
cf = cf; key = key; value = value; extraargs = extraargs;
@@ -75,7 +75,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
int i;
for (i=0; i<n; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(f1, key);
+ uint32_t hi = toku_cachetable_hash(f1, key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachetable_put(f1, key, hi, (void *)(long)i, make_pair_attr(1), wc);
@@ -109,7 +109,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
// after the checkpoint, all of the items should be clean
for (i=0; i<n; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(f1, key);
+ uint32_t hi = toku_cachetable_hash(f1, key);
void *v;
r = toku_cachetable_maybe_get_and_pin(f1, key, hi, &v);
if (r != 0)
@@ -136,7 +136,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
assert(r == 0);
assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0 );
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-cleaner-checkpoint.cc b/ft/tests/cachetable-cleaner-checkpoint.cc
index 1c4e4c2b3c4..1d6fc2dc87d 100644
--- a/ft/tests/cachetable-cleaner-checkpoint.cc
+++ b/ft/tests/cachetable-cleaner-checkpoint.cc
@@ -7,7 +7,7 @@
CACHEFILE f1;
-BOOL flush_called;
+bool flush_called;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -18,10 +18,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -32,17 +32,17 @@ flush (CACHEFILE f __attribute__((__unused__)),
if (w) {
assert(!flush_called);
assert(c);
- flush_called = TRUE;
+ flush_called = true;
}
}
-BOOL cleaner_called;
+bool cleaner_called;
static int
cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
void* UU(extraargs)
)
{
@@ -50,7 +50,7 @@ cleaner_callback(
assert(fullhash == 1);
assert(!cleaner_called);
assert(flush_called);
- cleaner_called = TRUE;
+ cleaner_called = true;
int r = toku_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
return 0;
@@ -74,12 +74,12 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
- cleaner_called = FALSE;
+ cleaner_called = false;
r = toku_cachetable_begin_checkpoint(ct, NULL);
assert_zero(r);
toku_cleaner_thread(ct);
@@ -93,7 +93,7 @@ cachetable_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-cleaner-checkpoint2.cc b/ft/tests/cachetable-cleaner-checkpoint2.cc
index 8c340753946..1a525d781d2 100644
--- a/ft/tests/cachetable-cleaner-checkpoint2.cc
+++ b/ft/tests/cachetable-cleaner-checkpoint2.cc
@@ -7,7 +7,7 @@
CACHEFILE f1;
-BOOL flush_called;
+bool flush_called;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -18,10 +18,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -32,17 +32,17 @@ flush (CACHEFILE f __attribute__((__unused__)),
if (w) {
assert(!flush_called);
assert(c);
- flush_called = TRUE;
+ flush_called = true;
}
}
-BOOL cleaner_called;
+bool cleaner_called;
static int
cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
void* UU(extraargs)
)
{
@@ -50,7 +50,7 @@ cleaner_callback(
assert(fullhash == 1);
assert(!cleaner_called);
assert(flush_called);
- cleaner_called = TRUE;
+ cleaner_called = true;
int r = toku_cachetable_unpin(f1, blocknum, fullhash, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
return 0;
@@ -74,12 +74,12 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
- cleaner_called = FALSE;
+ cleaner_called = false;
r = toku_cachetable_begin_checkpoint(ct, NULL);
assert_zero(r);
toku_cleaner_thread(ct);
@@ -93,7 +93,7 @@ cachetable_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc b/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc
index 3c88e16ed2e..bba9eebccdf 100644
--- a/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc
+++ b/ft/tests/cachetable-cleaner-thread-attrs-accumulate.cc
@@ -16,14 +16,14 @@ toku_mutex_t attr_mutex;
#define STATUS_VALUE(x) ct_status.status[x].value.num
const PAIR_ATTR attrs[] = {
- { .size = 20, .nonleaf_size = 13, .leaf_size = 900, .rollback_size = 123, .cache_pressure_size = 403, .is_valid = TRUE },
- { .size = 21, .nonleaf_size = 16, .leaf_size = 910, .rollback_size = 113, .cache_pressure_size = 401, .is_valid = TRUE },
- { .size = 22, .nonleaf_size = 17, .leaf_size = 940, .rollback_size = 133, .cache_pressure_size = 402, .is_valid = TRUE },
- { .size = 23, .nonleaf_size = 18, .leaf_size = 931, .rollback_size = 153, .cache_pressure_size = 404, .is_valid = TRUE },
- { .size = 25, .nonleaf_size = 19, .leaf_size = 903, .rollback_size = 173, .cache_pressure_size = 413, .is_valid = TRUE },
- { .size = 26, .nonleaf_size = 10, .leaf_size = 903, .rollback_size = 193, .cache_pressure_size = 423, .is_valid = TRUE },
- { .size = 20, .nonleaf_size = 11, .leaf_size = 902, .rollback_size = 103, .cache_pressure_size = 433, .is_valid = TRUE },
- { .size = 29, .nonleaf_size = 12, .leaf_size = 909, .rollback_size = 113, .cache_pressure_size = 443, .is_valid = TRUE }
+ { .size = 20, .nonleaf_size = 13, .leaf_size = 900, .rollback_size = 123, .cache_pressure_size = 403, .is_valid = true },
+ { .size = 21, .nonleaf_size = 16, .leaf_size = 910, .rollback_size = 113, .cache_pressure_size = 401, .is_valid = true },
+ { .size = 22, .nonleaf_size = 17, .leaf_size = 940, .rollback_size = 133, .cache_pressure_size = 402, .is_valid = true },
+ { .size = 23, .nonleaf_size = 18, .leaf_size = 931, .rollback_size = 153, .cache_pressure_size = 404, .is_valid = true },
+ { .size = 25, .nonleaf_size = 19, .leaf_size = 903, .rollback_size = 173, .cache_pressure_size = 413, .is_valid = true },
+ { .size = 26, .nonleaf_size = 10, .leaf_size = 903, .rollback_size = 193, .cache_pressure_size = 423, .is_valid = true },
+ { .size = 20, .nonleaf_size = 11, .leaf_size = 902, .rollback_size = 103, .cache_pressure_size = 433, .is_valid = true },
+ { .size = 29, .nonleaf_size = 12, .leaf_size = 909, .rollback_size = 113, .cache_pressure_size = 443, .is_valid = true }
};
const int n_pairs = (sizeof attrs) / (sizeof attrs[0]);
@@ -36,10 +36,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
PAIR_ATTR *CAST_FROM_VOIDP(expect, e);
if (!keep) {
@@ -87,7 +87,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
&expect);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_DIRTY, attrs[i]);
@@ -112,7 +112,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
&expect);
toku_cachetable_unpin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, CACHETABLE_CLEAN,
make_pair_attr(test_limit - expect.size + 20));
@@ -126,7 +126,7 @@ run_test (void) {
assert(STATUS_VALUE(CT_SIZE_CACHEPRESSURE) == (uint64_t) expect.cache_pressure_size);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc b/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc
index 159be01c253..5baaa4fa749 100644
--- a/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc
+++ b/ft/tests/cachetable-cleaner-thread-empty-cachetable.cc
@@ -34,7 +34,7 @@ cachetable_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-cleaner-thread-everything-pinned.cc b/ft/tests/cachetable-cleaner-thread-everything-pinned.cc
index 846ea591fba..ad992cff734 100644
--- a/ft/tests/cachetable-cleaner-thread-everything-pinned.cc
+++ b/ft/tests/cachetable-cleaner-thread-everything-pinned.cc
@@ -14,11 +14,11 @@ static int
everything_pinned_cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
)
{
- assert(FALSE); // everything is pinned so this should never be called
+ assert(false); // everything is pinned so this should never be called
return 0;
}
@@ -49,7 +49,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL);
assert_zero(r);
}
@@ -62,7 +62,7 @@ run_test (void) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc b/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc
index 6d4579c0bcd..ee4e7ed9c84 100644
--- a/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc
+++ b/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing.cc
@@ -14,11 +14,11 @@ static UU() int
everything_pinned_cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
)
{
- assert(FALSE); // everything is pinned so this should never be called
+ assert(false); // everything is pinned so this should never be called
return 0;
}
@@ -47,7 +47,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL);
assert_zero(r);
// set cachepressure_size to 0
@@ -60,7 +60,7 @@ run_test (void) {
usleep(4000000);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-cleaner-thread-simple.cc b/ft/tests/cachetable-cleaner-thread-simple.cc
index 77ebe1fde0a..e19fcef5b85 100644
--- a/ft/tests/cachetable-cleaner-thread-simple.cc
+++ b/ft/tests/cachetable-cleaner-thread-simple.cc
@@ -17,7 +17,7 @@ static int
my_cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
)
{
@@ -37,7 +37,7 @@ run_test (void) {
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
r = toku_set_cleaner_period(ct, 1); assert(r == 0);
- my_cleaner_callback_called = FALSE;
+ my_cleaner_callback_called = false;
char fname1[] = __SRCFILE__ "test1.dat";
unlink(fname1);
@@ -54,7 +54,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL);
PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 100;
@@ -66,7 +66,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL);
assert_zero(r);
// set cachepressure_size to 0
@@ -80,7 +80,7 @@ run_test (void) {
assert(my_cleaner_callback_called);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0 );
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-clock-all-pinned.cc b/ft/tests/cachetable-clock-all-pinned.cc
index 3a7175d4af1..4a4edb7a3a7 100644
--- a/ft/tests/cachetable-clock-all-pinned.cc
+++ b/ft/tests/cachetable-clock-all-pinned.cc
@@ -35,7 +35,7 @@ cachetable_test (void) {
}
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0 );
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-clock-eviction.cc b/ft/tests/cachetable-clock-eviction.cc
index 23077dbd227..5a43343be66 100644
--- a/ft/tests/cachetable-clock-eviction.cc
+++ b/ft/tests/cachetable-clock-eviction.cc
@@ -6,9 +6,9 @@
#include "test.h"
int num_entries;
-BOOL flush_may_occur;
+bool flush_may_occur;
int expected_flushed_key;
-BOOL check_flush;
+bool check_flush;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -19,10 +19,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (check_flush && !keep) {
@@ -38,7 +38,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -66,35 +66,35 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
- flush_may_occur = FALSE;
- check_flush = TRUE;
+ flush_may_occur = false;
+ check_flush = true;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
for (int i = 0; i < 100000; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 8; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 4; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 2; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
}
- flush_may_occur = TRUE;
+ flush_may_occur = true;
expected_flushed_key = 4;
r = toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc);
- flush_may_occur = TRUE;
+ flush_may_occur = true;
expected_flushed_key = 5;
r = toku_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(4));
- check_flush = FALSE;
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ check_flush = false;
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0 );
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-clock-eviction2.cc b/ft/tests/cachetable-clock-eviction2.cc
index 21bfe9da179..d062aef2212 100644
--- a/ft/tests/cachetable-clock-eviction2.cc
+++ b/ft/tests/cachetable-clock-eviction2.cc
@@ -6,7 +6,7 @@
#include "includes.h"
#include "test.h"
-BOOL flush_may_occur;
+bool flush_may_occur;
long expected_bytes_to_free;
static void
@@ -18,10 +18,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep,
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep,
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert(flush_may_occur);
if (!keep) {
@@ -35,7 +35,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -59,10 +59,10 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
}
@@ -107,47 +107,47 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
- flush_may_occur = FALSE;
+ flush_may_occur = false;
for (int i = 0; i < 100000; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 8; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 4; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 2; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
}
- flush_may_occur = FALSE;
+ flush_may_occur = false;
expected_bytes_to_free = 4;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = other_flush;
wc.pe_callback = other_pe_callback;
r = toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc);
- flush_may_occur = TRUE;
+ flush_may_occur = true;
r = toku_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(4));
assert(expected_bytes_to_free == 0);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-clock-eviction3.cc b/ft/tests/cachetable-clock-eviction3.cc
index ac734791f9d..5814a3aa5c9 100644
--- a/ft/tests/cachetable-clock-eviction3.cc
+++ b/ft/tests/cachetable-clock-eviction3.cc
@@ -6,7 +6,7 @@
#include "includes.h"
#include "test.h"
-BOOL flush_may_occur;
+bool flush_may_occur;
long expected_bytes_to_free;
static void
@@ -18,10 +18,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep,
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep,
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert(flush_may_occur);
if (!keep) {
@@ -35,7 +35,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -59,10 +59,10 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
}
@@ -122,13 +122,13 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
- flush_may_occur = FALSE;
+ flush_may_occur = false;
for (int i = 0; i < 100000; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 8; i++) {
@@ -136,7 +136,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 4; i++) {
@@ -144,7 +144,7 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
}
for (int i = 0; i < 2; i++) {
@@ -152,17 +152,17 @@ cachetable_test (void) {
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
}
- flush_may_occur = FALSE;
+ flush_may_occur = false;
expected_bytes_to_free = 4;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = other_flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = other_pe_callback;
r = toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc);
- flush_may_occur = TRUE;
+ flush_may_occur = true;
r = toku_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(8));
// we are testing that having a wildly different estimate than
@@ -175,7 +175,7 @@ cachetable_test (void) {
assert(expected_bytes_to_free == 3);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
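
The same mechanical conversion repeats in every file below. As a standalone sketch of the pattern (variable names hypothetical, C99 headers assumed to be in scope):

    #include <stdbool.h>   // bool/true/false replace the BOOL/TRUE/FALSE macros
    #include <stdint.h>    // uint32_t replaces the BSD-style u_int32_t

    // old: BOOL flush_may_occur = FALSE;  u_int32_t fullhash;
    bool flush_may_occur = false;
    uint32_t fullhash = 0;

    // With a real bool there is no need to compare against TRUE/FALSE:
    if (flush_may_occur) { /* ... */ }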
diff --git a/ft/tests/cachetable-clock-eviction4.cc b/ft/tests/cachetable-clock-eviction4.cc
index 8e4b2b5e9f3..00643945670 100644
--- a/ft/tests/cachetable-clock-eviction4.cc
+++ b/ft/tests/cachetable-clock-eviction4.cc
@@ -6,16 +6,16 @@
#include "test.h"
int num_entries;
-BOOL flush_may_occur;
+bool flush_may_occur;
int expected_flushed_key;
-BOOL check_flush;
+bool check_flush;
//
// This test verifies that if partial eviction is expensive and
// does not estimate the number of freed bytes to be greater than 0,
// then partial eviction is not called, and normal eviction
-// is used. The verification is done ia an assert(FALSE) in
+// is used. The verification is done via an assert(false) in
// pe_callback.
//
@@ -29,10 +29,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (check_flush && !keep) {
@@ -48,7 +48,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -82,7 +82,7 @@ pe_callback (
void* extraargs __attribute__((__unused__))
)
{
- assert(FALSE);
+ assert(false);
*bytes_freed = bytes_to_free;
return 0;
}
@@ -103,37 +103,37 @@ cachetable_test (void) {
void* v1;
void* v2;
long s1, s2;
- flush_may_occur = FALSE;
- check_flush = TRUE;
+ flush_may_occur = false;
+ check_flush = true;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback;
for (int i = 0; i < 100000; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 8; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 4; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
}
for (int i = 0; i < 2; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
}
- flush_may_occur = TRUE;
+ flush_may_occur = true;
expected_flushed_key = 4;
r = toku_cachetable_put(f1, make_blocknum(5), 5, NULL, make_pair_attr(4), wc);
- flush_may_occur = TRUE;
+ flush_may_occur = true;
expected_flushed_key = 5;
r = toku_cachetable_unpin(f1, make_blocknum(5), 5, CACHETABLE_CLEAN, make_pair_attr(4));
- check_flush = FALSE;
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ check_flush = false;
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
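
For context, the pe_callback trap this test depends on looks roughly like the sketch below. Only the body and the trailing extraargs parameter are visible in the hunk above, so the leading parameters are an assumption:

    // Hypothetical reconstruction -- parameter list assumed, body taken
    // from the hunk above.
    static int pe_callback(void *ftnode_pv, PAIR_ATTR bytes_to_free,
                           PAIR_ATTR *bytes_freed, void *extraargs) {
        assert(false);                  // eviction must never take the partial path here
        *bytes_freed = bytes_to_free;   // unreachable, but keeps the contract visible
        return 0;
    }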
diff --git a/ft/tests/cachetable-clone-checkpoint.cc b/ft/tests/cachetable-clone-checkpoint.cc
index e8058b134f8..ff2a69a2a9a 100644
--- a/ft/tests/cachetable-clone-checkpoint.cc
+++ b/ft/tests/cachetable-clone-checkpoint.cc
@@ -7,14 +7,14 @@
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
-BOOL clone_flush_started;
-BOOL clone_flush_completed;
+bool clone_flush_started;
+bool clone_flush_completed;
CACHETABLE ct;
static void
@@ -27,16 +27,16 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL is_clone
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool is_clone
)
{
if (is_clone) {
- clone_flush_started = TRUE;
+ clone_flush_started = true;
usleep(4*1024*1024);
- clone_flush_completed = TRUE;
+ clone_flush_completed = true;
}
}
@@ -70,22 +70,22 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
r = toku_cachetable_begin_checkpoint(ct, NULL);
- clone_flush_started = FALSE;
- clone_flush_completed = FALSE;
+ clone_flush_started = false;
+ clone_flush_completed = false;
toku_pthread_t checkpoint_tid;
r = toku_pthread_create(&checkpoint_tid, NULL, run_end_checkpoint, NULL);
assert_zero(r);
usleep(1*1024*1024);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
assert(clone_flush_started && !clone_flush_completed);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
@@ -97,7 +97,7 @@ cachetable_test (void) {
assert(clone_flush_started && clone_flush_completed);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
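
Condensed, the overlap this test exercises: a dirty pair is cloned at checkpoint time, the background writer flushes the clone (flush() sees is_clone == true and sleeps), and while that write is still in flight a fresh pin of the same block must succeed. The decisive calls, as in the hunks above:

    r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc,
                                    def_fetch, def_pf_req_callback, def_pf_callback,
                                    true, NULL);
    assert_zero(r);
    assert(clone_flush_started && !clone_flush_completed);  // clone write still in flight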
diff --git a/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc b/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc
index 41183da6916..dc646267826 100644
--- a/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc
+++ b/ft/tests/cachetable-clone-partial-fetch-pinned-node.cc
@@ -6,14 +6,14 @@
#include "test.h"
-BOOL flush_completed;
-BOOL pf_called;
+bool flush_completed;
+bool pf_called;
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
static void
@@ -26,21 +26,21 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
if (is_clone) {
usleep(2*1024*1024);
- flush_completed = TRUE;
+ flush_completed = true;
}
}
static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(flush_completed);
- pf_called = TRUE;
+ pf_called = true;
*sizep = make_pair_attr(9);
return 0;
}
@@ -65,21 +65,21 @@ cachetable_test (void) {
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
- flush_completed = FALSE;
+ flush_completed = false;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
assert_zero(r);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
- pf_called = FALSE;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ pf_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
assert(!pf_called);
toku_cachetable_pf_pinned_pair(v1, true_pf_callback, NULL, f1, make_blocknum(1), 1);
@@ -99,7 +99,7 @@ cachetable_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-clone-partial-fetch.cc b/ft/tests/cachetable-clone-partial-fetch.cc
index 8885ea144a4..21a8463b0ca 100644
--- a/ft/tests/cachetable-clone-partial-fetch.cc
+++ b/ft/tests/cachetable-clone-partial-fetch.cc
@@ -6,14 +6,14 @@
#include "test.h"
-BOOL flush_completed;
-BOOL pf_called;
+bool flush_completed;
+bool pf_called;
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
static void
@@ -26,25 +26,25 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
if (is_clone) {
usleep(2*1024*1024);
- flush_completed = TRUE;
+ flush_completed = true;
}
}
-static BOOL true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return TRUE;
+static bool true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
}
static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(flush_completed);
- pf_called = TRUE;
+ pf_called = true;
*sizep = make_pair_attr(9);
return 0;
}
@@ -69,21 +69,21 @@ cachetable_test (void) {
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
- flush_completed = FALSE;
+ flush_completed = false;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
assert_zero(r);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
- pf_called = FALSE;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_pf_req_callback, true_pf_callback, TRUE, NULL);
+ pf_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_pf_req_callback, true_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
@@ -99,7 +99,7 @@ cachetable_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-clone-pin-nonblocking.cc b/ft/tests/cachetable-clone-pin-nonblocking.cc
index e5dc30d33e6..7f54c205114 100644
--- a/ft/tests/cachetable-clone-pin-nonblocking.cc
+++ b/ft/tests/cachetable-clone-pin-nonblocking.cc
@@ -6,10 +6,10 @@
#include "test.h"
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
static void
@@ -22,10 +22,10 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
}
@@ -34,7 +34,7 @@ flush (
// this test verifies that get_and_pin_nonblocking behaves correctly around a
// checkpoint for each combination of dirty and cloneable pairs
static void
-cachetable_test (enum cachetable_dirty dirty, BOOL cloneable) {
+cachetable_test (enum cachetable_dirty dirty, bool cloneable) {
const int test_limit = 12;
int r;
CACHETABLE ct;
@@ -50,17 +50,17 @@ cachetable_test (enum cachetable_dirty dirty, BOOL cloneable) {
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
- // test that having a pin that passes FALSE for may_modify_value does not stall behind checkpoint
+ // test that having a pin that passes false for may_modify_value does not stall behind checkpoint
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, FALSE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, false, NULL, NULL);
assert(r == 0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r == 0);
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
if (dirty == CACHETABLE_DIRTY && !cloneable) {
assert(r == TOKUDB_TRY_AGAIN);
}
@@ -79,7 +79,7 @@ cachetable_test (enum cachetable_dirty dirty, BOOL cloneable) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
@@ -88,9 +88,9 @@ cachetable_test (enum cachetable_dirty dirty, BOOL cloneable) {
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- cachetable_test(CACHETABLE_DIRTY, TRUE);
- cachetable_test(CACHETABLE_DIRTY, FALSE);
- cachetable_test(CACHETABLE_CLEAN, TRUE);
- cachetable_test(CACHETABLE_CLEAN, FALSE);
+ cachetable_test(CACHETABLE_DIRTY, true);
+ cachetable_test(CACHETABLE_DIRTY, false);
+ cachetable_test(CACHETABLE_CLEAN, true);
+ cachetable_test(CACHETABLE_CLEAN, false);
return 0;
}
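
test_main above drives all four dirty/cloneable combinations. The property under test, condensed from the hunks (arguments exactly as shown): a nonblocking pin with may_modify_value == false never stalls behind a checkpoint, while a may_modify_value == true pin of a dirty, uncloneable pair is told to retry:

    r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc,
            def_fetch, def_pf_req_callback, def_pf_callback, false, NULL, NULL);
    assert(r == 0);   // read-only pin proceeds even mid-checkpoint

    r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc,
            def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
    if (dirty == CACHETABLE_DIRTY && !cloneable) {
        assert(r == TOKUDB_TRY_AGAIN);   // pair must be written out before it can be modified
    }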
diff --git a/ft/tests/cachetable-clone-unpin-remove.cc b/ft/tests/cachetable-clone-unpin-remove.cc
index 557b643df43..4218f54fa83 100644
--- a/ft/tests/cachetable-clone-unpin-remove.cc
+++ b/ft/tests/cachetable-clone-unpin-remove.cc
@@ -6,14 +6,14 @@
#include "test.h"
-BOOL flush_completed;
-BOOL evict_called;
+bool flush_completed;
+bool evict_called;
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
}
static void
@@ -26,19 +26,19 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
if (is_clone) {
usleep(2*1024*1024);
- flush_completed = TRUE;
+ flush_completed = true;
}
else if (!keep && !is_clone) {
assert(flush_completed);
- evict_called = TRUE;
+ evict_called = true;
}
}
@@ -63,16 +63,16 @@ cachetable_test (void) {
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
- flush_completed = FALSE;
- evict_called = FALSE;
+ flush_completed = false;
+ evict_called = false;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
assert_zero(r);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
r = toku_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
assert_zero(r);
@@ -88,7 +88,7 @@ cachetable_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-count-pinned-test.cc b/ft/tests/cachetable-count-pinned-test.cc
index 96f7175aae6..97315a762c8 100644
--- a/ft/tests/cachetable-count-pinned-test.cc
+++ b/ft/tests/cachetable-count-pinned-test.cc
@@ -20,7 +20,7 @@ cachetable_count_pinned_test (int n) {
int i;
for (i=1; i<=n; i++) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc);
@@ -37,7 +37,7 @@ cachetable_count_pinned_test (int n) {
assert(toku_cachefile_count_pinned(f1, 0) == i);
}
for (i=n; i>0; i--) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
assert(r == 0);
@@ -48,7 +48,7 @@ cachetable_count_pinned_test (int n) {
assert(toku_cachefile_count_pinned(f1, 1) == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-debug-test.cc b/ft/tests/cachetable-debug-test.cc
index 7a55d6fa5f6..894be442d9d 100644
--- a/ft/tests/cachetable-debug-test.cc
+++ b/ft/tests/cachetable-debug-test.cc
@@ -27,7 +27,7 @@ cachetable_debug_test (int n) {
int i;
for (i=1; i<=n; i++) {
const int item_size = 1;
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(item_size), wc);
@@ -53,7 +53,7 @@ cachetable_debug_test (int n) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-eviction-close-test.cc b/ft/tests/cachetable-eviction-close-test.cc
index 8fbd896f943..b76076b39f3 100644
--- a/ft/tests/cachetable-eviction-close-test.cc
+++ b/ft/tests/cachetable-eviction-close-test.cc
@@ -7,9 +7,9 @@
#include "includes.h"
#include "test.h"
-BOOL check_flush;
-BOOL expect_full_flush;
-BOOL expect_pe;
+bool check_flush;
+bool expect_full_flush;
+bool expect_pe;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -20,10 +20,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert(expect_full_flush);
sleep(2);
@@ -35,7 +35,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -76,7 +76,7 @@ static void cachetable_eviction_full_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
void* value1;
long size1;
@@ -100,14 +100,14 @@ static void cachetable_eviction_full_test (void) {
fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
r = toku_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(1));
assert(r == 0);
}
- expect_full_flush = TRUE;
+ expect_full_flush = true;
// now pin a different node, causing an eviction
wc.flush_callback = def_flush;
wc.pe_est_callback = pe_est_callback;
@@ -121,7 +121,7 @@ static void cachetable_eviction_full_test (void) {
fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -131,7 +131,7 @@ static void cachetable_eviction_full_test (void) {
// close with the eviction in progress. the close should block until
// all of the reads and writes are complete.
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-eviction-close-test2.cc b/ft/tests/cachetable-eviction-close-test2.cc
index 0e14a5f75ea..c1350f54faa 100644
--- a/ft/tests/cachetable-eviction-close-test2.cc
+++ b/ft/tests/cachetable-eviction-close-test2.cc
@@ -7,9 +7,9 @@
#include "includes.h"
#include "test.h"
-BOOL check_flush;
-BOOL expect_full_flush;
-BOOL expect_pe;
+bool check_flush;
+bool expect_full_flush;
+bool expect_pe;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -20,10 +20,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert(expect_full_flush);
}
@@ -34,7 +34,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -88,7 +88,7 @@ static void cachetable_eviction_full_test (void) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
void* value1;
long size1;
@@ -113,14 +113,14 @@ static void cachetable_eviction_full_test (void) {
fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
r = toku_cachetable_unpin(f1, key, fullhash, CACHETABLE_DIRTY, make_pair_attr(8));
assert(r == 0);
}
- expect_full_flush = TRUE;
+ expect_full_flush = true;
// now pin a different node, causing an eviction
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.pe_est_callback = pe_est_callback;
@@ -135,7 +135,7 @@ static void cachetable_eviction_full_test (void) {
fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -145,7 +145,7 @@ static void cachetable_eviction_full_test (void) {
// close with the eviction in progress. the close should block until
// all of the reads and writes are complete.
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-eviction-getandpin-test.cc b/ft/tests/cachetable-eviction-getandpin-test.cc
index 16384d70407..bb2de643a71 100644
--- a/ft/tests/cachetable-eviction-getandpin-test.cc
+++ b/ft/tests/cachetable-eviction-getandpin-test.cc
@@ -7,7 +7,7 @@
#include "includes.h"
#include "test.h"
-BOOL do_sleep;
+bool do_sleep;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -18,10 +18,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (do_sleep) {
sleep(2);
@@ -44,7 +44,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
// let's get and pin this node a bunch of times to drive up the clock count
for (int i = 0; i < 20; i++) {
@@ -62,7 +62,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -73,7 +73,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
gettimeofday(&tstart, NULL);
// def_fetch another block, causing an eviction of the first block we made above
- do_sleep = TRUE;
+ do_sleep = true;
void* value2;
long size2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
@@ -87,7 +87,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -100,11 +100,11 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
// now verify that the block we are trying to evict may be pinned
wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r == TOKUDB_TRY_AGAIN);
- r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(r == 0 && v == 0 && size == 8);
- do_sleep = FALSE;
+ do_sleep = false;
struct timeval tend;
gettimeofday(&tend, NULL);
@@ -117,7 +117,7 @@ static void cachetable_predef_fetch_maybegetandpin_test (void) {
assert(r == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-eviction-getandpin-test2.cc b/ft/tests/cachetable-eviction-getandpin-test2.cc
index bf8b689acdb..66f3ee3662b 100644
--- a/ft/tests/cachetable-eviction-getandpin-test2.cc
+++ b/ft/tests/cachetable-eviction-getandpin-test2.cc
@@ -50,7 +50,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
// let's get and pin this node a bunch of times to drive up the clock count
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
@@ -69,7 +69,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -92,7 +92,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -113,7 +113,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL,
NULL
);
@@ -128,7 +128,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL
);
assert(r == 0 && v == 0 && size == 1);
@@ -144,7 +144,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
assert(r == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-fd-test.cc b/ft/tests/cachetable-fd-test.cc
index 6912ecfbf77..e4a6bbcac37 100644
--- a/ft/tests/cachetable-fd-test.cc
+++ b/ft/tests/cachetable-fd-test.cc
@@ -44,7 +44,7 @@ cachetable_fd_test (void) {
r = toku_cachefile_of_filenum(ct, fn, &newcf);
assert(r == ENOENT);
- r = toku_cachefile_close(&cf, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&cf, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-flush-during-cleaner.cc b/ft/tests/cachetable-flush-during-cleaner.cc
index 95932a768aa..83482e8a684 100644
--- a/ft/tests/cachetable-flush-during-cleaner.cc
+++ b/ft/tests/cachetable-flush-during-cleaner.cc
@@ -11,7 +11,7 @@ static int
cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM blocknum,
- u_int32_t fullhash,
+ uint32_t fullhash,
void* UU(extraargs)
)
{
@@ -40,14 +40,14 @@ cachetable_test (void) {
for (int i = 0; i < 10; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.cleaner_callback = cleaner_callback;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(i), i, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i), i, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, make_pair_attr(8));
}
r = toku_cachefile_flush(f1);
assert(r == 0);
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-flush-test.cc b/ft/tests/cachetable-flush-test.cc
index ad2bf2346c7..1861a01933b 100644
--- a/ft/tests/cachetable-flush-test.cc
+++ b/ft/tests/cachetable-flush-test.cc
@@ -26,7 +26,7 @@ test_cachetable_def_flush (int n) {
// insert keys 0..n-1
int i;
for (i=0; i<n; i++) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc);
assert(r == 0);
@@ -42,7 +42,7 @@ test_cachetable_def_flush (int n) {
// verify keys exists
for (i=0; i<n; i++) {
- u_int32_t hi;
+ uint32_t hi;
void *v;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
@@ -62,7 +62,7 @@ test_cachetable_def_flush (int n) {
// verify keys do not exist in f1 but do exist in f2
for (i=0; i<n; i++) {
- u_int32_t hi;
+ uint32_t hi;
void *v;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
@@ -74,8 +74,8 @@ test_cachetable_def_flush (int n) {
assert(r == 0);
}
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
- r = toku_cachefile_close(&f2, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f2, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-getandpin-test.cc b/ft/tests/cachetable-getandpin-test.cc
index 8cf765d419b..aff16fa9136 100644
--- a/ft/tests/cachetable-getandpin-test.cc
+++ b/ft/tests/cachetable-getandpin-test.cc
@@ -15,10 +15,10 @@ flush (CACHEFILE cf __attribute__((__unused__)),
void *extraargs __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)),
- BOOL keep_me __attribute__((__unused__)),
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert((long) key.b == size.size);
if (!keep_me) toku_free(v);
@@ -29,7 +29,7 @@ fetch (
CACHEFILE cf,
int UU(fd),
CACHEKEY key,
- u_int32_t hash,
+ uint32_t hash,
void **vptr,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -59,12 +59,12 @@ cachetable_getandpin_test (int n) {
// test get_and_pin size
for (i=1; i<=n; i++) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
void *v; long size;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(i), hi, &v, &size, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(i), hi, &v, &size, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
assert(size == i);
@@ -73,7 +73,7 @@ cachetable_getandpin_test (int n) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc b/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc
index d8891a66be8..e6d003d3a88 100644
--- a/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc
+++ b/ft/tests/cachetable-kibbutz_and_flush_cachefile.cc
@@ -5,7 +5,7 @@
#include "includes.h"
#include "test.h"
-BOOL foo;
+bool foo;
//
// This test verifies that flushing a cachefile will wait on kibbutzes to finish
@@ -14,7 +14,7 @@ static void kibbutz_work(void *fe_v)
{
CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
sleep(2);
- foo = TRUE;
+ foo = true;
// note that we make the size 16 to induce an eviction
// once evictions are moved to their own thread, we need
// to modify this test
@@ -40,17 +40,17 @@ run_test (void) {
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
- foo = FALSE;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
r = toku_cachefile_flush(f1); assert(r == 0);
assert(foo);
assert(f1);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
- foo = FALSE;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
assert(foo);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
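
The synchronization contract checked here, condensed: work queued on a cachefile's kibbutz must be drained by both toku_cachefile_flush and toku_cachefile_close, so the flag set at the end of the background job is observable as soon as either call returns:

    foo = false;
    cachefile_kibbutz_enq(f1, kibbutz_work, f1);   // job sleeps, then sets foo = true
    r = toku_cachefile_flush(f1); assert(r == 0);  // must wait for the kibbutz job
    assert(foo);                                   // hence foo is already true here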
diff --git a/ft/tests/cachetable-partial-fetch.cc b/ft/tests/cachetable-partial-fetch.cc
index b096c6fba5d..a36e7775bf7 100644
--- a/ft/tests/cachetable-partial-fetch.cc
+++ b/ft/tests/cachetable-partial-fetch.cc
@@ -10,14 +10,14 @@
// it works correctly
//
-u_int32_t fetch_val = 0;
-BOOL pf_req_called;
+uint32_t fetch_val = 0;
+bool pf_req_called;
static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -34,38 +34,38 @@ static int
err_fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp,
void *extraargs __attribute__((__unused__))
) {
- assert(FALSE);
+ assert(false);
*dirtyp = 0;
return 0;
}
-static BOOL pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return FALSE;
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
}
-static BOOL true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return TRUE;
+static bool true_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
}
static int err_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
- assert(FALSE);
+ assert(false);
return 0; // gcov
}
static int pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
- assert(FALSE);
+ assert(false);
return 0; // gcov
}
static int true_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* read_extraargs, int UU(fd), PAIR_ATTR* sizep) {
- pf_req_called = TRUE;
+ pf_req_called = true;
*sizep = make_pair_attr(sizeof(fetch_val)+1);
assert(read_extraargs == &fetch_val);
return 0;
@@ -77,7 +77,7 @@ cachetable_test (void) {
const int test_limit = 12;
int r;
CACHETABLE ct;
- BOOL doing_prefetch = FALSE;
+ bool doing_prefetch = false;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __SRCFILE__ "test1.dat";
unlink(fname1);
@@ -89,7 +89,7 @@ cachetable_test (void) {
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, pf_req_callback, pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, pf_req_callback, pf_callback, true, NULL);
assert(&fetch_val == v1);
//
// verify that a prefetch of this node will fail
@@ -112,20 +112,20 @@ cachetable_test (void) {
//
// now get and pin node again, and make sure that partial fetch and fetch are not called
//
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
//
// now make sure that if we say a partial fetch is required, that we get a partial fetch
// and that read_extraargs properly passed down
//
- pf_req_called = FALSE;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, true_pf_req_callback, true_pf_callback, TRUE, &fetch_val);
+ pf_req_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, true_pf_req_callback, true_pf_callback, true, &fetch_val);
assert(pf_req_called);
assert(s1 == sizeof(fetch_val)+1);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
// close and reopen cachefile so we can do some simple prefetch tests
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
//
// verify that a prefetch of the node will succeed
@@ -147,7 +147,7 @@ cachetable_test (void) {
//
// now verify we can pin it, and NO fetch callback should get called
//
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
assert(&fetch_val == v1);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
@@ -166,13 +166,13 @@ cachetable_test (void) {
&doing_prefetch
);
assert(doing_prefetch);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, err_fetch, pf_req_callback, err_pf_callback, true, NULL);
assert(&fetch_val == v1);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
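
The partial-fetch contract this test walks through: on every pin the cachetable asks pf_req_callback whether more of the pair must be read, and only a true answer triggers pf_callback, which reports the new size and receives read_extraargs unchanged. The converted hooks, as in the hunks above:

    static bool true_pf_req_callback(void *UU(ftnode_pv), void *UU(read_extraargs)) {
        return true;   // claim a partial fetch is required
    }
    static int true_pf_callback(void *UU(ftnode_pv), void *UU(dd), void *read_extraargs,
                                int UU(fd), PAIR_ATTR *sizep) {
        pf_req_called = true;
        *sizep = make_pair_attr(sizeof(fetch_val) + 1);  // report the grown size
        assert(read_extraargs == &fetch_val);            // extraargs passed through intact
        return 0;
    }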
diff --git a/ft/tests/cachetable-pin-checkpoint.cc b/ft/tests/cachetable-pin-checkpoint.cc
index 9fce5782627..046a82cdc37 100644
--- a/ft/tests/cachetable-pin-checkpoint.cc
+++ b/ft/tests/cachetable-pin-checkpoint.cc
@@ -21,19 +21,19 @@
int64_t data[NUM_ELEMENTS];
int64_t checkpointed_data[NUM_ELEMENTS];
-u_int32_t time_of_test;
-BOOL run_test;
+uint32_t time_of_test;
+bool run_test;
static void
clone_callback(
void* value_data,
void** cloned_value_data,
PAIR_ATTR* new_attr,
- BOOL UU(for_checkpoint),
+ bool UU(for_checkpoint),
void* UU(write_extraargs)
)
{
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
int64_t* XMALLOC(data_val);
*data_val = *(int64_t *)value_data;
*cloned_value_data = data_val;
@@ -49,10 +49,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me,
- BOOL keep_me,
- BOOL checkpoint_me,
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool checkpoint_me,
+ bool UU(is_clone)
) {
/* Do nothing */
int64_t val_to_write = *(int64_t *)v;
@@ -72,7 +72,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k,
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -98,7 +98,7 @@ static void *test_time(void *arg) {
if (time_of_test != 0) {
usleep(time_of_test*1000*1000);
if (verbose) printf("should now end test\n");
- run_test = FALSE;
+ run_test = false;
}
if (verbose) printf("should be ending test now\n");
return arg;
@@ -136,7 +136,7 @@ static void *move_numbers(void *arg) {
long s1;
CACHEKEY less_key;
less_key.b = less;
- u_int32_t less_fullhash = less;
+ uint32_t less_fullhash = less;
enum cachetable_dirty less_dirty = CACHETABLE_DIRTY;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
@@ -148,7 +148,7 @@ static void *move_numbers(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
0, //num_dependent_pairs
NULL,
@@ -161,7 +161,7 @@ static void *move_numbers(void *arg) {
CACHEKEY greater_key;
greater_key.b = greater;
- u_int32_t greater_fullhash = greater;
+ uint32_t greater_fullhash = greater;
enum cachetable_dirty greater_dirty = CACHETABLE_DIRTY;
r = toku_cachetable_get_and_pin_with_dep_pairs(
f1,
@@ -170,7 +170,7 @@ static void *move_numbers(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
1, //num_dependent_pairs
&f1,
@@ -195,7 +195,7 @@ static void *move_numbers(void *arg) {
third = (random() % (num_possible_values)) + greater + 1;
CACHEKEY third_key;
third_key.b = third;
- u_int32_t third_fullhash = third;
+ uint32_t third_fullhash = third;
enum cachetable_dirty third_dirty = CACHETABLE_DIRTY;
r = toku_cachetable_get_and_pin_with_dep_pairs(
f1,
@@ -204,7 +204,7 @@ static void *move_numbers(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
1, //num_dependent_pairs
&f1,
@@ -242,7 +242,7 @@ static void *read_random_numbers(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- FALSE,
+ false,
NULL,
NULL
);
@@ -357,7 +357,7 @@ cachetable_test (void) {
toku_pthread_t checkpoint_tid;
toku_pthread_t move_tid[NUM_MOVER_THREADS];
toku_pthread_t read_random_tid[NUM_MOVER_THREADS];
- run_test = TRUE;
+ run_test = true;
for (int i = 0; i < NUM_MOVER_THREADS; i++) {
r = toku_pthread_create(&read_random_tid[i], NULL, read_random_numbers, NULL);
@@ -388,7 +388,7 @@ cachetable_test (void) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
sum_vals();
diff --git a/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc b/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc
index 2f7ceb7c817..6e53c110515 100644
--- a/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc
+++ b/ft/tests/cachetable-pin-nonblocking-checkpoint-clean.cc
@@ -26,15 +26,15 @@ run_test (void) {
long s1;
long s2;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
for (int i = 0; i < 20; i++) {
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
}
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_begin_checkpoint(ct, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
@@ -49,7 +49,7 @@ run_test (void) {
def_fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
NULL,
NULL
);
@@ -65,7 +65,7 @@ run_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-prefetch-checkpoint-test.cc b/ft/tests/cachetable-prefetch-checkpoint-test.cc
index 2ae5ac03099..ddd3ce990aa 100644
--- a/ft/tests/cachetable-prefetch-checkpoint-test.cc
+++ b/ft/tests/cachetable-prefetch-checkpoint-test.cc
@@ -25,10 +25,10 @@ static void flush(
void *extraargs,
PAIR_ATTR size,
PAIR_ATTR* UU(new_size),
- BOOL write_me,
- BOOL keep_me,
- BOOL UU(for_checkpoint),
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool UU(for_checkpoint),
+ bool UU(is_clone)
)
{
cf = cf; key = key; value = value; extraargs = extraargs;
@@ -43,7 +43,7 @@ static int fetch(
CACHEFILE cf,
int UU(fd),
CACHEKEY key,
- u_int32_t fullhash,
+ uint32_t fullhash,
void **value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -83,7 +83,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
// prefetch block n+1. this will take 10 seconds.
{
CACHEKEY key = make_blocknum(n+1);
- u_int32_t fullhash = toku_cachetable_hash(f1, key);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
toku_cachetable_verify(ct);
}
@@ -92,7 +92,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
int i;
for (i=0; i<n; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(f1, key);
+ uint32_t hi = toku_cachetable_hash(f1, key);
r = toku_cachetable_put(f1, key, hi, (void *)(long)i, make_pair_attr(1), wc);
assert(r == 0);
@@ -122,7 +122,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
// after the checkpoint, all of the items should be clean
for (i=0; i<n; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t hi = toku_cachetable_hash(f1, key);
+ uint32_t hi = toku_cachetable_hash(f1, key);
void *v;
r = toku_cachetable_maybe_get_and_pin(f1, key, hi, &v);
if (r != 0)
@@ -148,7 +148,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
assert(r == 0);
assert(n_flush == 0 && n_write_me == 0 && n_keep_me == 0);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-prefetch-close-leak-test.cc b/ft/tests/cachetable-prefetch-close-leak-test.cc
index ee8580fd212..2ba355ef548 100644
--- a/ft/tests/cachetable-prefetch-close-leak-test.cc
+++ b/ft/tests/cachetable-prefetch-close-leak-test.cc
@@ -16,12 +16,12 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
- assert(w == FALSE && v != NULL);
+ assert(w == false && v != NULL);
toku_free(v);
}
@@ -31,7 +31,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -61,7 +61,7 @@ static void cachetable_prefetch_close_leak_test (void) {
// prefetch block 0. this will take 10 seconds.
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
@@ -69,7 +69,7 @@ static void cachetable_prefetch_close_leak_test (void) {
// close with the prefetch in progress. the close should block until
// all of the reads and writes are complete.
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-prefetch-close-test.cc b/ft/tests/cachetable-prefetch-close-test.cc
index 945fbad4247..c974fdff412 100644
--- a/ft/tests/cachetable-prefetch-close-test.cc
+++ b/ft/tests/cachetable-prefetch-close-test.cc
@@ -7,7 +7,7 @@
#include "includes.h"
#include "test.h"
-BOOL expect_pf;
+bool expect_pf;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -18,12 +18,12 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
- assert(w == FALSE);
+ assert(w == false);
}
static int fetch_calls = 0;
@@ -32,7 +32,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -50,9 +50,9 @@ fetch (CACHEFILE f __attribute__((__unused__)),
return 0;
}
-static void cachetable_prefetch_full_test (BOOL partial_fetch) {
+static void cachetable_prefetch_full_test (bool partial_fetch) {
const int test_limit = 2;
- expect_pf = FALSE;
+ expect_pf = false;
int r;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
@@ -63,7 +63,7 @@ static void cachetable_prefetch_full_test (BOOL partial_fetch) {
// prefetch block 0. this will take 2 seconds.
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
// if we want to do a test of partial fetch,
// we first put the key into the cachefile so that
@@ -71,7 +71,7 @@ static void cachetable_prefetch_full_test (BOOL partial_fetch) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
if (partial_fetch) {
- expect_pf = TRUE;
+ expect_pf = true;
void* value;
long size;
r = toku_cachetable_get_and_pin(
@@ -84,7 +84,7 @@ static void cachetable_prefetch_full_test (BOOL partial_fetch) {
fetch,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -96,14 +96,14 @@ static void cachetable_prefetch_full_test (BOOL partial_fetch) {
// close with the prefetch in progress. the close should block until
// all of the reads and writes are complete.
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- cachetable_prefetch_full_test(TRUE);
- cachetable_prefetch_full_test(FALSE);
+ cachetable_prefetch_full_test(true);
+ cachetable_prefetch_full_test(false);
return 0;
}
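
pf_req_callback is the gate for partial fetches: it now returns bool, and pf_callback only runs when it says true. A self-contained sketch of that contract, with illustrative names and a plain long size rather than the ft API's PAIR_ATTR:

// Sketch of the partial-fetch gate these tests exercise.
#include <cassert>

static bool do_pf;  // test knob, as in the ft tests

static bool pf_req_callback(void * /*node*/, void * /*read_extraargs*/) {
    return do_pf;   // true => a partial fetch is required
}

static int pf_callback(void * /*node*/, void * /*dd*/, void * /*extra*/,
                       int /*fd*/, long *sizep) {
    *sizep = 8;     // pretend we materialized 8 bytes
    return 0;
}

// A caller-side model of the cachetable's decision:
static int maybe_partial_fetch(long *sizep) {
    if (pf_req_callback(nullptr, nullptr))
        return pf_callback(nullptr, nullptr, nullptr, -1, sizep);
    return 0;       // value already complete; nothing to fetch
}

int main(void) {
    long size = 0;
    do_pf = false;
    assert(maybe_partial_fetch(&size) == 0 && size == 0);
    do_pf = true;
    assert(maybe_partial_fetch(&size) == 0 && size == 8);
    return 0;
}
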
diff --git a/ft/tests/cachetable-prefetch-flowcontrol-test.cc b/ft/tests/cachetable-prefetch-flowcontrol-test.cc
index bf376cdddff..5bd00d707ab 100644
--- a/ft/tests/cachetable-prefetch-flowcontrol-test.cc
+++ b/ft/tests/cachetable-prefetch-flowcontrol-test.cc
@@ -22,14 +22,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w,
- BOOL keep,
- BOOL f_ckpt __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w,
+ bool keep,
+ bool f_ckpt __attribute__((__unused__)),
+ bool UU(is_clone)
) {
- assert(w == FALSE);
+ assert(w == false);
flush_calls++;
- if (keep == FALSE) {
+ if (keep == false) {
flush_evict_calls++;
if (verbose) printf("%s:%d flush %" PRId64 "\n", __FUNCTION__, __LINE__, k.b);
evicted_keys |= 1 << k.b;
@@ -42,7 +42,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k,
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -77,7 +77,7 @@ static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
// prefetch keys 0 .. N-1. they should all fit in the cachetable
for (i=0; i<cachetable_size_limit; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t fullhash = toku_cachetable_hash(f1, key);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
toku_cachetable_verify(ct);
}
@@ -88,7 +88,7 @@ static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
// prefetch keys N .. 2*N-1. 0 .. N-1 should be evicted.
for (i=i; i<2*cachetable_size_limit; i++) {
CACHEKEY key = make_blocknum(i);
- u_int32_t fullhash = toku_cachetable_hash(f1, key);
+ uint32_t fullhash = toku_cachetable_hash(f1, key);
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
toku_cachetable_verify(ct);
// sleep(1);
@@ -105,7 +105,7 @@ static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
#endif
char *error_string;
- r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, &error_string, false, ZERO_LSN); assert(r == 0);
if (verbose) printf("%s:%d 0x%x 0x%x\n", __FUNCTION__, __LINE__,
evicted_keys, (1 << (2*cachetable_size_limit))-1);
assert(evicted_keys == (1 << (2*cachetable_size_limit))-1);
diff --git a/ft/tests/cachetable-prefetch-getandpin-test.cc b/ft/tests/cachetable-prefetch-getandpin-test.cc
index 3861fd787d7..f86bf4c26af 100644
--- a/ft/tests/cachetable-prefetch-getandpin-test.cc
+++ b/ft/tests/cachetable-prefetch-getandpin-test.cc
@@ -7,8 +7,8 @@
#include "includes.h"
#include "test.h"
-BOOL do_pf;
-BOOL expect_pf;
+bool do_pf;
+bool expect_pf;
static void
flush (CACHEFILE f __attribute__((__unused__)),
@@ -19,19 +19,19 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
- assert(w == FALSE);
+ assert(w == false);
}
static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -49,13 +49,13 @@ fetch (CACHEFILE f __attribute__((__unused__)),
return 0;
}
-static BOOL pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+static bool pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
if (do_pf) {
assert(expect_pf);
- return TRUE;
+ return true;
}
else {
- return FALSE;
+ return false;
}
}
@@ -72,7 +72,7 @@ static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
return t;
}
-static void cachetable_prefetch_maybegetandpin_test (BOOL do_partial_fetch) {
+static void cachetable_prefetch_maybegetandpin_test (bool do_partial_fetch) {
const int test_limit = 2;
int r;
CACHETABLE ct;
@@ -81,14 +81,14 @@ static void cachetable_prefetch_maybegetandpin_test (BOOL do_partial_fetch) {
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
- expect_pf = FALSE;
- do_pf = FALSE;
+ expect_pf = false;
+ do_pf = false;
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
if (do_partial_fetch) {
- expect_pf = TRUE;
+ expect_pf = true;
void* value;
long size;
r = toku_cachetable_get_and_pin(
@@ -101,7 +101,7 @@ static void cachetable_prefetch_maybegetandpin_test (BOOL do_partial_fetch) {
fetch,
pf_req_callback,
pf_callback,
- TRUE,
+ true,
0
);
assert(r==0);
@@ -112,17 +112,17 @@ static void cachetable_prefetch_maybegetandpin_test (BOOL do_partial_fetch) {
gettimeofday(&tstart, NULL);
// prefetch block 0. this will take 2 seconds.
- do_pf = TRUE;
+ do_pf = true;
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, pf_req_callback, pf_callback, 0, NULL);
toku_cachetable_verify(ct);
// verify that get_and_pin waits while the prefetch is in progress
void *v = 0;
long size = 0;
- do_pf = FALSE;
- r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, TRUE, NULL, NULL);
+ do_pf = false;
+ r = toku_cachetable_get_and_pin_nonblocking(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, true, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
- r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, key, fullhash, &v, &size, wc, fetch, pf_req_callback, pf_callback, true, NULL);
assert(r == 0 && v == 0 && size == 2);
struct timeval tend;
@@ -136,14 +136,14 @@ static void cachetable_prefetch_maybegetandpin_test (BOOL do_partial_fetch) {
assert(r == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- cachetable_prefetch_maybegetandpin_test(TRUE);
- cachetable_prefetch_maybegetandpin_test(FALSE);
+ cachetable_prefetch_maybegetandpin_test(true);
+ cachetable_prefetch_maybegetandpin_test(false);
return 0;
}
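
The assertions above pin down the nonblocking contract: a pin request that would have to wait returns TOKUDB_TRY_AGAIN instead of blocking. A toy model of that retry protocol (the sentinel value and the in-flight flag are illustrative, not the ft definitions):

// Model of the TOKUDB_TRY_AGAIN contract the getandpin test checks.
#include <cassert>

enum { TOKUDB_TRY_AGAIN = -100002 };  // illustrative sentinel

static bool prefetch_in_flight;

static int get_and_pin_nonblocking(void **v) {
    if (prefetch_in_flight) return TOKUDB_TRY_AGAIN;  // do not block
    *v = nullptr;  // stand-in for the pinned value
    return 0;
}

static int get_and_pin_blocking(void **v) {
    prefetch_in_flight = false;  // waits for the prefetch to finish
    *v = nullptr;
    return 0;
}

int main(void) {
    void *v;
    prefetch_in_flight = true;
    assert(get_and_pin_nonblocking(&v) == TOKUDB_TRY_AGAIN);
    assert(get_and_pin_blocking(&v) == 0);     // blocks, then pins
    assert(get_and_pin_nonblocking(&v) == 0);  // now succeeds at once
    return 0;
}
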
diff --git a/ft/tests/cachetable-prefetch-maybegetandpin-test.cc b/ft/tests/cachetable-prefetch-maybegetandpin-test.cc
index e83b7111e5b..884167d815f 100644
--- a/ft/tests/cachetable-prefetch-maybegetandpin-test.cc
+++ b/ft/tests/cachetable-prefetch-maybegetandpin-test.cc
@@ -11,7 +11,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -41,7 +41,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
// prefetch block 0. this will take 10 seconds.
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
toku_cachetable_verify(ct);
@@ -62,7 +62,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
assert(r == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-prefetch2-test.cc b/ft/tests/cachetable-prefetch2-test.cc
index 7a7d62e6705..0f6b17c1059 100644
--- a/ft/tests/cachetable-prefetch2-test.cc
+++ b/ft/tests/cachetable-prefetch2-test.cc
@@ -14,7 +14,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -44,7 +44,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
// prefetch block 0. this will take 10 seconds.
CACHEKEY key = make_blocknum(0);
- u_int32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
+ uint32_t fullhash = toku_cachetable_hash(f1, make_blocknum(0));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachefile_prefetch(f1, key, fullhash, wc, fetch, def_pf_req_callback, def_pf_callback, 0, NULL);
toku_cachetable_verify(ct);
@@ -72,7 +72,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
assert(r == 0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
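
A pattern that recurs in every one of these tests: the 32-bit fullhash is derived once per key and then passed alongside it on each cachetable call, so pin, unpin, and prefetch all agree. A standalone illustration with an FNV-1a stand-in for toku_cachetable_hash:

// Illustrative fullhash computation; not the ft hash function.
#include <cassert>
#include <cstdint>

struct blocknum { int64_t b; };

static uint32_t fullhash_of(const void *cachefile, blocknum k) {
    uint64_t h = 1469598103934665603ull;  // FNV-1a offset basis
    auto mix = [&](uint64_t v) {
        for (int i = 0; i < 8; i++) {
            h ^= (v >> (8 * i)) & 0xff;
            h *= 1099511628211ull;        // FNV-1a prime
        }
    };
    mix((uint64_t)(uintptr_t)cachefile);  // hash the file identity...
    mix((uint64_t)k.b);                   // ...together with the block
    return (uint32_t)h;
}

int main(void) {
    int dummy_cf;
    blocknum key{0};
    uint32_t fullhash = fullhash_of(&dummy_cf, key);
    // Same inputs must hash identically so all call sites agree.
    assert(fullhash == fullhash_of(&dummy_cf, key));
    return 0;
}
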
diff --git a/ft/tests/cachetable-put-checkpoint.cc b/ft/tests/cachetable-put-checkpoint.cc
index d5a87bde30c..0fee69f7b35 100644
--- a/ft/tests/cachetable-put-checkpoint.cc
+++ b/ft/tests/cachetable-put-checkpoint.cc
@@ -24,19 +24,19 @@
int64_t data[NUM_ELEMENTS];
int64_t checkpointed_data[NUM_ELEMENTS];
-u_int32_t time_of_test;
-BOOL run_test;
+uint32_t time_of_test;
+bool run_test;
static void
clone_callback(
void* value_data,
void** cloned_value_data,
PAIR_ATTR* new_attr,
- BOOL UU(for_checkpoint),
+ bool UU(for_checkpoint),
void* UU(write_extraargs)
)
{
- new_attr->is_valid = FALSE;
+ new_attr->is_valid = false;
int64_t* XMALLOC(data_val);
*data_val = *(int64_t *)value_data;
*cloned_value_data = data_val;
@@ -52,10 +52,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size,
- BOOL write_me,
- BOOL keep_me,
- BOOL checkpoint_me,
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool checkpoint_me,
+ bool UU(is_clone)
) {
int64_t val_to_write = *(int64_t *)v;
size_t data_index = (size_t)k.b;
@@ -74,7 +74,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k,
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -103,7 +103,7 @@ static void *test_time(void *arg) {
if (time_of_test != 0) {
usleep(time_of_test*1000*1000);
if (verbose) printf("should now end test\n");
- run_test = FALSE;
+ run_test = false;
}
if (verbose) printf("should be ending test now\n");
return arg;
@@ -126,12 +126,12 @@ static void move_number_to_child(
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
- u_int32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
CACHEKEY child_key;
child_key.b = child;
- u_int32_t child_fullhash = toku_cachetable_hash(f1, child_key);
+ uint32_t child_fullhash = toku_cachetable_hash(f1, child_key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
@@ -142,7 +142,7 @@ static void move_number_to_child(
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
1, //num_dependent_pairs
&f1,
@@ -178,7 +178,7 @@ static void *move_numbers(void *arg) {
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
- u_int32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
@@ -189,7 +189,7 @@ static void *move_numbers(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
0, //num_dependent_pairs
NULL,
@@ -204,7 +204,7 @@ static void *move_numbers(void *arg) {
return arg;
}
-static void remove_data(CACHEKEY* cachekey, BOOL for_checkpoint, void* UU(extra)) {
+static void remove_data(CACHEKEY* cachekey, bool for_checkpoint, void* UU(extra)) {
assert(cachekey->b < NUM_ELEMENTS);
data[cachekey->b] = INT64_MAX;
if (for_checkpoint) {
@@ -213,7 +213,7 @@ static void remove_data(CACHEKEY* cachekey, BOOL for_checkpoint, void* UU(extra)
}
-static void get_data(CACHEKEY* cachekey, u_int32_t* fullhash, void* extra) {
+static void get_data(CACHEKEY* cachekey, uint32_t* fullhash, void* extra) {
int* CAST_FROM_VOIDP(key, extra);
cachekey->b = *key;
*fullhash = toku_cachetable_hash(f1, *cachekey);
@@ -229,7 +229,7 @@ static void merge_and_split_child(
int child = 0;
int other_child = 0;
int r;
- BOOL even = (random() % 2) == 0;
+ bool even = (random() % 2) == 0;
child = (even) ? (2*parent + 1) : (2*parent + 2);
other_child = (!even) ? (2*parent + 1) : (2*parent + 2);
assert(child != other_child);
@@ -239,11 +239,11 @@ static void merge_and_split_child(
CACHEKEY parent_key;
parent_key.b = parent;
- u_int32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
CACHEKEY child_key;
child_key.b = child;
- u_int32_t child_fullhash = toku_cachetable_hash(f1, child_key);
+ uint32_t child_fullhash = toku_cachetable_hash(f1, child_key);
enum cachetable_dirty child_dirty = CACHETABLE_CLEAN;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
@@ -255,7 +255,7 @@ static void merge_and_split_child(
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
1, //num_dependent_pairs
&f1,
@@ -268,14 +268,14 @@ static void merge_and_split_child(
CACHEKEY other_child_key;
other_child_key.b = other_child;
- u_int32_t other_child_fullhash = toku_cachetable_hash(f1, other_child_key);
+ uint32_t other_child_fullhash = toku_cachetable_hash(f1, other_child_key);
CACHEFILE cfs[2];
cfs[0] = f1;
cfs[1] = f1;
CACHEKEY keys[2];
keys[0] = parent_key;
keys[1] = child_key;
- u_int32_t hashes[2];
+ uint32_t hashes[2];
hashes[0] = parent_fullhash;
hashes[1] = child_fullhash;
enum cachetable_dirty dirties[2];
@@ -289,7 +289,7 @@ static void merge_and_split_child(
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
2, //num_dependent_pairs
cfs,
@@ -312,7 +312,7 @@ static void merge_and_split_child(
// now do a split
CACHEKEY new_key;
- u_int32_t new_fullhash;
+ uint32_t new_fullhash;
int64_t* XMALLOC(data_val);
r = toku_cachetable_put_with_dep_pairs(
f1,
@@ -355,7 +355,7 @@ static void *merge_and_split(void *arg) {
long s1;
CACHEKEY parent_key;
parent_key.b = parent;
- u_int32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
+ uint32_t parent_fullhash = toku_cachetable_hash(f1, parent_key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
@@ -366,7 +366,7 @@ static void *merge_and_split(void *arg) {
&v1,
&s1,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
NULL,
0, //num_dependent_pairs
NULL,
@@ -490,7 +490,7 @@ cachetable_test (void) {
toku_pthread_t checkpoint_tid;
toku_pthread_t move_tid[NUM_MOVER_THREADS];
toku_pthread_t merge_and_split_tid[NUM_MOVER_THREADS];
- run_test = TRUE;
+ run_test = true;
for (int i = 0; i < NUM_MOVER_THREADS; i++) {
r = toku_pthread_create(&move_tid[i], NULL, move_numbers, NULL);
@@ -521,7 +521,7 @@ cachetable_test (void) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
sum_vals();
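
clone_callback is what lets this test keep mutating pairs while a checkpoint writes them: the checkpoint gets a frozen copy. A reduced sketch of that snapshot semantics, using plain new/delete in place of the toku XMALLOC wrappers and dropping the PAIR_ATTR plumbing:

// Reduced model of the clone path cachetable-put-checkpoint.cc drives.
#include <cassert>
#include <cstdint>

static void clone_callback(void *value_data, void **cloned_value_data,
                           bool /*for_checkpoint*/) {
    int64_t *copy = new int64_t(*(int64_t *)value_data);
    *cloned_value_data = copy;  // checkpoint writes this snapshot
}

int main(void) {
    int64_t live = 42;
    void *clone = nullptr;
    clone_callback(&live, &clone, true);
    live = 99;  // mutation after the clone must not reach the snapshot
    assert(*(int64_t *)clone == 42);
    delete (int64_t *)clone;
    return 0;
}
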
diff --git a/ft/tests/cachetable-put-test.cc b/ft/tests/cachetable-put-test.cc
index 6c61448c9e2..dbab9d0cb13 100644
--- a/ft/tests/cachetable-put-test.cc
+++ b/ft/tests/cachetable-put-test.cc
@@ -19,7 +19,7 @@ cachetable_put_test (int n) {
int i;
for (i=1; i<=n; i++) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc);
@@ -36,7 +36,7 @@ cachetable_put_test (int n) {
assert(toku_cachefile_count_pinned(f1, 0) == i);
}
for (i=n; i>0; i--) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
assert(r == 0);
@@ -49,7 +49,7 @@ cachetable_put_test (int n) {
r = toku_cachetable_unpin(f1, k, toku_cachetable_hash(f1, k), CACHETABLE_CLEAN, make_pair_attr(1));
assert(r != 0);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-rename-test.cc b/ft/tests/cachetable-rename-test.cc
index 24697214c7f..69a7a58a231 100644
--- a/ft/tests/cachetable-rename-test.cc
+++ b/ft/tests/cachetable-rename-test.cc
@@ -45,10 +45,10 @@ static void r_flush (CACHEFILE f __attribute__((__unused__)),
void *extra __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)),
- BOOL keep_me,
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)),
+ bool keep_me,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
int i;
//printf("Flush\n");
@@ -76,7 +76,7 @@ static void r_flush (CACHEFILE f __attribute__((__unused__)),
static int r_fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY key __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void**value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -104,7 +104,7 @@ static void test_rename (void) {
CACHEKEY nkey = make_blocknum(random());
long nval = random();
if (verbose) printf("n_keys=%d Insert %08" PRIx64 "\n", n_keys, nkey.b);
- u_int32_t hnkey = toku_cachetable_hash(f, nkey);
+ uint32_t hnkey = toku_cachetable_hash(f, nkey);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = r_flush;
r = toku_cachetable_put(f, nkey, hnkey,
@@ -136,7 +136,7 @@ static void test_rename (void) {
if (verbose) printf("Rename %" PRIx64 " to %" PRIx64 "\n", okey.b, nkey.b);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = r_flush;
- r = toku_cachetable_get_and_pin(f, okey, toku_cachetable_hash(f, okey), &current_value, &current_size, wc, r_fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ r = toku_cachetable_get_and_pin(f, okey, toku_cachetable_hash(f, okey), &current_value, &current_size, wc, r_fetch, def_pf_req_callback, def_pf_callback, true, 0);
if (r == -42) continue;
assert(r==0);
r = toku_cachetable_rename(f, okey, nkey);
@@ -170,7 +170,7 @@ static void test_rename (void) {
r = toku_cachetable_rename(f, okey, nkey);
assert(r != 0);
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN);
assert(r == 0);
r = toku_cachetable_close(&t);
assert(r == 0);
diff --git a/ft/tests/cachetable-scan.cc b/ft/tests/cachetable-scan.cc
index 9ceb71d7086..6fed5e34be1 100644
--- a/ft/tests/cachetable-scan.cc
+++ b/ft/tests/cachetable-scan.cc
@@ -19,10 +19,10 @@ static void f_flush (CACHEFILE f,
void *extra __attribute__((__unused__)),
PAIR_ATTR size,
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me,
- BOOL keep_me,
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me,
+ bool keep_me,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
assert(size.size==BLOCKSIZE);
if (write_me) {
@@ -36,7 +36,7 @@ static void f_flush (CACHEFILE f,
static int f_fetch (CACHEFILE f,
int UU(fd),
CACHEKEY key,
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void**value,
void** UU(dd),
PAIR_ATTR *sizep,
@@ -69,7 +69,7 @@ static void writeit (void) {
for (i=0; i<N; i++) {
void *buf = toku_malloc(BLOCKSIZE);
CACHEKEY key = make_blocknum(i*BLOCKSIZE);
- u_int32_t fullhash = toku_cachetable_hash(f, key);
+ uint32_t fullhash = toku_cachetable_hash(f, key);
int j;
for (j=0; j<BLOCKSIZE; j++) ((char*)buf)[j]=(char)((i+j)%256);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
@@ -94,13 +94,13 @@ static void readit (void) {
long current_size;
for (i=0; i<N; i++) {
CACHEKEY key = make_blocknum(i*BLOCKSIZE);
- u_int32_t fullhash = toku_cachetable_hash(f, key);
+ uint32_t fullhash = toku_cachetable_hash(f, key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = f_flush;
- r=toku_cachetable_get_and_pin(f, key, fullhash, &block, &current_size, wc, f_fetch, def_pf_req_callback, def_pf_callback, TRUE, 0); assert(r==0);
+ r=toku_cachetable_get_and_pin(f, key, fullhash, &block, &current_size, wc, f_fetch, def_pf_req_callback, def_pf_callback, true, 0); assert(r==0);
r=toku_cachetable_unpin(f, key, fullhash, CACHETABLE_CLEAN, make_pair_attr(BLOCKSIZE)); assert(r==0);
}
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&t); assert(r == 0);
gettimeofday(&end, 0);
toku_os_get_process_times(&end_usertime, &end_systime);
diff --git a/ft/tests/cachetable-simple-clone.cc b/ft/tests/cachetable-simple-clone.cc
index 99a821140e9..dafcd194ac7 100644
--- a/ft/tests/cachetable-simple-clone.cc
+++ b/ft/tests/cachetable-simple-clone.cc
@@ -5,17 +5,17 @@
#include "includes.h"
#include "test.h"
-BOOL clone_called;
-BOOL check_flush;
-BOOL flush_expected;
-BOOL flush_called;
+bool clone_called;
+bool check_flush;
+bool flush_expected;
+bool flush_called;
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
- clone_called = TRUE;
+ new_attr->is_valid = false;
+ clone_called = true;
}
static void
@@ -28,10 +28,10 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
if (w) usleep(5*1024*1024);
@@ -39,7 +39,7 @@ flush (
assert(flush_expected);
if (clone_called) assert(is_clone);
}
- flush_called = TRUE;
+ flush_called = true;
if (is_clone) assert(!keep);
}
@@ -59,7 +59,7 @@ static uint64_t tdelta_usec(struct timeval *tend, struct timeval *tstart) {
// blocks until the pair is written out
//
static void
-test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
+test_clean (enum cachetable_dirty dirty, bool cloneable) {
const int test_limit = 12;
int r;
CACHETABLE ct;
@@ -74,13 +74,13 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
- check_flush = TRUE;
- clone_called = FALSE;
- flush_expected = (dirty == CACHETABLE_DIRTY) ? TRUE : FALSE;
- flush_called = FALSE;
+ check_flush = true;
+ clone_called = false;
+ flush_expected = (dirty == CACHETABLE_DIRTY) ? true : false;
+ flush_called = false;
// begin checkpoint, since pair is clean, we should not
// have the clone called
r = toku_cachetable_begin_checkpoint(ct, NULL);
@@ -89,14 +89,14 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
struct timeval tend;
gettimeofday(&tstart, NULL);
- // test that having a pin that passes FALSE for may_modify_value does not stall behind checkpoint
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, FALSE, NULL);
+ // test that having a pin that passes false for may_modify_value does not stall behind checkpoint
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, false, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
gettimeofday(&tend, NULL);
assert(tdelta_usec(&tend, &tstart) <= 2000000);
assert(!clone_called);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
gettimeofday(&tend, NULL);
// we take 5 seconds for a write
@@ -135,19 +135,19 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
);
assert_zero(r);
- check_flush = FALSE;
+ check_flush = false;
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- test_clean(CACHETABLE_CLEAN, TRUE);
- test_clean(CACHETABLE_DIRTY, TRUE);
- test_clean(CACHETABLE_CLEAN, FALSE);
- test_clean(CACHETABLE_DIRTY, FALSE);
+ test_clean(CACHETABLE_CLEAN, true);
+ test_clean(CACHETABLE_DIRTY, true);
+ test_clean(CACHETABLE_CLEAN, false);
+ test_clean(CACHETABLE_DIRTY, false);
return 0;
}
diff --git a/ft/tests/cachetable-simple-clone2.cc b/ft/tests/cachetable-simple-clone2.cc
index 754b8bc084c..bba1e66c1a7 100644
--- a/ft/tests/cachetable-simple-clone2.cc
+++ b/ft/tests/cachetable-simple-clone2.cc
@@ -5,17 +5,17 @@
#include "includes.h"
#include "test.h"
-BOOL clone_called;
-BOOL check_flush;
-BOOL flush_expected;
-BOOL flush_called;
+bool clone_called;
+bool check_flush;
+bool flush_expected;
+bool flush_called;
static void
-clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
+clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, bool UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
- new_attr->is_valid = FALSE;
- clone_called = TRUE;
+ new_attr->is_valid = false;
+ clone_called = true;
}
static void
@@ -28,15 +28,15 @@ flush (
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
)
{
if (w && check_flush) {
assert(flush_expected);
- flush_called = TRUE;
+ flush_called = true;
}
}
@@ -46,7 +46,7 @@ flush (
// dirty or clean based on the second unpin
//
static void
-test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
+test_clean (enum cachetable_dirty dirty, bool cloneable) {
const int test_limit = 200;
int r;
CACHETABLE ct;
@@ -55,21 +55,21 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
- check_flush = FALSE;
+ check_flush = false;
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
// begin checkpoint, since pair is clean, we should not
// have the clone called
r = toku_cachetable_begin_checkpoint(ct, NULL);
assert_zero(r);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
// at this point, there should be no more dirty writes
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
@@ -82,12 +82,12 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
);
assert_zero(r);
- check_flush = TRUE;
- flush_expected = (dirty == CACHETABLE_DIRTY) ? TRUE : FALSE;
- flush_called = FALSE;
+ check_flush = true;
+ flush_expected = (dirty == CACHETABLE_DIRTY) ? true : false;
+ flush_called = false;
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 );
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0 );
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
if (flush_expected) assert(flush_called);
}
@@ -95,9 +95,9 @@ test_clean (enum cachetable_dirty dirty, BOOL cloneable) {
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- test_clean(CACHETABLE_CLEAN, TRUE);
- test_clean(CACHETABLE_DIRTY, TRUE);
- test_clean(CACHETABLE_CLEAN, FALSE);
- test_clean(CACHETABLE_DIRTY, FALSE);
+ test_clean(CACHETABLE_CLEAN, true);
+ test_clean(CACHETABLE_DIRTY, true);
+ test_clean(CACHETABLE_CLEAN, false);
+ test_clean(CACHETABLE_DIRTY, false);
return 0;
}
diff --git a/ft/tests/cachetable-simple-maybe-get-pin.cc b/ft/tests/cachetable-simple-maybe-get-pin.cc
index 4820f71184f..81110cbf425 100644
--- a/ft/tests/cachetable-simple-maybe-get-pin.cc
+++ b/ft/tests/cachetable-simple-maybe-get-pin.cc
@@ -26,7 +26,7 @@ cachetable_test (void) {
// nothing in cachetable, so this should fail
r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
assert(r==-1);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
// maybe_get_and_pin_clean should succeed, maybe_get_and_pin should fail
@@ -66,7 +66,7 @@ cachetable_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
diff --git a/ft/tests/cachetable-simple-pin-dep-nodes.cc b/ft/tests/cachetable-simple-pin-dep-nodes.cc
index f444d4b337d..0d452c00833 100644
--- a/ft/tests/cachetable-simple-pin-dep-nodes.cc
+++ b/ft/tests/cachetable-simple-pin-dep-nodes.cc
@@ -7,12 +7,12 @@
-BOOL v1_written;
-u_int64_t val1;
-BOOL v2_written;
-u_int64_t val2;
-u_int64_t val3;
-BOOL check_me;
+bool v1_written;
+uint64_t val1;
+bool v2_written;
+uint64_t val2;
+uint64_t val3;
+bool check_me;
static void
@@ -24,10 +24,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -37,13 +37,13 @@ flush (CACHEFILE f __attribute__((__unused__)),
assert(keep);
assert(w);
if (v == &val1) {
- v1_written = TRUE;
+ v1_written = true;
}
else if (v == &val2) {
- v2_written = TRUE;
+ v2_written = true;
}
else {
- assert(FALSE);
+ assert(false);
}
}
}
@@ -52,7 +52,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -66,7 +66,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
}
static void
-cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
+cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
const int test_limit = 12;
int r;
CACHETABLE ct;
@@ -85,9 +85,9 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&val1);
wc.flush_callback = flush;
wc.write_extraargs = &val1;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &val1);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
wc.write_extraargs = &val2;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &val2);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
CACHEFILE dependent_cfs[2];
dependent_cfs[0] = f1;
@@ -95,7 +95,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
CACHEKEY dependent_keys[2];
dependent_keys[0] = make_blocknum(1);
dependent_keys[1] = make_blocknum(2);
- u_int32_t dependent_fullhash[2];
+ uint32_t dependent_fullhash[2];
dependent_fullhash[0] = 1;
dependent_fullhash[1] = 2;
// now we set the dirty state of these two.
@@ -111,9 +111,9 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
//
// This call should cause a flush for both
//
- check_me = TRUE;
- v1_written = FALSE;
- v2_written = FALSE;
+ check_me = true;
+ v1_written = false;
+ v2_written = false;
wc.write_extraargs = &val3;
r = toku_cachetable_get_and_pin_with_dep_pairs(
f1,
@@ -122,7 +122,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
&v3,
&s3,
wc, fetch, def_pf_req_callback, def_pf_callback,
- TRUE,
+ true,
&val3,
2, //num_dependent_pairs
dependent_cfs,
@@ -138,7 +138,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
assert(!v1_written);
assert(!v2_written);
}
- check_me = FALSE;
+ check_me = false;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(8));
@@ -154,7 +154,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
@@ -163,13 +163,13 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- cachetable_test(FALSE,FALSE,TRUE);
- cachetable_test(FALSE,TRUE,TRUE);
- cachetable_test(TRUE,FALSE,TRUE);
- cachetable_test(TRUE,TRUE,TRUE);
- cachetable_test(FALSE,FALSE,FALSE);
- cachetable_test(FALSE,TRUE,FALSE);
- cachetable_test(TRUE,FALSE,FALSE);
- cachetable_test(TRUE,TRUE,FALSE);
+ cachetable_test(false,false,true);
+ cachetable_test(false,true,true);
+ cachetable_test(true,false,true);
+ cachetable_test(true,true,true);
+ cachetable_test(false,false,false);
+ cachetable_test(false,true,false);
+ cachetable_test(true,false,false);
+ cachetable_test(true,true,false);
return 0;
}
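
The write_first/write_second matrix above checks that pinning with dirty dependents while a checkpoint is pending flushes exactly those dependents first. A toy model of that rule (everything here is illustrative, not the cachetable implementation):

// Toy model of the dependent-pairs contract in this test.
#include <cassert>
#include <vector>

struct pair { int key; bool dirty; bool written; };

static void pin_with_dependents(std::vector<pair *> &deps,
                                bool checkpoint_pending) {
    if (!checkpoint_pending) return;
    for (pair *p : deps)
        if (p->dirty) { p->written = true; p->dirty = false; }
}

int main(void) {
    pair a{1, true, false}, b{2, false, false};
    std::vector<pair *> deps{&a, &b};
    pin_with_dependents(deps, /*checkpoint_pending=*/true);
    assert(a.written && !b.written);  // only dirty dependents get flushed
    return 0;
}
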
diff --git a/ft/tests/cachetable-simple-pin-nonblocking.cc b/ft/tests/cachetable-simple-pin-nonblocking.cc
index a9f50a353ea..b4310db4099 100644
--- a/ft/tests/cachetable-simple-pin-nonblocking.cc
+++ b/ft/tests/cachetable-simple-pin-nonblocking.cc
@@ -5,7 +5,7 @@
#include "includes.h"
#include "test.h"
-BOOL foo;
+bool foo;
//
// This test verifies that get_and_pin_nonblocking works and returns TOKUDB_TRY_AGAIN when the PAIR is being used.
@@ -20,10 +20,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -36,8 +36,8 @@ flush (CACHEFILE f __attribute__((__unused__)),
//usleep (5*1024*1024);
}
-static BOOL true_def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return TRUE;
+static bool true_def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return true;
}
static int true_def_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
*sizep = make_pair_attr(8);
@@ -49,7 +49,7 @@ static void kibbutz_work(void *fe_v)
{
CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
sleep(2);
- foo = TRUE;
+ foo = true;
int r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r==0);
remove_background_job_from_cf(f1);
@@ -76,39 +76,39 @@ run_test (void) {
// test that if we are getting a PAIR for the first time that TOKUDB_TRY_AGAIN is returned
// because the PAIR was not in the cachetable.
//
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
// now it should succeed
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==0);
- foo = FALSE;
+ foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
// because node is in use, should return TOKUDB_TRY_AGAIN
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(foo);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
// now make sure we get TOKUDB_TRY_AGAIN when a partial fetch is involved
// first make sure value is there
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
// now make sure that we get TOKUDB_TRY_AGAIN for the partial fetch
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_def_pf_req_callback, true_def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_def_pf_req_callback, true_def_pf_callback, true, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
//
// now test that if there is a checkpoint pending,
// first pin and unpin with dirty
//
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0);
// this should mark the PAIR as pending
r = toku_cachetable_begin_checkpoint(ct, NULL); assert(r == 0);
- r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
+ r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
r = toku_cachetable_end_checkpoint(
ct,
@@ -121,7 +121,7 @@ run_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN);
assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-simple-pin.cc b/ft/tests/cachetable-simple-pin.cc
index 45f51a352a1..ee52f504339 100644
--- a/ft/tests/cachetable-simple-pin.cc
+++ b/ft/tests/cachetable-simple-pin.cc
@@ -5,9 +5,9 @@
#include "includes.h"
#include "test.h"
-BOOL foo;
-BOOL check_me;
-BOOL flush_called;
+bool foo;
+bool check_me;
+bool flush_called;
//
// This test verifies that get_and_pin takes a write lock on a PAIR.
@@ -22,16 +22,16 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
//usleep (5*1024*1024);
if (check_me) {
- flush_called = TRUE;
+ flush_called = true;
assert(c);
assert(keep);
assert(w);
@@ -42,7 +42,7 @@ static void kibbutz_work(void *fe_v)
{
CACHEFILE CAST_FROM_VOIDP(f1, fe_v);
sleep(2);
- foo = TRUE;
+ foo = true;
int r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r==0);
remove_background_job_from_cf(f1);
@@ -65,16 +65,16 @@ run_test (void) {
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
- foo = FALSE;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
+ foo = false;
cachefile_kibbutz_enq(f1, kibbutz_work, f1);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(foo);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
//now let's do a simple checkpoint test
// first dirty the PAIR
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
// now this should mark the pair for checkpoint
@@ -83,14 +83,14 @@ run_test (void) {
//
// now we pin the pair again, and verify in flush callback that the pair is being checkpointed
//
- check_me = TRUE;
- flush_called = FALSE;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ check_me = true;
+ flush_called = false;
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert(flush_called);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
- check_me = FALSE;
+ check_me = false;
r = toku_cachetable_end_checkpoint(
ct,
NULL,
@@ -101,7 +101,7 @@ run_test (void) {
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-simple-put-dep-nodes.cc b/ft/tests/cachetable-simple-put-dep-nodes.cc
index 45c538f00ca..92d945e2d62 100644
--- a/ft/tests/cachetable-simple-put-dep-nodes.cc
+++ b/ft/tests/cachetable-simple-put-dep-nodes.cc
@@ -7,12 +7,12 @@
-BOOL v1_written;
-u_int64_t val1;
-BOOL v2_written;
-u_int64_t val2;
-u_int64_t val3;
-BOOL check_me;
+bool v1_written;
+uint64_t val1;
+bool v2_written;
+uint64_t val2;
+uint64_t val3;
+bool check_me;
static void
@@ -24,10 +24,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
@@ -37,13 +37,13 @@ flush (CACHEFILE f __attribute__((__unused__)),
assert(keep);
assert(w);
if (v == &val1) {
- v1_written = TRUE;
+ v1_written = true;
}
else if (v == &val2) {
- v2_written = TRUE;
+ v2_written = true;
}
else {
- assert(FALSE);
+ assert(false);
}
}
}
@@ -52,7 +52,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -65,7 +65,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
return 0;
}
-static void get_key_and_fullhash(CACHEKEY* cachekey, u_int32_t* fullhash, void* extra) {
+static void get_key_and_fullhash(CACHEKEY* cachekey, uint32_t* fullhash, void* extra) {
assert(extra == NULL);
cachekey->b = 3;
*fullhash = 3;
@@ -73,7 +73,7 @@ static void get_key_and_fullhash(CACHEKEY* cachekey, u_int32_t* fullhash, void*
static void
-cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
+cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
const int test_limit = 12;
int r;
CACHETABLE ct;
@@ -89,9 +89,9 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &val1);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val1);
assert(r==0);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &val2);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, true, &val2);
assert(r==0);
CACHEFILE dependent_cfs[2];
@@ -100,7 +100,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
CACHEKEY dependent_keys[2];
dependent_keys[0] = make_blocknum(1);
dependent_keys[1] = make_blocknum(2);
- u_int32_t dependent_fullhash[2];
+ uint32_t dependent_fullhash[2];
dependent_fullhash[0] = 1;
dependent_fullhash[1] = 2;
// now we set the dirty state of these two.
@@ -116,12 +116,12 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
//
// This call should cause a flush for both
//
- check_me = TRUE;
- v1_written = FALSE;
- v2_written = FALSE;
+ check_me = true;
+ v1_written = false;
+ v2_written = false;
CACHEKEY put_key;
- u_int32_t put_fullhash;
+ uint32_t put_fullhash;
r = toku_cachetable_put_with_dep_pairs(
f1,
get_key_and_fullhash,
@@ -146,7 +146,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
assert(v2_written == write_second);
}
- check_me = FALSE;
+ check_me = false;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(8));
@@ -162,7 +162,7 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
}
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
@@ -171,13 +171,13 @@ cachetable_test (BOOL write_first, BOOL write_second, BOOL start_checkpoint) {
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
- cachetable_test(FALSE,FALSE,TRUE);
- cachetable_test(FALSE,TRUE,TRUE);
- cachetable_test(TRUE,FALSE,TRUE);
- cachetable_test(TRUE,TRUE,TRUE);
- cachetable_test(FALSE,FALSE,FALSE);
- cachetable_test(FALSE,TRUE,FALSE);
- cachetable_test(TRUE,FALSE,FALSE);
- cachetable_test(TRUE,TRUE,FALSE);
+ cachetable_test(false,false,true);
+ cachetable_test(false,true,true);
+ cachetable_test(true,false,true);
+ cachetable_test(true,true,true);
+ cachetable_test(false,false,false);
+ cachetable_test(false,true,false);
+ cachetable_test(true,false,false);
+ cachetable_test(true,true,false);
return 0;
}
diff --git a/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc b/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc
index 15a626116af..676398619f9 100644
--- a/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc
+++ b/ft/tests/cachetable-simple-unpin-remove-checkpoint.cc
@@ -7,7 +7,7 @@
static void remove_key_expect_checkpoint(
CACHEKEY* UU(cachekey),
- BOOL for_checkpoint,
+ bool for_checkpoint,
void* UU(extra)
)
{
@@ -16,7 +16,7 @@ static void remove_key_expect_checkpoint(
static void remove_key_expect_no_checkpoint(
CACHEKEY* UU(cachekey),
- BOOL for_checkpoint,
+ bool for_checkpoint,
void* UU(extra)
)
{
@@ -39,7 +39,7 @@ cachetable_test (void) {
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_begin_checkpoint(ct, NULL); assert(r == 0);
r = toku_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_checkpoint, NULL);
r = toku_cachetable_end_checkpoint(
@@ -50,12 +50,12 @@ cachetable_test (void) {
);
assert(r==0);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_no_checkpoint, NULL);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-simple-verify.cc b/ft/tests/cachetable-simple-verify.cc
index 3fd470e8c12..87a258c6917 100644
--- a/ft/tests/cachetable-simple-verify.cc
+++ b/ft/tests/cachetable-simple-verify.cc
@@ -21,10 +21,10 @@ cachetable_test (void) {
long s1;
//long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
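
This file is the smallest complete cachetable exercise, and its shape recurs throughout the tests below: pin a block (fetching it if absent), unpin it with a dirty/clean verdict and its new size, then tear everything down. A condensed sketch, assuming the def_* default callbacks from the test harness; the trailing true is the bool this commit rewrites from TRUE:

    void *v; long s;
    CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
    r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1 /*fullhash*/, &v, &s, wc,
                                    def_fetch, def_pf_req_callback, def_pf_callback,
                                    true, NULL);
    r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
    r = toku_cachefile_close(&f1, 0, false, ZERO_LSN);
    r = toku_cachetable_close(&ct);
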
diff --git a/ft/tests/cachetable-test.cc b/ft/tests/cachetable-test.cc
index c2e2ea2458a..016061b7975 100644
--- a/ft/tests/cachetable-test.cc
+++ b/ft/tests/cachetable-test.cc
@@ -96,10 +96,10 @@ static void flush (CACHEFILE f,
void *extra __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)),
- BOOL keep_me __attribute__((__unused__)),
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
struct item *CAST_FROM_VOIDP(it, value);
int i;
@@ -128,7 +128,7 @@ static void flush (CACHEFILE f,
toku_free(value);
}
-static struct item *make_item (u_int64_t key) {
+static struct item *make_item (uint64_t key) {
struct item *MALLOC(it);
it->key.b=key;
it->something="something";
@@ -136,7 +136,7 @@ static struct item *make_item (u_int64_t key) {
}
static CACHEKEY did_fetch={-1};
-static int fetch (CACHEFILE f, int UU(fd), CACHEKEY key, u_int32_t fullhash __attribute__((__unused__)), void**value, void** UU(dd), PAIR_ATTR *sizep __attribute__((__unused__)), int *dirtyp, void*extraargs) {
+static int fetch (CACHEFILE f, int UU(fd), CACHEKEY key, uint32_t fullhash __attribute__((__unused__)), void**value, void** UU(dd), PAIR_ATTR *sizep __attribute__((__unused__)), int *dirtyp, void*extraargs) {
if (verbose) printf("Fetch %" PRId64 "\n", key.b);
assert (expect_f==f);
assert((long)extraargs==23);
@@ -173,13 +173,13 @@ static void test0 (void) {
expect_f = f;
expect_n_flushes=0;
- u_int32_t h1 = toku_cachetable_hash(f, make_blocknum(1));
- u_int32_t h2 = toku_cachetable_hash(f, make_blocknum(2));
- u_int32_t h3 = toku_cachetable_hash(f, make_blocknum(3));
- u_int32_t h4 = toku_cachetable_hash(f, make_blocknum(4));
- u_int32_t h5 = toku_cachetable_hash(f, make_blocknum(5));
- u_int32_t h6 = toku_cachetable_hash(f, make_blocknum(6));
- u_int32_t h7 = toku_cachetable_hash(f, make_blocknum(7));
+ uint32_t h1 = toku_cachetable_hash(f, make_blocknum(1));
+ uint32_t h2 = toku_cachetable_hash(f, make_blocknum(2));
+ uint32_t h3 = toku_cachetable_hash(f, make_blocknum(3));
+ uint32_t h4 = toku_cachetable_hash(f, make_blocknum(4));
+ uint32_t h5 = toku_cachetable_hash(f, make_blocknum(5));
+ uint32_t h6 = toku_cachetable_hash(f, make_blocknum(6));
+ uint32_t h7 = toku_cachetable_hash(f, make_blocknum(7));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(t3);
wc.flush_callback = flush;
r=toku_cachetable_put(f, make_blocknum(1), h1, make_item(1), make_pair_attr(test_object_size), wc); /* 1P */ /* this is the lru list. 1 is pinned. */
@@ -236,7 +236,7 @@ static void test0 (void) {
{
void *item_v=0;
expect_init();
- r=toku_cachetable_get_and_pin(f, make_blocknum(5), toku_cachetable_hash(f, make_blocknum(5)), &item_v, NULL, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, t3); /* 5P 7U 6P 4P 1P */
+ r=toku_cachetable_get_and_pin(f, make_blocknum(5), toku_cachetable_hash(f, make_blocknum(5)), &item_v, NULL, wc, fetch, def_pf_req_callback, def_pf_callback, true, t3); /* 5P 7U 6P 4P 1P */
assert(r==0);
assert(((struct item *)item_v)->key.b==5);
assert(strcmp(((struct item *)item_v)->something,"something")==0);
@@ -253,7 +253,7 @@ static void test0 (void) {
did_fetch=make_blocknum(-1);
CACHETABLE_WRITE_CALLBACK wc2 = def_write_callback(t3);
wc2.flush_callback = flush;
- r=toku_cachetable_get_and_pin(f, make_blocknum(2), toku_cachetable_hash(f, make_blocknum(2)), &item_v, NULL, wc2, fetch, def_pf_req_callback, def_pf_callback, TRUE, t3); /* 2p 5P 7U 6P 1P */
+ r=toku_cachetable_get_and_pin(f, make_blocknum(2), toku_cachetable_hash(f, make_blocknum(2)), &item_v, NULL, wc2, fetch, def_pf_req_callback, def_pf_callback, true, t3); /* 2p 5P 7U 6P 1P */
assert(r==0);
assert(did_fetch.b==2); /* Expect that 2 is fetched in. */
assert(((struct item *)item_v)->key.b==2);
@@ -283,7 +283,7 @@ static void test0 (void) {
expectN(7);
expectN(6);
expectN(1);
- r=toku_cachefile_close(&f, 0, FALSE, ZERO_LSN);
+ r=toku_cachefile_close(&f, 0, false, ZERO_LSN);
assert(r==0);
r=toku_cachetable_close(&t);
assert(r==0);
@@ -298,15 +298,15 @@ static void flush_n (CACHEFILE f __attribute__((__unused__)), int UU(fd), CACHEK
void *extra __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)), BOOL keep_me __attribute__((__unused__)),
- BOOL for_checkpoint __attribute__ ((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)), bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__ ((__unused__)),
+ bool UU(is_clone)
) {
int *CAST_FROM_VOIDP(v, value);
assert(*v==0);
}
static int fetch_n (CACHEFILE f __attribute__((__unused__)), int UU(fd), CACHEKEY key __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void**value,
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -336,13 +336,13 @@ static void test_nested_pin (void) {
expect_f = f;
i0=0; i1=0;
- u_int32_t f1hash = toku_cachetable_hash(f, make_blocknum(1));
+ uint32_t f1hash = toku_cachetable_hash(f, make_blocknum(1));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(f2);
wc.flush_callback = flush_n;
r = toku_cachetable_put(f, make_blocknum(1), f1hash, &i0, make_pair_attr(1), wc);
assert(r==0);
r = toku_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
- r = toku_cachetable_get_and_pin(f, make_blocknum(1), f1hash, &vv, NULL, wc, fetch_n, def_pf_req_callback, def_pf_callback, TRUE, f2);
+ r = toku_cachetable_get_and_pin(f, make_blocknum(1), f1hash, &vv, NULL, wc, fetch_n, def_pf_req_callback, def_pf_callback, true, f2);
assert(r==0);
assert(vv==&i0);
assert(i0==0);
@@ -353,13 +353,13 @@ static void test_nested_pin (void) {
assert(vv2==vv);
r = toku_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
assert(r==0);
- u_int32_t f2hash = toku_cachetable_hash(f, make_blocknum(2));
+ uint32_t f2hash = toku_cachetable_hash(f, make_blocknum(2));
r = toku_cachetable_put(f, make_blocknum(2), f2hash, &i1, make_pair_attr(test_object_size), wc);
assert(r==0); // The other one is pinned, but now the cachetable fails gracefully: It allows the pin to happen
r = toku_cachetable_unpin(f, make_blocknum(2), f2hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
assert(r==0);
// toku_os_usleep(1*1000000);
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN); assert(r==0);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN); assert(r==0);
r = toku_cachetable_close(&t); assert(r==0);
}
@@ -372,14 +372,14 @@ static void null_flush (CACHEFILE cf __attribute__((__unused__)),
void *extra __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)),
- BOOL keep_me __attribute__((__unused__)),
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
}
-static int add123_fetch (CACHEFILE cf, int UU(fd), CACHEKEY key, u_int32_t fullhash, void **value,
+static int add123_fetch (CACHEFILE cf, int UU(fd), CACHEKEY key, uint32_t fullhash, void **value,
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
assert(fullhash==toku_cachetable_hash(cf,key));
@@ -390,7 +390,7 @@ PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
return 0;
}
-static int add222_fetch (CACHEFILE cf, int UU(fd), CACHEKEY key, u_int32_t fullhash, void **value,
+static int add222_fetch (CACHEFILE cf, int UU(fd), CACHEKEY key, uint32_t fullhash, void **value,
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
assert(fullhash==toku_cachetable_hash(cf,key));
@@ -427,21 +427,21 @@ static void test_multi_filehandles (void) {
wc.flush_callback = null_flush;
r = toku_cachetable_put(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), (void*)124, make_pair_attr(test_object_size), wc); assert(r==0);
r = toku_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(0)); assert(r==0);
- r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, TRUE, (void*)123); assert(r==0);
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(1), toku_cachetable_hash(f2, make_blocknum(1)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
assert((unsigned long)v==124);
- r = toku_cachetable_get_and_pin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, TRUE, (void*)123); assert(r==0);
+ r = toku_cachetable_get_and_pin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), &v, NULL, wc, add123_fetch, def_pf_req_callback, def_pf_callback, true, (void*)123); assert(r==0);
assert((unsigned long)v==125);
wc.write_extraargs = (void*)222;
- r = toku_cachetable_get_and_pin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), &v, NULL, wc, add222_fetch, def_pf_req_callback, def_pf_callback, TRUE, (void*)222); assert(r==0);
+ r = toku_cachetable_get_and_pin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), &v, NULL, wc, add222_fetch, def_pf_req_callback, def_pf_callback, true, (void*)222); assert(r==0);
assert((unsigned long)v==224);
// we support only one close for a file handle
r = toku_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
r = toku_cachetable_unpin(f2, make_blocknum(2), toku_cachetable_hash(f2, make_blocknum(2)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
- r = toku_cachefile_close(&f2, 0, FALSE, ZERO_LSN); assert(r==0);
+ r = toku_cachefile_close(&f2, 0, false, ZERO_LSN); assert(r==0);
r = toku_cachetable_unpin(f3, make_blocknum(2), toku_cachetable_hash(f3, make_blocknum(2)), CACHETABLE_CLEAN, make_pair_attr(0)); assert(r==0);
- r = toku_cachefile_close(&f3, 0, FALSE, ZERO_LSN); assert(r==0);
+ r = toku_cachefile_close(&f3, 0, false, ZERO_LSN); assert(r==0);
r = toku_cachetable_close(&t); assert(r==0);
}
@@ -456,15 +456,15 @@ static void test_dirty_flush(CACHEFILE f,
void *extra __attribute__((__unused__)),
PAIR_ATTR size,
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL do_write,
- BOOL keep,
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool do_write,
+ bool keep,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (verbose) printf("test_dirty_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
}
-static int test_dirty_fetch(CACHEFILE f, int UU(fd), CACHEKEY key, u_int32_t fullhash, void **value_ptr,
+static int test_dirty_fetch(CACHEFILE f, int UU(fd), CACHEKEY key, uint32_t fullhash, void **value_ptr,
void** UU(dd),
PAIR_ATTR *size_ptr, int * dirtyp, void *arg) {
*value_ptr = arg;
@@ -493,7 +493,7 @@ static void test_dirty(void) {
assert(r == 0);
key = make_blocknum(1); value = (void*)1;
- u_int32_t hkey = toku_cachetable_hash(f, key);
+ uint32_t hkey = toku_cachetable_hash(f, key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = test_dirty_flush;
r = toku_cachetable_put(f, key, hkey, value, make_pair_attr(test_object_size), wc);
@@ -513,7 +513,7 @@ static void test_dirty(void) {
assert(pinned == 0);
r = toku_cachetable_get_and_pin(f, key, hkey, &value, NULL, wc,
- test_dirty_fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// cachetable_print_state(t);
@@ -535,7 +535,7 @@ static void test_dirty(void) {
hkey = toku_cachetable_hash(f, key);
r = toku_cachetable_get_and_pin(f, key, hkey,
&value, NULL, wc,
- test_dirty_fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// cachetable_print_state(t);
@@ -555,7 +555,7 @@ static void test_dirty(void) {
r = toku_cachetable_get_and_pin(f, key, hkey,
&value, NULL, wc,
- test_dirty_fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ test_dirty_fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// cachetable_print_state(t);
@@ -573,7 +573,7 @@ static void test_dirty(void) {
assert(dirty == 1);
assert(pinned == 0);
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN);
assert(r == 0);
r = toku_cachetable_close(&t);
assert(r == 0);
@@ -590,10 +590,10 @@ static void test_size_flush_callback(CACHEFILE f,
void *extra __attribute__((__unused__)),
PAIR_ATTR size,
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL do_write,
- BOOL keep,
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool do_write,
+ bool keep,
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (test_size_debug && verbose) printf("test_size_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
if (keep) {
@@ -628,7 +628,7 @@ static void test_size_resize(void) {
CACHEKEY key = make_blocknum(42);
void *value = (void *) -42;
- u_int32_t hkey = toku_cachetable_hash(f, key);
+ uint32_t hkey = toku_cachetable_hash(f, key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = test_size_flush_callback;
@@ -649,7 +649,7 @@ static void test_size_resize(void) {
void *current_value;
long current_size;
- r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, &current_size, wc, 0, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ r = toku_cachetable_get_and_pin(f, key, hkey, &current_value, &current_size, wc, 0, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
assert(current_value == value);
assert(current_size == new_size);
@@ -657,7 +657,7 @@ static void test_size_resize(void) {
r = toku_cachetable_unpin(f, key, hkey, CACHETABLE_CLEAN, make_pair_attr(new_size));
assert(r == 0);
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN);
assert(r == 0);
r = toku_cachetable_close(&t);
assert(r == 0);
@@ -694,7 +694,7 @@ static void test_size_flush(void) {
CACHEKEY key = make_blocknum(i);
void *value = (void *)(long)-i;
// printf("test_size put %lld %p %lld\n", key, value, size);
- u_int32_t hkey = toku_cachetable_hash(f, key);
+ uint32_t hkey = toku_cachetable_hash(f, key);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = test_size_flush_callback;
r = toku_cachetable_put(f, key, hkey, value, make_pair_attr(size), wc);
@@ -728,7 +728,7 @@ static void test_size_flush(void) {
assert(r == 0);
}
- r = toku_cachefile_close(&f, 0, FALSE, ZERO_LSN);
+ r = toku_cachefile_close(&f, 0, false, ZERO_LSN);
assert(r == 0);
r = toku_cachetable_close(&t);
assert(r == 0);
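
Flush callbacks absorb most of this file's BOOL churn: the cachetable hands every eviction four flags. A skeleton with the conventional handling of each, parameter order as in the callbacks above, assuming the harness's UU() convenience:

    static void flush(CACHEFILE f, int UU(fd), CACHEKEY key, void *value, void **UU(dd),
                      void *UU(extra), PAIR_ATTR UU(size), PAIR_ATTR *UU(new_size),
                      bool write_me,        // pair is dirty and must be persisted
                      bool keep_me,         // pair stays cached afterward; do not free it
                      bool for_checkpoint,  // this write satisfies a pending checkpoint
                      bool UU(is_clone)) {
        if (write_me) { /* serialize value to fd at key's block */ }
        if (!keep_me) { toku_free(value); }
    }
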
diff --git a/ft/tests/cachetable-test2.cc b/ft/tests/cachetable-test2.cc
index d8a6ce86622..e92a1aa3ee4 100644
--- a/ft/tests/cachetable-test2.cc
+++ b/ft/tests/cachetable-test2.cc
@@ -103,10 +103,10 @@ static void flush_forchain (CACHEFILE f __attribute__((__unused__)),
void *extra __attribute__((__unused__)),
PAIR_ATTR size __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL write_me __attribute__((__unused__)),
- BOOL keep_me __attribute__((__unused__)),
- BOOL for_checkpoint __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool write_me __attribute__((__unused__)),
+ bool keep_me __attribute__((__unused__)),
+ bool for_checkpoint __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (keep_me) return;
int *CAST_FROM_VOIDP(v, value);
@@ -117,7 +117,7 @@ static void flush_forchain (CACHEFILE f __attribute__((__unused__)),
//print_ints();
}
-static int fetch_forchain (CACHEFILE f, int UU(fd), CACHEKEY key, u_int32_t fullhash, void**value,
+static int fetch_forchain (CACHEFILE f, int UU(fd), CACHEKEY key, uint32_t fullhash, void**value,
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), int * dirtyp, void*extraargs) {
assert(toku_cachetable_hash(f, key)==fullhash);
@@ -140,7 +140,7 @@ again:
for (i=0; i<my_n_present; i++) {
void *v;
- u_int32_t fullhash = toku_cachetable_hash(my_present_items[i].cf, my_present_items[i].key);
+ uint32_t fullhash = toku_cachetable_hash(my_present_items[i].cf, my_present_items[i].key);
int r=toku_cachetable_maybe_get_and_pin_clean(my_present_items[i].cf,
my_present_items[i].key,
toku_cachetable_hash(my_present_items[i].cf, my_present_items[i].key),
@@ -169,7 +169,7 @@ static void test_chaining (void) {
for (i=0; i<N_PRESENT_LIMIT; i++) {
int fnum = i%N_FILES;
//printf("%s:%d Add %d\n", __SRCFILE__, __LINE__, i);
- u_int32_t fhash = toku_cachetable_hash(f[fnum], make_blocknum(i));
+ uint32_t fhash = toku_cachetable_hash(f[fnum], make_blocknum(i));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback((void *)i);
wc.flush_callback = flush_forchain;
r = toku_cachetable_put(f[fnum], make_blocknum(i), fhash, (void*)i, make_pair_attr(test_object_size), wc);
@@ -192,7 +192,7 @@ static void test_chaining (void) {
test_mutex_unlock();
void *value;
//printf("Touching %d (%lld, %p)\n", whichone, whichkey, whichcf);
- u_int32_t fhash = toku_cachetable_hash(whichcf, whichkey);
+ uint32_t fhash = toku_cachetable_hash(whichcf, whichkey);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback((void*)(long)whichkey.b);
wc.flush_callback = flush_forchain;
r = toku_cachetable_get_and_pin(whichcf,
@@ -204,7 +204,7 @@ static void test_chaining (void) {
fetch_forchain,
def_pf_req_callback,
def_pf_callback,
- TRUE,
+ true,
(void*)(long)whichkey.b
);
assert(r==0);
@@ -220,7 +220,7 @@ static void test_chaining (void) {
// i is always incrementing, so we need not worry about inserting a duplicate
// if i is a duplicate, cachetable_put will return -1
// printf("%s:%d Add {%ld,%p}\n", __SRCFILE__, __LINE__, i, f[fnum]);
- u_int32_t fhash = toku_cachetable_hash(f[fnum], make_blocknum(i));
+ uint32_t fhash = toku_cachetable_hash(f[fnum], make_blocknum(i));
CACHETABLE_WRITE_CALLBACK wc = def_write_callback((void *)i);
wc.flush_callback = flush_forchain;
r = toku_cachetable_put(f[fnum], make_blocknum(i), fhash, (void*)i, make_pair_attr(test_object_size), wc);
@@ -247,13 +247,13 @@ static void test_chaining (void) {
//printf("Close %d (%p), now n_present=%d\n", i, f[i], n_present);
//print_ints();
CACHEFILE oldcf=f[i];
- r = toku_cachefile_close(&f[i], 0, FALSE, ZERO_LSN); assert(r==0);
+ r = toku_cachefile_close(&f[i], 0, false, ZERO_LSN); assert(r==0);
file_is_not_present(oldcf);
r = toku_cachetable_openf(&f[i], ct, fname[i], O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO); assert(r==0);
}
}
for (i=0; i<N_FILES; i++) {
- r = toku_cachefile_close(&f[i], 0, FALSE, ZERO_LSN); assert(r==0);
+ r = toku_cachefile_close(&f[i], 0, false, ZERO_LSN); assert(r==0);
}
r = toku_cachetable_close(&ct); assert(r==0);
}
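
Fetch callbacks are the other signature hit repeatedly by the u_int32_t to uint32_t rename: they take the caller's precomputed hash and return the object through out-parameters. A minimal sketch condensed from the fetch_forchain and test_dirty_fetch variants above:

    static int fetch(CACHEFILE f, int UU(fd), CACHEKEY key, uint32_t fullhash,
                     void **value, void **UU(dd), PAIR_ATTR *sizep, int *dirtyp,
                     void *extraargs) {
        assert(fullhash == toku_cachetable_hash(f, key));  // hash is precomputed by the caller
        *value = extraargs;            // hand back a test payload
        *sizep = make_pair_attr(8);    // report the in-memory footprint
        *dirtyp = 0;                   // freshly fetched pairs start clean
        return 0;
    }
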
diff --git a/ft/tests/cachetable-unpin-and-remove-test.cc b/ft/tests/cachetable-unpin-and-remove-test.cc
index 6841d7a911b..e2f41a98ec5 100644
--- a/ft/tests/cachetable-unpin-and-remove-test.cc
+++ b/ft/tests/cachetable-unpin-and-remove-test.cc
@@ -9,7 +9,7 @@ static int
fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -45,7 +45,7 @@ cachetable_unpin_and_remove_test (int n) {
// put the keys into the cachetable
for (i=0; i<n; i++) {
- u_int32_t hi = toku_cachetable_hash(f1, make_blocknum(keys[i].b));
+ uint32_t hi = toku_cachetable_hash(f1, make_blocknum(keys[i].b));
r = toku_cachetable_put(f1, make_blocknum(keys[i].b), hi, (void *)(long) keys[i].b, make_pair_attr(1),wc);
assert(r == 0);
}
@@ -55,7 +55,7 @@ cachetable_unpin_and_remove_test (int n) {
for (i=0; i<n; i++) testkeys[i] = keys[i];
while (nkeys > 0) {
i = random() % nkeys;
- u_int32_t hi = toku_cachetable_hash(f1, make_blocknum(testkeys[i].b));
+ uint32_t hi = toku_cachetable_hash(f1, make_blocknum(testkeys[i].b));
r = toku_cachetable_unpin_and_remove(f1, testkeys[i], NULL, NULL);
assert(r == 0);
@@ -82,7 +82,7 @@ cachetable_unpin_and_remove_test (int n) {
assert(nentries == 0);
char *error_string;
- r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, &error_string, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
@@ -102,7 +102,7 @@ cachetable_put_evict_remove_test (int n) {
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
- u_int32_t hi[n];
+ uint32_t hi[n];
for (i=0; i<n; i++)
hi[i] = toku_cachetable_hash(f1, make_blocknum(i));
@@ -116,7 +116,7 @@ cachetable_put_evict_remove_test (int n) {
// get 0
void *v; long s;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(0), hi[0], &v, &s, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(0), hi[0], &v, &s, wc, fetch, def_pf_req_callback, def_pf_callback, true, 0);
assert(r == 0);
// remove 0
@@ -124,7 +124,7 @@ cachetable_put_evict_remove_test (int n) {
assert(r == 0);
char *error_string;
- r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, &error_string, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-unpin-remove-and-checkpoint.cc b/ft/tests/cachetable-unpin-remove-and-checkpoint.cc
index 948e0a07c79..31f0a9f03a6 100644
--- a/ft/tests/cachetable-unpin-remove-and-checkpoint.cc
+++ b/ft/tests/cachetable-unpin-remove-and-checkpoint.cc
@@ -39,7 +39,7 @@ run_test (void) {
//void* v2;
long s1;
//long s2;
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
toku_cachetable_unpin(
f1,
make_blocknum(1),
@@ -50,7 +50,7 @@ run_test (void) {
// now this should mark the pair for checkpoint
r = toku_cachetable_begin_checkpoint(ct, NULL);
- r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
+ r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
toku_pthread_t mytid;
r = toku_pthread_create(&mytid, NULL, run_end_chkpt, NULL);
@@ -66,7 +66,7 @@ run_test (void) {
assert(r==0);
toku_cachetable_verify(ct);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
diff --git a/ft/tests/cachetable-unpin-test.cc b/ft/tests/cachetable-unpin-test.cc
index 53f5632769f..6ffc397efd6 100644
--- a/ft/tests/cachetable-unpin-test.cc
+++ b/ft/tests/cachetable-unpin-test.cc
@@ -20,7 +20,7 @@ cachetable_unpin_test (int n) {
int i;
for (i=1; i<=n; i++) {
- u_int32_t hi;
+ uint32_t hi;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_put(f1, make_blocknum(i), hi, (void *)(long)i, make_pair_attr(1), wc);
@@ -37,7 +37,7 @@ cachetable_unpin_test (int n) {
assert(toku_cachefile_count_pinned(f1, 0) == i);
}
for (i=n; i>0; i--) {
- u_int32_t hi;
+ uint32_t hi;
hi = toku_cachetable_hash(f1, make_blocknum(i));
r = toku_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
assert(r == 0);
@@ -50,7 +50,7 @@ cachetable_unpin_test (int n) {
r = toku_cachetable_unpin(f1, k, toku_cachetable_hash(f1, k), CACHETABLE_CLEAN, make_pair_attr(1));
assert(r != 0);
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
diff --git a/ft/tests/cachetable-writer-thread-limit.cc b/ft/tests/cachetable-writer-thread-limit.cc
index 8a92f34c7f4..8da68ba80ac 100644
--- a/ft/tests/cachetable-writer-thread-limit.cc
+++ b/ft/tests/cachetable-writer-thread-limit.cc
@@ -19,10 +19,10 @@ flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
if (w) {
int curr_size = __sync_fetch_and_sub(&total_size, 1);
@@ -54,7 +54,7 @@ cachetable_test (void) {
r = toku_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, make_pair_attr(4));
}
- r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0);
+ r = toku_cachefile_close(&f1, 0, false, ZERO_LSN); assert(r == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
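
This test decrements a shared counter from its flush callback, which the cachetable may invoke concurrently from multiple writer threads, hence the GCC atomic builtin rather than a plain total_size--. The idiom in isolation:

    static int total_size;  // shared across cachetable writer threads

    // Inside the flush callback (w is the write_me flag):
    if (w) {
        // Atomically subtract 1 and return the value held beforehand.
        int curr_size = __sync_fetch_and_sub(&total_size, 1);
        (void) curr_size;  // the test inspects this prior value (body elided in the hunk above)
    }
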
diff --git a/ft/tests/ft-bfe-query.cc b/ft/tests/ft-bfe-query.cc
index 2665190c867..5bdba73419f 100644
--- a/ft/tests/ft-bfe-query.cc
+++ b/ft/tests/ft-bfe-query.cc
@@ -27,15 +27,15 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
// first test that prefetching everything should work
memset(&cursor->range_lock_left_key, 0 , sizeof(DBT));
memset(&cursor->range_lock_right_key, 0 , sizeof(DBT));
- cursor->left_is_neg_infty = TRUE;
- cursor->right_is_pos_infty = TRUE;
- cursor->disable_prefetching = FALSE;
+ cursor->left_is_neg_infty = true;
+ cursor->right_is_pos_infty = true;
+ cursor->disable_prefetching = false;
struct ftnode_fetch_extra bfe;
// quick test to see that we have the right behavior when we set
- // disable_prefetching to TRUE
- cursor->disable_prefetching = TRUE;
+ // disable_prefetching to true
+ cursor->disable_prefetching = true;
fill_bfe_for_prefetch(&bfe, brt_h, cursor);
FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
@@ -53,7 +53,7 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
toku_free(ndd);
// now enable prefetching again
- cursor->disable_prefetching = FALSE;
+ cursor->disable_prefetching = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
@@ -74,9 +74,9 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
toku_ftnode_free(&dn);
toku_free(ndd);
- u_int64_t left_key = 150;
- toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(u_int64_t));
- cursor->left_is_neg_infty = FALSE;
+ uint64_t left_key = 150;
+ toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t));
+ cursor->left_is_neg_infty = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
@@ -96,9 +96,9 @@ test_prefetch_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
toku_ftnode_free(&dn);
toku_free(ndd);
- u_int64_t right_key = 151;
- toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(u_int64_t));
- cursor->right_is_pos_infty = FALSE;
+ uint64_t right_key = 151;
+ toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t));
+ cursor->right_is_pos_infty = false;
fill_bfe_for_prefetch(&bfe, brt_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
@@ -175,13 +175,13 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
// first test that prefetching everything should work
memset(&cursor->range_lock_left_key, 0 , sizeof(DBT));
memset(&cursor->range_lock_right_key, 0 , sizeof(DBT));
- cursor->left_is_neg_infty = TRUE;
- cursor->right_is_pos_infty = TRUE;
+ cursor->left_is_neg_infty = true;
+ cursor->right_is_pos_infty = true;
struct ftnode_fetch_extra bfe;
- u_int64_t left_key = 150;
- u_int64_t right_key = 151;
+ uint64_t left_key = 150;
+ uint64_t right_key = 151;
DBT left, right;
toku_fill_dbt(&left, &left_key, sizeof(left_key));
toku_fill_dbt(&right, &right_key, sizeof(right_key));
@@ -191,15 +191,15 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
NULL,
&left,
&right,
- FALSE,
- FALSE,
- FALSE
+ false,
+ false,
+ false
);
// fake the childnum to read
// set disable_prefetching ON
bfe.child_to_read = 2;
- bfe.disable_prefetching = TRUE;
+ bfe.disable_prefetching = true;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -224,7 +224,7 @@ test_subset_read(int fd, FT_HANDLE UU(brt), FT brt_h) {
// fake the childnum to read
bfe.child_to_read = 2;
- bfe.disable_prefetching = FALSE;
+ bfe.disable_prefetching = false;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -296,8 +296,8 @@ test_prefetching(void) {
sn.n_children = 3;
sn.dirty = 1;
- u_int64_t key1 = 100;
- u_int64_t key2 = 200;
+ uint64_t key1 = 100;
+ uint64_t key2 = 200;
MALLOC_N(sn.n_children, sn.bp);
MALLOC_N(sn.n_children-1, sn.childkeys);
@@ -352,7 +352,7 @@ test_prefetching(void) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -360,7 +360,7 @@ test_prefetching(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test_prefetch_read(fd, brt, brt_h);
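
ft-bfe-query drives the partial-fetch logic by toggling what are now plain bool fields on the cursor: an unbounded range prefetches every partition, disable_prefetching suppresses the read-ahead, and a bounded range narrows which partitions are fetched. The three configurations exercised above, condensed:

    // 1. Unbounded range, prefetching on: all partitions requested.
    cursor->left_is_neg_infty = true;
    cursor->right_is_pos_infty = true;
    cursor->disable_prefetching = false;
    fill_bfe_for_prefetch(&bfe, brt_h, cursor);

    // 2. Same range with prefetching disabled: no partitions requested.
    cursor->disable_prefetching = true;
    fill_bfe_for_prefetch(&bfe, brt_h, cursor);

    // 3. Bounded on the left: only partitions covering keys >= 150 are fetched.
    uint64_t left_key = 150;
    toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t));
    cursor->left_is_neg_infty = false;
    fill_bfe_for_prefetch(&bfe, brt_h, cursor);
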
diff --git a/ft/tests/ft-clock-test.cc b/ft/tests/ft-clock-test.cc
index 8b071b97c2a..90dc4532c0c 100644
--- a/ft/tests/ft-clock-test.cc
+++ b/ft/tests/ft-clock-test.cc
@@ -28,7 +28,7 @@ static int omt_cmp(OMTVALUE p, void *q)
LEAFENTRY CAST_FROM_VOIDP(a, p);
LEAFENTRY CAST_FROM_VOIDP(b, q);
void *ak, *bk;
- u_int32_t al, bl;
+ uint32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
int l = MIN(al, bl);
@@ -72,7 +72,7 @@ test1(int fd, FT brt_h, FTNODE *dn) {
fill_bfe_for_full_read(&bfe_all, brt_h);
FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all);
- BOOL is_leaf = ((*dn)->height == 0);
+ bool is_leaf = ((*dn)->height == 0);
assert(r==0);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
@@ -95,7 +95,7 @@ test1(int fd, FT brt_h, FTNODE *dn) {
}
}
PAIR_ATTR size;
- BOOL req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
+ bool req = toku_ftnode_pf_req_callback(*dn, &bfe_all);
assert(req);
toku_ftnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
toku_ftnode_pe_callback(*dn, attr, &attr, brt_h);
@@ -160,14 +160,14 @@ test2(int fd, FT brt_h, FTNODE *dn) {
),
&left,
&right,
- TRUE,
- TRUE,
- FALSE
+ true,
+ true,
+ false
);
FTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_subset);
assert(r==0);
- BOOL is_leaf = ((*dn)->height == 0);
+ bool is_leaf = ((*dn)->height == 0);
// at this point, although both partitions are available, only the
// second basement node should have had its clock
// touched
@@ -184,7 +184,7 @@ test2(int fd, FT brt_h, FTNODE *dn) {
toku_ftnode_pe_callback(*dn, attr, &attr, brt_h);
assert(BP_STATE(*dn, 1) == (is_leaf) ? PT_ON_DISK : PT_COMPRESSED);
- BOOL req = toku_ftnode_pf_req_callback(*dn, &bfe_subset);
+ bool req = toku_ftnode_pf_req_callback(*dn, &bfe_subset);
assert(req);
toku_ftnode_pf_callback(*dn, ndd, &bfe_subset, fd, &attr);
assert(BP_STATE(*dn, 0) == PT_AVAIL);
@@ -297,7 +297,7 @@ test_serialize_nonleaf(void) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -305,7 +305,7 @@ test_serialize_nonleaf(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test1(fd, brt_h, &dn);
@@ -389,7 +389,7 @@ test_serialize_leaf(void) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -397,7 +397,7 @@ test_serialize_leaf(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test1(fd, brt_h, &dn);
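
Every serializer test in this file runs the same round trip: write the hand-built node to block 20, then deserialize it under a fetch extra and inspect partition clock states. The core of that round trip, with the two rewritten bools left unnamed since only their positions are visible in these hunks:

    // Serialize the in-memory node sn into block 20, then read it back in full.
    FTNODE_DISK_DATA ndd = NULL;
    r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd,
                                 true,    // flipped from TRUE throughout these hunks
                                 brt->ft,
                                 false);  // flipped from FALSE
    assert(r == 0);

    FTNODE dn = NULL;
    FTNODE_DISK_DATA ndd2 = NULL;
    struct ftnode_fetch_extra bfe_all;
    fill_bfe_for_full_read(&bfe_all, brt_h);
    r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0 /*pass zero for hash*/,
                                     &dn, &ndd2, &bfe_all);
    assert(r == 0);
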
diff --git a/ft/tests/ft-serialize-benchmark.cc b/ft/tests/ft-serialize-benchmark.cc
index ea82c80b0d2..761698c17a9 100644
--- a/ft/tests/ft-serialize-benchmark.cc
+++ b/ft/tests/ft-serialize-benchmark.cc
@@ -18,7 +18,7 @@ static int omt_cmp(OMTVALUE p, void *q)
LEAFENTRY CAST_FROM_VOIDP(a, p);
LEAFENTRY CAST_FROM_VOIDP(b, q);
void *ak, *bk;
- u_int32_t al, bl;
+ uint32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
int l = MIN(al, bl);
@@ -131,7 +131,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -142,7 +142,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
struct timeval t[2];
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL;
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, true, brt->ft, false);
assert(r==0);
gettimeofday(&t[1], NULL);
double dt;
@@ -265,7 +265,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -276,7 +276,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
struct timeval t[2];
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL;
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
gettimeofday(&t[1], NULL);
double dt;
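
The benchmark brackets each serialize call with gettimeofday() and reports a double dt; the subtraction itself is elided from these hunks. A typical way to compute it:

    #include <sys/time.h>

    // Elapsed seconds between two gettimeofday() samples.
    static double tdiff(const struct timeval *end, const struct timeval *start) {
        return (end->tv_sec - start->tv_sec) + 1e-6 * (end->tv_usec - start->tv_usec);
    }

    // Usage matching the benchmark's pattern:
    //   gettimeofday(&t[0], NULL);  ... work ...  gettimeofday(&t[1], NULL);
    //   double dt = tdiff(&t[1], &t[0]);
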
diff --git a/ft/tests/ft-serialize-sub-block-test.cc b/ft/tests/ft-serialize-sub-block-test.cc
index 09a69a48605..c840ce9a72a 100644
--- a/ft/tests/ft-serialize-sub-block-test.cc
+++ b/ft/tests/ft-serialize-sub-block-test.cc
@@ -30,7 +30,7 @@ static void test_sub_block(int n) {
error = toku_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER);
assert(error == 0);
- error = toku_open_ft_handle(fname, TRUE, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
+ error = toku_open_ft_handle(fname, true, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
assert(error == 0);
// insert keys 0, 1, 2, .. (n-1)
@@ -49,11 +49,11 @@ static void test_sub_block(int n) {
assert(error == 0);
// verify the brt by walking a cursor through the rows
- error = toku_open_ft_handle(fname, FALSE, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
+ error = toku_open_ft_handle(fname, false, &brt, nodesize, basementnodesize, compression_method, ct, null_txn, toku_builtin_compare_fun);
assert(error == 0);
FT_CURSOR cursor;
- error = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ error = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(error == 0);
for (i=0; ; i++) {
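
The sub-block test opens the tree twice with what is now a bool create flag (flag name assumed from context): once with true to build it, once with false to reread and verify through a cursor. The skeleton of that pattern:

    // Create pass: build the tree, insert rows, close.
    error = toku_open_ft_handle(fname, true /*create*/, &brt, nodesize, basementnodesize,
                                compression_method, ct, null_txn, toku_builtin_compare_fun);
    assert(error == 0);
    // ... insert keys 0..n-1, close the handle ...

    // Verify pass: reopen without creating and walk a cursor over the rows.
    error = toku_open_ft_handle(fname, false /*create*/, &brt, nodesize, basementnodesize,
                                compression_method, ct, null_txn, toku_builtin_compare_fun);
    assert(error == 0);
    error = toku_ft_cursor(brt, &cursor, NULL, false, false);  // the two bools flipped in this hunk
    assert(error == 0);
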
diff --git a/ft/tests/ft-serialize-test.cc b/ft/tests/ft-serialize-test.cc
index 7d3d8b6c27f..8ab4a1341b9 100644
--- a/ft/tests/ft-serialize-test.cc
+++ b/ft/tests/ft-serialize-test.cc
@@ -14,7 +14,7 @@ static int omt_int_cmp(OMTVALUE p, void *q)
LEAFENTRY CAST_FROM_VOIDP(a, p);
LEAFENTRY CAST_FROM_VOIDP(b, q);
void *ak, *bk;
- u_int32_t al, bl;
+ uint32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
assert(al == 4 && bl == 4);
@@ -32,7 +32,7 @@ static int omt_cmp(OMTVALUE p, void *q)
LEAFENTRY CAST_FROM_VOIDP(a, p);
LEAFENTRY CAST_FROM_VOIDP(b, q);
void *ak, *bk;
- u_int32_t al, bl;
+ uint32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
int l = MIN(al, bl);
@@ -83,7 +83,7 @@ struct check_leafentries_struct {
int (*cmp)(OMTVALUE, void *);
};
-static int check_leafentries(OMTVALUE v, u_int32_t UU(i), void *extra) {
+static int check_leafentries(OMTVALUE v, uint32_t UU(i), void *extra) {
struct check_leafentries_struct *CAST_FROM_VOIDP(e, extra);
assert(e->i < e->nelts);
assert(e->cmp(v, e->elts[e->i]) == 0);
@@ -180,29 +180,29 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT brt_h, FTNODE *dn, FTNODE_DISK_
}
else {
// if we get here, this is a test bug, NOT a bug in development code
- assert(FALSE);
+ assert(false);
}
}
-static void write_sn_to_disk(int fd, FT_HANDLE brt, FTNODE sn, FTNODE_DISK_DATA* src_ndd, BOOL do_clone) {
+static void write_sn_to_disk(int fd, FT_HANDLE brt, FTNODE sn, FTNODE_DISK_DATA* src_ndd, bool do_clone) {
int r;
if (do_clone) {
void* cloned_node_v = NULL;
PAIR_ATTR attr;
- toku_ftnode_clone_callback(sn, &cloned_node_v, &attr, FALSE, brt->ft);
+ toku_ftnode_clone_callback(sn, &cloned_node_v, &attr, false, brt->ft);
FTNODE CAST_FROM_VOIDP(cloned_node, cloned_node_v);
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, FALSE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, false, brt->ft, false);
assert(r==0);
toku_ftnode_free(&cloned_node);
}
else {
- r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, TRUE, brt->ft, FALSE);
+ r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, true, brt->ft, false);
assert(r==0);
}
}
static void
-test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
// struct ft_handle source_ft;
const int nodesize = 1024;
struct ftnode sn, *dn;
@@ -276,7 +276,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -306,11 +306,11 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
elts[0] = le_malloc(&dummy_mp, "a", "aval");
elts[1] = le_malloc(&dummy_mp, "b", "bval");
elts[2] = le_malloc(&dummy_mp, "x", "xval");
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(2*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = 3, .elts = elts, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(BLB_MAX_MSN_APPLIED(dn, i).msn == POSTSERIALIZE_MSN_ON_DISK.msn);
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
@@ -318,7 +318,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
assert(dest_ndd[i].start >= dest_ndd[i-1].start + dest_ndd[i-1].size);
}
toku_omt_iterate(BLB_BUFFER(dn, i), check_leafentries, &extra);
- u_int32_t keylen;
+ uint32_t keylen;
if (i < npartitions-1) {
assert(strcmp((char*)dn->childkeys[i].data, (char*)le_key_and_len(elts[extra.i-1], &keylen))==0);
}
@@ -356,7 +356,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
}
static void
-test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone) {
int r;
struct ftnode sn, *dn;
const int keylens = 256*1024, vallens = 0, nrows = 8;
@@ -394,7 +394,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
r = toku_omt_insert(BLB_BUFFER(&sn, i), le, omt_cmp, le, NULL); assert(r==0);
BLB_NBYTESINBUF(&sn, i) = leafentry_disksize(le);
if (i < nrows-1) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(keyp, le_key_and_len(le, &keylen));
toku_fill_dbt(&sn.childkeys[i], toku_xmemdup(keyp, keylen), keylen);
}
@@ -424,7 +424,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -459,11 +459,11 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
les[i] = le_fastmalloc(&dummy_mp, (char *) &key, sizeof(key), (char *) &val, sizeof(val));
}
}
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(keylens*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = nrows, .elts = les, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
@@ -505,7 +505,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
}
static void
-test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
int r;
struct ftnode sn, *dn;
const int keylens = sizeof(int), vallens = sizeof(int), nrows = 196*1024;
@@ -569,7 +569,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -601,11 +601,11 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
les[i] = le_fastmalloc(&dummy_mp, (char *) &key, sizeof(key), (char *) &val, sizeof(val));
}
}
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(sizeof(int)*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = nrows, .elts = les, .i = 0, .cmp = omt_int_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
@@ -649,7 +649,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
static void
-test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone) {
int r;
struct ftnode sn, *dn;
const uint32_t nrows = 7;
@@ -719,7 +719,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone)
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -756,12 +756,12 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone)
les[i] = le_fastmalloc(&dummy_mp, key, key_size, val, val_size);
}
}
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(npartitions == nrows);
assert(dn->totalchildkeylens==(key_size*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = nrows, .elts = les, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
@@ -804,7 +804,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone)
static void
-test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool do_clone) {
const int nodesize = 1024;
struct ftnode sn, *dn;
@@ -885,7 +885,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -913,11 +913,11 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL
elts[0] = le_malloc(&dummy_mp, "a", "aval");
elts[1] = le_malloc(&dummy_mp, "b", "bval");
elts[2] = le_malloc(&dummy_mp, "x", "xval");
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(2*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = 3, .elts = elts, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
@@ -959,7 +959,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL
}
static void
-test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type bft, bool do_clone) {
const int nodesize = 1024;
struct ftnode sn, *dn;
@@ -1015,7 +1015,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -1037,11 +1037,11 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
assert(dn->height == 0);
assert(dn->n_children == 1);
{
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(2*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = 0, .elts = NULL, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
@@ -1080,7 +1080,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
static void
-test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_leaf(enum ftnode_verify_type bft, bool do_clone) {
// struct ft_handle source_ft;
const int nodesize = 1024;
struct ftnode sn, *dn;
@@ -1150,7 +1150,7 @@ test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -1177,18 +1177,18 @@ test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
elts[0] = le_malloc(&dummy_mp, "a", "aval");
elts[1] = le_malloc(&dummy_mp, "b", "bval");
elts[2] = le_malloc(&dummy_mp, "x", "xval");
- const u_int32_t npartitions = dn->n_children;
+ const uint32_t npartitions = dn->n_children;
assert(dn->totalchildkeylens==(2*(npartitions-1)));
struct check_leafentries_struct extra = { .nelts = 3, .elts = elts, .i = 0, .cmp = omt_cmp };
- u_int32_t last_i = 0;
- for (u_int32_t i = 0; i < npartitions; ++i) {
+ uint32_t last_i = 0;
+ for (uint32_t i = 0; i < npartitions; ++i) {
assert(dest_ndd[i].start > 0);
assert(dest_ndd[i].size > 0);
if (i > 0) {
assert(dest_ndd[i].start >= dest_ndd[i-1].start + dest_ndd[i-1].size);
}
toku_omt_iterate(BLB_BUFFER(dn, i), check_leafentries, &extra);
- u_int32_t keylen;
+ uint32_t keylen;
if (i < npartitions-1) {
assert(strcmp((char*)dn->childkeys[i].data, (char*)le_key_and_len(elts[extra.i-1], &keylen))==0);
}
@@ -1226,7 +1226,7 @@ test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
}
static void
-test_serialize_nonleaf(enum ftnode_verify_type bft, BOOL do_clone) {
+test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
// struct ft_handle source_ft;
const int nodesize = 1024;
struct ftnode sn, *dn;
@@ -1298,7 +1298,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, BOOL do_clone) {
{
DISKOFF offset;
DISKOFF size;
- toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
+ toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -1356,61 +1356,61 @@ int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
initialize_dummymsn();
- test_serialize_leaf(read_none, FALSE);
- test_serialize_leaf(read_all, FALSE);
- test_serialize_leaf(read_compressed, FALSE);
- test_serialize_leaf(read_none, TRUE);
- test_serialize_leaf(read_all, TRUE);
- test_serialize_leaf(read_compressed, TRUE);
-
- test_serialize_leaf_with_empty_basement_nodes(read_none, FALSE);
- test_serialize_leaf_with_empty_basement_nodes(read_all, FALSE);
- test_serialize_leaf_with_empty_basement_nodes(read_compressed, FALSE);
- test_serialize_leaf_with_empty_basement_nodes(read_none, TRUE);
- test_serialize_leaf_with_empty_basement_nodes(read_all, TRUE);
- test_serialize_leaf_with_empty_basement_nodes(read_compressed, TRUE);
-
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, FALSE);
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, FALSE);
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed, FALSE);
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, TRUE);
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, TRUE);
- test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed, TRUE);
-
- test_serialize_leaf_with_large_rows(read_none, FALSE);
- test_serialize_leaf_with_large_rows(read_all, FALSE);
- test_serialize_leaf_with_large_rows(read_compressed, FALSE);
- test_serialize_leaf_with_large_rows(read_none, TRUE);
- test_serialize_leaf_with_large_rows(read_all, TRUE);
- test_serialize_leaf_with_large_rows(read_compressed, TRUE);
-
- test_serialize_leaf_with_many_rows(read_none, FALSE);
- test_serialize_leaf_with_many_rows(read_all, FALSE);
- test_serialize_leaf_with_many_rows(read_compressed, FALSE);
- test_serialize_leaf_with_many_rows(read_none, TRUE);
- test_serialize_leaf_with_many_rows(read_all, TRUE);
- test_serialize_leaf_with_many_rows(read_compressed, TRUE);
-
- test_serialize_leaf_with_large_pivots(read_none, FALSE);
- test_serialize_leaf_with_large_pivots(read_all, FALSE);
- test_serialize_leaf_with_large_pivots(read_compressed, FALSE);
- test_serialize_leaf_with_large_pivots(read_none, TRUE);
- test_serialize_leaf_with_large_pivots(read_all, TRUE);
- test_serialize_leaf_with_large_pivots(read_compressed, TRUE);
-
- test_serialize_leaf_check_msn(read_none, FALSE);
- test_serialize_leaf_check_msn(read_all, FALSE);
- test_serialize_leaf_check_msn(read_compressed, FALSE);
- test_serialize_leaf_check_msn(read_none, TRUE);
- test_serialize_leaf_check_msn(read_all, TRUE);
- test_serialize_leaf_check_msn(read_compressed, TRUE);
-
- test_serialize_nonleaf(read_none, FALSE);
- test_serialize_nonleaf(read_all, FALSE);
- test_serialize_nonleaf(read_compressed, FALSE);
- test_serialize_nonleaf(read_none, TRUE);
- test_serialize_nonleaf(read_all, TRUE);
- test_serialize_nonleaf(read_compressed, TRUE);
+ test_serialize_leaf(read_none, false);
+ test_serialize_leaf(read_all, false);
+ test_serialize_leaf(read_compressed, false);
+ test_serialize_leaf(read_none, true);
+ test_serialize_leaf(read_all, true);
+ test_serialize_leaf(read_compressed, true);
+
+ test_serialize_leaf_with_empty_basement_nodes(read_none, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_all, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_compressed, false);
+ test_serialize_leaf_with_empty_basement_nodes(read_none, true);
+ test_serialize_leaf_with_empty_basement_nodes(read_all, true);
+ test_serialize_leaf_with_empty_basement_nodes(read_compressed, true);
+
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed, false);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_none, true);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_all, true);
+ test_serialize_leaf_with_multiple_empty_basement_nodes(read_compressed, true);
+
+ test_serialize_leaf_with_large_rows(read_none, false);
+ test_serialize_leaf_with_large_rows(read_all, false);
+ test_serialize_leaf_with_large_rows(read_compressed, false);
+ test_serialize_leaf_with_large_rows(read_none, true);
+ test_serialize_leaf_with_large_rows(read_all, true);
+ test_serialize_leaf_with_large_rows(read_compressed, true);
+
+ test_serialize_leaf_with_many_rows(read_none, false);
+ test_serialize_leaf_with_many_rows(read_all, false);
+ test_serialize_leaf_with_many_rows(read_compressed, false);
+ test_serialize_leaf_with_many_rows(read_none, true);
+ test_serialize_leaf_with_many_rows(read_all, true);
+ test_serialize_leaf_with_many_rows(read_compressed, true);
+
+ test_serialize_leaf_with_large_pivots(read_none, false);
+ test_serialize_leaf_with_large_pivots(read_all, false);
+ test_serialize_leaf_with_large_pivots(read_compressed, false);
+ test_serialize_leaf_with_large_pivots(read_none, true);
+ test_serialize_leaf_with_large_pivots(read_all, true);
+ test_serialize_leaf_with_large_pivots(read_compressed, true);
+
+ test_serialize_leaf_check_msn(read_none, false);
+ test_serialize_leaf_check_msn(read_all, false);
+ test_serialize_leaf_check_msn(read_compressed, false);
+ test_serialize_leaf_check_msn(read_none, true);
+ test_serialize_leaf_check_msn(read_all, true);
+ test_serialize_leaf_check_msn(read_compressed, true);
+
+ test_serialize_nonleaf(read_none, false);
+ test_serialize_nonleaf(read_all, false);
+ test_serialize_nonleaf(read_compressed, false);
+ test_serialize_nonleaf(read_none, true);
+ test_serialize_nonleaf(read_all, true);
+ test_serialize_nonleaf(read_compressed, true);
return 0;
}
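A minimal sketch of the pattern the hunks above apply, assuming the old project-local spellings were roughly "typedef int BOOL" with TRUE/FALSE as integer macros (everything below is illustrative, not part of the patch):

    #include <cassert>

    static bool flag_demo(void) {
        bool prefetch = false;    // was: BOOL prefetch = FALSE;
        prefetch = true;          // was: prefetch = TRUE;
        assert(prefetch);         // bool holds exactly 0 or 1
        return prefetch;
    }

The same substitution is applied verbatim at every call site below; e.g. the trailing FALSE, FALSE pair passed to toku_ft_cursor becomes false, false.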
diff --git a/ft/tests/ft-test-cursor-2.cc b/ft/tests/ft-test-cursor-2.cc
index 277836065fc..086b1630a61 100644
--- a/ft/tests/ft-test-cursor-2.cc
+++ b/ft/tests/ft-test-cursor-2.cc
@@ -53,7 +53,7 @@ static void test_multiple_ft_cursor_dbts(int n) {
}
for (i=0; i<n; i++) {
- r = toku_ft_cursor(brt, &cursors[i], NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursors[i], NULL, false, false);
assert(r == 0);
}
diff --git a/ft/tests/ft-test-cursor.cc b/ft/tests/ft-test-cursor.cc
index 760448df57f..5b0221883e8 100644
--- a/ft/tests/ft-test-cursor.cc
+++ b/ft/tests/ft-test-cursor.cc
@@ -21,7 +21,7 @@ static void assert_cursor_notfound(FT_HANDLE brt, int position) {
FT_CURSOR cursor=0;
int r;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
struct check_pair pair = {0,0,0,0,0};
@@ -37,7 +37,7 @@ static void assert_cursor_value(FT_HANDLE brt, int position, long long value) {
FT_CURSOR cursor=0;
int r;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("key: ");
@@ -54,7 +54,7 @@ static void assert_cursor_first_last(FT_HANDLE brt, long long firstv, long long
FT_CURSOR cursor=0;
int r;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("first key: ");
@@ -252,7 +252,7 @@ static void assert_cursor_walk(FT_HANDLE brt, int n) {
int i;
int r;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("key: ");
@@ -318,7 +318,7 @@ static void assert_cursor_rwalk(FT_HANDLE brt, int n) {
int i;
int r;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("key: ");
@@ -405,7 +405,7 @@ static void assert_cursor_walk_inorder(FT_HANDLE brt, int n) {
int r;
char *prevkey = 0;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("key: ");
@@ -507,7 +507,7 @@ static void test_ft_cursor_split(int n) {
assert(r==0);
}
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
if (test_cursor_debug && verbose) printf("key: ");
@@ -572,7 +572,7 @@ static void test_multiple_ft_cursors(int n) {
int i;
for (i=0; i<n; i++) {
- r = toku_ft_cursor(brt, &cursors[i], NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursors[i], NULL, false, false);
assert(r == 0);
}
@@ -622,7 +622,7 @@ static void test_multiple_ft_cursor_walk(int n) {
int c;
/* create the cursors */
for (c=0; c<ncursors; c++) {
- r = toku_ft_cursor(brt, &cursors[c], NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursors[c], NULL, false, false);
assert(r == 0);
}
@@ -709,7 +709,7 @@ static void test_ft_cursor_set(int n, int cursor_op) {
assert(r == 0);
}
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
/* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
@@ -782,7 +782,7 @@ static void test_ft_cursor_set_range(int n) {
assert(r == 0);
}
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(r==0);
/* pick random keys v in 0 <= v < 10*n, the cursor should point
@@ -832,7 +832,7 @@ static void test_ft_cursor_delete(int n) {
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare);
assert(error == 0);
- error = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE);
+ error = toku_ft_cursor(brt, &cursor, NULL, false, false);
assert(error == 0);
DBT key, val;
diff --git a/ft/tests/ft-test.cc b/ft/tests/ft-test.cc
index c703bde152a..03e77914613 100644
--- a/ft/tests/ft-test.cc
+++ b/ft/tests/ft-test.cc
@@ -261,7 +261,7 @@ static void test_cursor_last_empty(void) {
//printf("%s:%d %d alloced\n", __SRCFILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
//printf("%s:%d %d alloced\n", __SRCFILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
{
struct check_pair pair = {0,0,0,0,0};
r = toku_ft_cursor_get(cursor, NULL, lookup_checkf, &pair, DB_LAST);
@@ -299,7 +299,7 @@ static void test_cursor_next (void) {
r = toku_ft_insert(brt, toku_fill_dbt(&kbt, "hello", 6), toku_fill_dbt(&vbt, "there", 6), null_txn);
r = toku_ft_insert(brt, toku_fill_dbt(&kbt, "byebye", 7), toku_fill_dbt(&vbt, "byenow", 7), null_txn);
if (verbose) printf("%s:%d calling toku_ft_cursor(...)\n", __SRCFILE__, __LINE__);
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
toku_init_dbt(&kbt);
//printf("%s:%d %d alloced\n", __SRCFILE__, __LINE__, toku_get_n_items_malloced()); toku_print_malloced_items();
toku_init_dbt(&vbt);
@@ -393,7 +393,7 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
}
{
FT_CURSOR cursor=0;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
for (i=0; i<2; i++) {
unsigned char a[4],b[4];
@@ -435,7 +435,7 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
toku_cachetable_verify(ct);
}
FT_CURSOR cursor=0;
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
for (i=0; i<N; i++) {
unsigned char a[4],b[4];
@@ -584,7 +584,7 @@ static void test_ft_delete_present(int n) {
/* cursor should not find anything */
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false);
assert(r == 0);
{
@@ -721,7 +721,7 @@ static void test_ft_delete_cursor_first(int n) {
/* cursor should find the last key: n-1 */
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false);
assert(r == 0);
{
@@ -826,7 +826,7 @@ static void test_new_ft_cursor_create_close (void) {
int i;
for (i=0; i<n; i++) {
- r = toku_ft_cursor(brt, &cursors[i], NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(brt, &cursors[i], NULL, false, false); assert(r == 0);
}
for (i=0; i<n; i++) {
@@ -860,7 +860,7 @@ static void test_new_ft_cursor_first(int n) {
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
@@ -912,7 +912,7 @@ static void test_new_ft_cursor_last(int n) {
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
toku_init_dbt(&key); key.flags = DB_DBT_REALLOC;
toku_init_dbt(&val); val.flags = DB_DBT_REALLOC;
@@ -964,7 +964,7 @@ static void test_new_ft_cursor_next(int n) {
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
for (i=0; ; i++) {
int kk = toku_htonl(i);
@@ -1007,7 +1007,7 @@ static void test_new_ft_cursor_prev(int n) {
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
for (i=n-1; ; i--) {
int kk = toku_htonl(i);
@@ -1050,7 +1050,7 @@ static void test_new_ft_cursor_current(int n) {
FT_CURSOR cursor=0;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
for (i=0; ; i++) {
{
@@ -1134,7 +1134,7 @@ static void test_new_ft_cursor_set_range(int n) {
r = toku_ft_insert(brt, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
}
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
/* pick random keys v in 0 <= v < 10*n, the cursor should point
to the smallest key in the tree that is >= v */
@@ -1192,7 +1192,7 @@ static void test_new_ft_cursor_set(int n, int cursor_op, DB *db) {
r = toku_ft_insert(brt, toku_fill_dbt(&key, &k, sizeof k), toku_fill_dbt(&val, &v, sizeof v), 0); assert(r == 0);
}
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
/* set cursor to random keys in set { 0, 10, 20, .. 10*(n-1) } */
for (i=0; i<n; i++) {
diff --git a/ft/tests/ftloader-test-bad-generate.cc b/ft/tests/ftloader-test-bad-generate.cc
index 35b29063971..0d3720207e5 100644
--- a/ft/tests/ftloader-test-bad-generate.cc
+++ b/ft/tests/ftloader-test-bad-generate.cc
@@ -68,7 +68,7 @@ static void populate_rowset(struct rowset *rowset, int seq, int nrows) {
}
}
-static void test_extractor(int nrows, int nrowsets, BOOL expect_fail) {
+static void test_extractor(int nrows, int nrowsets, bool expect_fail) {
if (verbose) printf("%s %d %d\n", __FUNCTION__, nrows, nrowsets);
int r;
@@ -87,7 +87,7 @@ static void test_extractor(int nrows, int nrowsets, BOOL expect_fail) {
}
FTLOADER loader;
- r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, TRUE);
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true);
assert(r == 0);
struct rowset *rowset[nrowsets];
@@ -114,7 +114,7 @@ static void test_extractor(int nrows, int nrowsets, BOOL expect_fail) {
assert(expect_fail ? loader_error != 0 : loader_error == 0);
// abort the ft_loader. this ends the test
- r = toku_ft_loader_abort(loader, TRUE);
+ r = toku_ft_loader_abort(loader, true);
assert(r == 0);
}
@@ -155,7 +155,7 @@ int test_main (int argc, const char *argv[]) {
}
// calibrate
- test_extractor(nrows, nrowsets, FALSE);
+ test_extractor(nrows, nrowsets, false);
// run tests
int event_limit = event_count;
@@ -164,7 +164,7 @@ int test_main (int argc, const char *argv[]) {
for (int i = 1; i <= event_limit; i++) {
reset_event_counts();
event_count_trigger = i;
- test_extractor(nrows, nrowsets, TRUE);
+ test_extractor(nrows, nrowsets, true);
}
return 0;
diff --git a/ft/tests/ftloader-test-extractor-errors.cc b/ft/tests/ftloader-test-extractor-errors.cc
index 3a5b59638bc..8e8309ddd7e 100644
--- a/ft/tests/ftloader-test-extractor-errors.cc
+++ b/ft/tests/ftloader-test-extractor-errors.cc
@@ -70,7 +70,7 @@ static int ascending_keys = 0;
static int descending_keys = 0;
static int random_keys = 0;
-static void test_extractor(int nrows, int nrowsets, BOOL expect_fail, const char *testdir) {
+static void test_extractor(int nrows, int nrowsets, bool expect_fail, const char *testdir) {
if (verbose) printf("%s %d %d %s\n", __FUNCTION__, nrows, nrowsets, testdir);
int r;
@@ -99,7 +99,7 @@ static void test_extractor(int nrows, int nrowsets, BOOL expect_fail, const char
sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader;
- r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, TRUE);
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, "tempXXXXXX", ZERO_LSN, TXNID_NONE, true);
assert(r == 0);
struct rowset *rowset[nrowsets];
@@ -141,7 +141,7 @@ static void test_extractor(int nrows, int nrowsets, BOOL expect_fail, const char
// verify the temp files
// abort the ft_loader. this ends the test
- r = toku_ft_loader_abort(loader, TRUE);
+ r = toku_ft_loader_abort(loader, true);
assert(r == 0);
toku_free(keys);
@@ -217,7 +217,7 @@ int test_main (int argc, const char *argv[]) {
ascending_keys = 1;
// calibrate
- test_extractor(nrows, nrowsets, FALSE, testdir);
+ test_extractor(nrows, nrowsets, false, testdir);
// run tests
int error_limit = event_count;
@@ -229,7 +229,7 @@ int test_main (int argc, const char *argv[]) {
reset_event_counts();
reset_my_malloc_counts();
event_count_trigger = i;
- test_extractor(nrows, nrowsets, TRUE, testdir);
+ test_extractor(nrows, nrowsets, true, testdir);
}
return 0;
diff --git a/ft/tests/ftloader-test-extractor.cc b/ft/tests/ftloader-test-extractor.cc
index af855b1c4d7..38ea83fd152 100644
--- a/ft/tests/ftloader-test-extractor.cc
+++ b/ft/tests/ftloader-test-extractor.cc
@@ -134,7 +134,7 @@ static void verify_sorted(int a[], int n) {
struct merge_file {
FILE *f;
DBT key, val;
- BOOL row_valid;
+ bool row_valid;
};
static DBT zero_dbt;
@@ -143,7 +143,7 @@ static void merge_file_init(struct merge_file *mf) {
mf->f = NULL;
mf->key = zero_dbt; mf->key.flags = DB_DBT_REALLOC;
mf->val = zero_dbt; mf->val.flags = DB_DBT_REALLOC;
- mf->row_valid = FALSE;
+ mf->row_valid = false;
}
static void merge_file_destroy(struct merge_file *mf) {
@@ -174,7 +174,7 @@ static char *merge(char **tempfiles, int ntempfiles, const char *testdir) {
}
assert(f[i].f != NULL);
if (read_row(f[i].f, &f[i].key, &f[i].val) == 0)
- f[i].row_valid = TRUE;
+ f[i].row_valid = true;
}
while (1) {
@@ -200,7 +200,7 @@ static char *merge(char **tempfiles, int ntempfiles, const char *testdir) {
// refresh mini
if (read_row(f[mini].f, &f[mini].key, &f[mini].val) != 0)
- f[mini].row_valid = FALSE;
+ f[mini].row_valid = false;
}
for (int i = 0; i < ntempfiles; i++) {
@@ -319,7 +319,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) {
sprintf(temp, "%s/%s", testdir, "tempXXXXXX");
FTLOADER loader;
- r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, temp, ZERO_LSN, TXNID_NONE, TRUE);
+ r = toku_ft_loader_open(&loader, NULL, generate, NULL, N, brts, dbs, fnames, compares, temp, ZERO_LSN, TXNID_NONE, true);
assert(r == 0);
struct rowset *rowset[nrowsets];
@@ -350,7 +350,7 @@ static void test_extractor(int nrows, int nrowsets, const char *testdir) {
verify(keys, nkeys, testdir);
// abort the ft_loader. this ends the test
- r = toku_ft_loader_abort(loader, TRUE);
+ r = toku_ft_loader_abort(loader, true);
assert(r == 0);
toku_free(keys);
diff --git a/ft/tests/ftloader-test-merge-files-dbufio.cc b/ft/tests/ftloader-test-merge-files-dbufio.cc
index bf7dbae579b..2acf692bec2 100644
--- a/ft/tests/ftloader-test-merge-files-dbufio.cc
+++ b/ft/tests/ftloader-test-merge-files-dbufio.cc
@@ -271,7 +271,7 @@ static void *consumer_thread (void *ctv) {
}
-static void test (const char *directory, BOOL is_error) {
+static void test (const char *directory, bool is_error) {
int *XMALLOC_N(N_SOURCES, fds);
@@ -327,7 +327,7 @@ static void test (const char *directory, BOOL is_error) {
bt_compare_functions,
"tempxxxxxx",
*lsnp,
- TXNID_NONE, TRUE);
+ TXNID_NONE, true);
assert(r==0);
}
@@ -353,8 +353,8 @@ static void test (const char *directory, BOOL is_error) {
// all we really need is the number of records in the file. The rest of the file_info is unused by the dbufio code.
bl->file_infos.file_infos[i].n_rows = n_records_in_fd[i];
// However we need these for the destroy method to work right.
- bl->file_infos.file_infos[i].is_extant = FALSE;
- bl->file_infos.file_infos[i].is_open = FALSE;
+ bl->file_infos.file_infos[i].is_extant = false;
+ bl->file_infos.file_infos[i].is_open = false;
bl->file_infos.file_infos[i].buffer = NULL;
src_fidxs[i].idx = i;
}
@@ -378,7 +378,7 @@ static void test (const char *directory, BOOL is_error) {
int result = 0;
{
- int r = toku_merge_some_files_using_dbufio(TRUE, FIDX_NULL, q, N_SOURCES, bfs, src_fidxs, bl, 0, (DB*)NULL, compare_ints, 10000);
+ int r = toku_merge_some_files_using_dbufio(true, FIDX_NULL, q, N_SOURCES, bfs, src_fidxs, bl, 0, (DB*)NULL, compare_ints, 10000);
if (is_error && r!=0) {
result = r;
} else {
@@ -424,7 +424,7 @@ static void test (const char *directory, BOOL is_error) {
int r = queue_destroy(q);
assert(r==0);
}
- toku_ft_loader_internal_destroy(bl, FALSE);
+ toku_ft_loader_internal_destroy(bl, false);
{
int r = toku_cachetable_close(&ct);
assert(r==0);
@@ -514,7 +514,7 @@ int test_main (int argc, const char *argv[]) {
int r;
r = system(unlink_all); CKERR(r);
r = toku_os_mkdir(directory, 0755); CKERR(r);
- test(directory, FALSE);
+ test(directory, false);
if (verbose) printf("my_malloc_count=%d big_count=%d\n", my_malloc_count, my_big_malloc_count);
@@ -530,7 +530,7 @@ int test_main (int argc, const char *argv[]) {
r = system(unlink_all); CKERR(r);
r = toku_os_mkdir(directory, 0755); CKERR(r);
if (verbose) printf("event=%d\n", i);
- test(directory, TRUE);
+ test(directory, true);
}
r = system(unlink_all); CKERR(r);
}
diff --git a/ft/tests/ftloader-test-open.cc b/ft/tests/ftloader-test-open.cc
index 064f24ec940..9d439500ce2 100644
--- a/ft/tests/ftloader-test-open.cc
+++ b/ft/tests/ftloader-test-open.cc
@@ -57,14 +57,14 @@ static void test_loader_open(int ndbs) {
for (i = 0; ; i++) {
set_my_malloc_trigger(i+1);
- r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, brts, dbs, fnames, compares, "", ZERO_LSN, TXNID_NONE, TRUE);
+ r = toku_ft_loader_open(&loader, NULL, NULL, NULL, ndbs, brts, dbs, fnames, compares, "", ZERO_LSN, TXNID_NONE, true);
if (r == 0)
break;
}
if (verbose) printf("i=%d\n", i);
- r = toku_ft_loader_abort(loader, TRUE);
+ r = toku_ft_loader_abort(loader, true);
assert(r == 0);
}
diff --git a/ft/tests/ftloader-test-writer-errors.cc b/ft/tests/ftloader-test-writer-errors.cc
index f5c3a953eb7..8a7f2bac023 100644
--- a/ft/tests/ftloader-test-writer-errors.cc
+++ b/ft/tests/ftloader-test-writer-errors.cc
@@ -34,7 +34,7 @@ static void err_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *
abort();
}
-static int write_dbfile (char *tf_template, int n, char *output_name, BOOL expect_error, int testno) {
+static int write_dbfile (char *tf_template, int n, char *output_name, bool expect_error, int testno) {
if (verbose) printf("test start %d %d testno=%d\n", n, expect_error, testno);
int result = 0;
@@ -216,7 +216,7 @@ int test_main (int argc, const char *argv[]) {
int r;
r = system(unlink_all); CKERR(r);
r = toku_os_mkdir(directory, 0755); CKERR(r);
- r = write_dbfile(tf_template, n, output_name, FALSE, 0); CKERR(r);
+ r = write_dbfile(tf_template, n, output_name, false, 0); CKERR(r);
if (verbose) printf("my_malloc_count=%d big_count=%d\n", my_malloc_count, my_big_malloc_count);
if (verbose) printf("my_realloc_count=%d big_count=%d\n", my_realloc_count, my_big_realloc_count);
@@ -233,7 +233,7 @@ int test_main (int argc, const char *argv[]) {
event_count_trigger = i;
r = system(unlink_all); CKERR(r);
r = toku_os_mkdir(directory, 0755); CKERR(r);
- r = write_dbfile(tf_template, n, output_name, TRUE, i);
+ r = write_dbfile(tf_template, n, output_name, true, i);
if (verbose) printf("event_count=%d\n", event_count);
if (r == 0)
break;
diff --git a/ft/tests/ftloader-test-writer.cc b/ft/tests/ftloader-test-writer.cc
index d4b73d828d1..bd4b024a46f 100644
--- a/ft/tests/ftloader-test-writer.cc
+++ b/ft/tests/ftloader-test-writer.cc
@@ -57,7 +57,7 @@ static void verify_dbfile(int n, const char *name) {
if (verbose) traceit("Verified brt internals");
FT_CURSOR cursor = NULL;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
size_t userdata = 0;
int i;
@@ -80,7 +80,7 @@ static void verify_dbfile(int n, const char *name) {
struct ftstat64_s s;
r = toku_ft_handle_stat64(t, NULL, &s); assert(r == 0);
- assert(s.nkeys == (u_int64_t)n && s.ndata == (u_int64_t)n && s.dsize == userdata);
+ assert(s.nkeys == (uint64_t)n && s.ndata == (uint64_t)n && s.dsize == userdata);
r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
r = toku_cachetable_close(&ct);assert(r==0);
@@ -184,7 +184,7 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI
assert_zero(r);
destroy_merge_fileset(&fs);
- ft_loader_fi_destroy(&bl.file_infos, FALSE);
+ ft_loader_fi_destroy(&bl.file_infos, false);
// walk a cursor through the dbfile and verify the rows
verify_dbfile(n, output_name);
diff --git a/ft/tests/ftloader-test.cc b/ft/tests/ftloader-test.cc
index 12365b57bf1..8b959c3f9ea 100644
--- a/ft/tests/ftloader-test.cc
+++ b/ft/tests/ftloader-test.cc
@@ -33,12 +33,12 @@ static void err_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *
abort();
}
-BOOL founddup;
+bool founddup;
static void expect_dups_cb(DB *db UU(), int dbn UU(), int err UU(), DBT *key UU(), DBT *val UU(), void *extra UU()) {
- founddup=TRUE;
+ founddup=true;
}
-static void test_merge_internal (int a[], int na, int b[], int nb, BOOL dups) {
+static void test_merge_internal (int a[], int na, int b[], int nb, bool dups) {
int *MALLOC_N(na+nb, ab); // the combined array a and b
for (int i=0; i<na; i++) {
ab[i]=a[i];
@@ -101,20 +101,20 @@ static void test_merge (void) {
{
int avals[]={1,2,3,4,5};
int *bvals = NULL; //icc won't let us use a zero-sized array explicitly or by [] = {} construction.
- test_merge_internal(avals, 5, bvals, 0, FALSE);
- test_merge_internal(bvals, 0, avals, 5, FALSE);
+ test_merge_internal(avals, 5, bvals, 0, false);
+ test_merge_internal(bvals, 0, avals, 5, false);
}
{
int avals[]={1,3,5,7};
int bvals[]={2,4};
- test_merge_internal(avals, 4, bvals, 2, FALSE);
- test_merge_internal(bvals, 2, avals, 4, FALSE);
+ test_merge_internal(avals, 4, bvals, 2, false);
+ test_merge_internal(bvals, 2, avals, 4, false);
}
{
int avals[]={1,2,3,5,6,7};
int bvals[]={2,4,5,6,8};
- test_merge_internal(avals, 6, bvals, 5, TRUE);
- test_merge_internal(bvals, 5, avals, 6, TRUE);
+ test_merge_internal(avals, 6, bvals, 5, true);
+ test_merge_internal(bvals, 5, avals, 6, true);
}
}
@@ -151,8 +151,8 @@ static void test_mergesort_row_array (void) {
const int MAX_LEN = 100;
enum { MAX_VAL = 1000 };
for (int i=0; i<MAX_LEN; i++) {
- BOOL used[MAX_VAL];
- for (int j=0; j<MAX_VAL; j++) used[j]=FALSE;
+ bool used[MAX_VAL];
+ for (int j=0; j<MAX_VAL; j++) used[j]=false;
int len=1+random()%MAX_LEN;
int avals[len];
for (int j=0; j<len; j++) {
@@ -161,7 +161,7 @@ static void test_mergesort_row_array (void) {
v = random()%MAX_VAL;
} while (used[v]);
avals[j] = v;
- used[v] = TRUE;
+ used[v] = true;
}
test_internal_mergesort_row_array(avals, len);
}
@@ -176,11 +176,11 @@ static void test_read_write_rows (char *tf_template) {
r = ft_loader_open_temp_file(&bl, &file);
CKERR(r);
- u_int64_t dataoff=0;
+ uint64_t dataoff=0;
const char *keystrings[] = {"abc", "b", "cefgh"};
const char *valstrings[] = {"defg", "", "xyz"};
- u_int64_t actual_size=0;
+ uint64_t actual_size=0;
for (int i=0; i<3; i++) {
DBT key;
toku_fill_dbt(&key, keystrings[i], strlen(keystrings[i]));
@@ -193,7 +193,7 @@ static void test_read_write_rows (char *tf_template) {
if (actual_size != dataoff) fprintf(stderr, "actual_size=%" PRIu64 ", dataoff=%" PRIu64 "\n", actual_size, dataoff);
assert(actual_size == dataoff);
- r = ft_loader_fi_close(&bl.file_infos, file, TRUE);
+ r = ft_loader_fi_close(&bl.file_infos, file, true);
CKERR(r);
r = ft_loader_fi_reopen(&bl.file_infos, file, "r");
@@ -217,7 +217,7 @@ static void test_read_write_rows (char *tf_template) {
toku_free(key.data);
toku_free(val.data);
}
- r = ft_loader_fi_close(&bl.file_infos, file, TRUE);
+ r = ft_loader_fi_close(&bl.file_infos, file, true);
CKERR(r);
r = ft_loader_fi_unlink(&bl.file_infos, file);
@@ -226,7 +226,7 @@ static void test_read_write_rows (char *tf_template) {
assert(bl.file_infos.n_files_open==0);
assert(bl.file_infos.n_files_extant==0);
- ft_loader_fi_destroy(&bl.file_infos, FALSE);
+ ft_loader_fi_destroy(&bl.file_infos, false);
}
static void fill_rowset (struct rowset *rows,
@@ -258,7 +258,7 @@ static void verify_dbfile(int n, int sorted_keys[], const char *sorted_vals[], c
r = toku_ft_handle_open(t, name, 0, 0, ct, null_txn); assert(r==0);
FT_CURSOR cursor = NULL;
- r = toku_ft_cursor(t, &cursor, NULL, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &cursor, NULL, false, false); assert(r == 0);
size_t userdata = 0;
int i;
@@ -281,7 +281,7 @@ static void verify_dbfile(int n, int sorted_keys[], const char *sorted_vals[], c
struct ftstat64_s s;
r = toku_ft_handle_stat64(t, NULL, &s); assert(r == 0);
- assert(s.nkeys == (u_int64_t) n && s.ndata == (u_int64_t) n && s.dsize == userdata);
+ assert(s.nkeys == (uint64_t) n && s.ndata == (uint64_t) n && s.dsize == userdata);
r = toku_close_ft_handle_nolsn(t, 0); assert(r==0);
r = toku_cachetable_close(&ct);assert(r==0);
@@ -342,7 +342,7 @@ static void test_merge_files (const char *tf_template, const char *output_name)
assert(r==0);
destroy_merge_fileset(&fs);
- ft_loader_fi_destroy(&bl.file_infos, FALSE);
+ ft_loader_fi_destroy(&bl.file_infos, false);
ft_loader_destroy_error_callback(&bl.error_callback);
ft_loader_lock_destroy(&bl);
diff --git a/ft/tests/is_empty.cc b/ft/tests/is_empty.cc
index f9b7f12d8c1..cac7ff6151c 100644
--- a/ft/tests/is_empty.cc
+++ b/ft/tests/is_empty.cc
@@ -31,14 +31,14 @@ static void test_it (int N) {
toku_logger_set_cachetable(logger, ct);
- r = toku_logger_open_rollback(logger, ct, TRUE); CKERR(r);
+ r = toku_logger_open_rollback(logger, ct, true); CKERR(r);
TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
@@ -48,7 +48,7 @@ static void test_it (int N) {
for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
@@ -59,7 +59,7 @@ static void test_it (int N) {
memset(val, 'v', sizeof(val));
val[sizeof(val)-1]=0;
r = toku_ft_insert(brt, toku_fill_dbt(&k, key, 1+strlen(key)), toku_fill_dbt(&v, val, 1+strlen(val)), txn);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
@@ -71,7 +71,7 @@ static void test_it (int N) {
for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
@@ -81,12 +81,12 @@ static void test_it (int N) {
r = toku_ft_delete(brt, toku_fill_dbt(&k, key, 1+strlen(key)), txn);
if (0) {
- BOOL is_empty;
+ bool is_empty;
is_empty = toku_ft_is_empty_fast(brt);
assert(!is_empty);
}
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
@@ -97,11 +97,11 @@ static void test_it (int N) {
}
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL); CKERR(r);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn);
if (0) {
- BOOL is_empty;
+ bool is_empty;
is_empty = toku_ft_is_empty_fast(brt);
assert(is_empty);
}
@@ -110,7 +110,7 @@ static void test_it (int N) {
r = toku_close_ft_handle_nolsn(brt, NULL); CKERR(r);
r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
- r = toku_logger_close_rollback(logger, FALSE); CKERR(r);
+ r = toku_logger_close_rollback(logger, false); CKERR(r);
r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT); CKERR(r);
r = toku_cachetable_close(&ct); CKERR(r);
r = toku_logger_close(&logger); assert(r==0);
diff --git a/ft/tests/keyrange.cc b/ft/tests/keyrange.cc
index 54df9861d3b..46b0a406f7b 100644
--- a/ft/tests/keyrange.cc
+++ b/ft/tests/keyrange.cc
@@ -35,9 +35,9 @@ static void close_and_reopen (void) {
open_ft_and_ct(false);
}
-static void reload (u_int64_t limit) {
+static void reload (uint64_t limit) {
// insert keys 1, 3, 5, ...
- for (u_int64_t i=0; i<limit; i++) {
+ for (uint64_t i=0; i<limit; i++) {
char key[100],val[100];
snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
@@ -51,7 +51,7 @@ enum memory_state {
CLOSE_AND_REOPEN_LEAVE_ON_DISK // close the brts, reopen them, but leave the state on disk.
};
-static void maybe_reopen (enum memory_state ms, u_int64_t limit) {
+static void maybe_reopen (enum memory_state ms, uint64_t limit) {
switch (ms) {
case CLOSE_AND_RELOAD:
close_and_reopen();
@@ -66,11 +66,11 @@ static void maybe_reopen (enum memory_state ms, u_int64_t limit) {
assert(0);
}
-static void test_keyrange (enum memory_state ms, u_int64_t limit) {
+static void test_keyrange (enum memory_state ms, uint64_t limit) {
open_ft_and_ct(true);
// insert keys 1, 3, 5, ...
- for (u_int64_t i=0; i<limit; i++) {
+ for (uint64_t i=0; i<limit; i++) {
char key[100],val[100];
snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
@@ -90,16 +90,16 @@ static void test_keyrange (enum memory_state ms, u_int64_t limit) {
maybe_reopen(ms, limit);
{
- u_int64_t prev_less = 0, prev_greater = 1LL << 60;
- u_int64_t count_less_adjacent = 0, count_greater_adjacent = 0; // count the number of times that the next value is 1 more (less) than the previous.
- u_int64_t equal_count = 0;
+ uint64_t prev_less = 0, prev_greater = 1LL << 60;
+ uint64_t count_less_adjacent = 0, count_greater_adjacent = 0; // count the number of times that the next value is 1 more (less) than the previous.
+ uint64_t equal_count = 0;
// lookup keys 1, 3, 5, ...
- for (u_int64_t i=0; i<limit; i++) {
+ for (uint64_t i=0; i<limit; i++) {
char key[100];
snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
DBT k;
- u_int64_t less,equal,greater;
+ uint64_t less,equal,greater;
int r = toku_ft_keyrange(t, toku_fill_dbt(&k, key, 1+strlen(key)), &less, &equal, &greater);
assert(r == 0);
if (verbose > 1)
@@ -158,11 +158,11 @@ static void test_keyrange (enum memory_state ms, u_int64_t limit) {
maybe_reopen(ms, limit);
// lookup keys 0, 2, 4, ... not in the tree
- for (u_int64_t i=0; i<1+limit; i++) {
+ for (uint64_t i=0; i<1+limit; i++) {
char key[100];
snprintf(key, 100, "%08llu", (unsigned long long)2*i);
DBT k;
- u_int64_t less,equal,greater;
+ uint64_t less,equal,greater;
int r = toku_ft_keyrange(t, toku_fill_dbt(&k, key, 1+strlen(key)), &less, &equal, &greater);
assert(r == 0);
if (verbose > 1)
@@ -197,7 +197,7 @@ static void test_keyrange (enum memory_state ms, u_int64_t limit) {
int
test_main (int argc, const char *argv[]) {
- u_int64_t limit = 30000;
+ uint64_t limit = 30000;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-v") == 0) {
diff --git a/ft/tests/le-cursor-provdel.cc b/ft/tests/le-cursor-provdel.cc
index 28dbb3d2aa1..757bbb0cfbd 100644
--- a/ft/tests/le-cursor-provdel.cc
+++ b/ft/tests/le-cursor-provdel.cc
@@ -47,7 +47,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_create_cachetable(&ct, 0, ZERO_LSN, logger);
assert(error == 0);
toku_logger_set_cachetable(logger, ct);
- error = toku_logger_open_rollback(logger, ct, TRUE);
+ error = toku_logger_open_rollback(logger, ct, true);
assert(error == 0);
TOKUTXN txn = NULL;
@@ -58,7 +58,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0);
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -78,7 +78,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
}
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -88,7 +88,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert(error == 0);
- error = toku_logger_close_rollback(logger, FALSE);
+ error = toku_logger_close_rollback(logger, false);
assert(error == 0);
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
@@ -120,7 +120,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
error = toku_create_cachetable(&ct, 0, ZERO_LSN, logger);
assert(error == 0);
toku_logger_set_cachetable(logger, ct);
- error = toku_logger_open_rollback(logger, ct, FALSE);
+ error = toku_logger_open_rollback(logger, ct, false);
assert(error == 0);
TOKUTXN txn = NULL;
@@ -131,7 +131,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0);
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -182,11 +182,11 @@ test_provdel(const char *logdir, const char *fname, int n) {
error = toku_le_cursor_close(cursor);
assert(error == 0);
- error = toku_txn_commit_txn(cursortxn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(cursortxn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(cursortxn);
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -196,7 +196,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert(error == 0);
- error = toku_logger_close_rollback(logger, FALSE);
+ error = toku_logger_close_rollback(logger, false);
assert(error == 0);
error = toku_logger_close(&logger);
assert(error == 0);
diff --git a/ft/tests/le-cursor-right.cc b/ft/tests/le-cursor-right.cc
index 33441e511ff..fb3ba348d2a 100644
--- a/ft/tests/le-cursor-right.cc
+++ b/ft/tests/le-cursor-right.cc
@@ -51,7 +51,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_create_cachetable(&ct, 0, ZERO_LSN, logger);
assert(error == 0);
toku_logger_set_cachetable(logger, ct);
- error = toku_logger_open_rollback(logger, ct, TRUE);
+ error = toku_logger_open_rollback(logger, ct, true);
assert(error == 0);
TOKUTXN txn = NULL;
@@ -62,7 +62,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_keycompare);
assert(error == 0);
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -82,7 +82,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
}
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -91,7 +91,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert(error == 0);
- error = toku_logger_close_rollback(logger, FALSE);
+ error = toku_logger_close_rollback(logger, false);
assert(error == 0);
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
@@ -131,7 +131,7 @@ test_neg_infinity(const char *fname, int n) {
DBT key;
toku_fill_dbt(&key, &k, sizeof k);
int right = toku_le_cursor_is_key_greater(cursor, &key);
- assert(right == TRUE);
+ assert(right == true);
}
error = toku_le_cursor_close(cursor);
@@ -192,7 +192,7 @@ test_pos_infinity(const char *fname, int n) {
DBT key2;
toku_fill_dbt(&key2, &k, sizeof k);
int right = toku_le_cursor_is_key_greater(cursor, &key2);
- assert(right == FALSE);
+ assert(right == false);
}
error = toku_le_cursor_close(cursor);
@@ -249,7 +249,7 @@ test_between(const char *fname, int n) {
DBT key2;
toku_fill_dbt(&key2, &k, sizeof k);
int right = toku_le_cursor_is_key_greater(cursor, &key2);
- assert(right == FALSE);
+ assert(right == false);
}
// test that i+1 .. n is left of the cursor
@@ -258,7 +258,7 @@ test_between(const char *fname, int n) {
DBT key2;
toku_fill_dbt(&key2, &k, sizeof k);
int right = toku_le_cursor_is_key_greater(cursor, &key2);
- assert(right == TRUE);
+ assert(right == true);
}
}
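A subtlety in the asserts above: right is declared int, so right == true only passes when the predicate returns exactly 1. Once the return type is bool this holds by construction. A sketch of the distinction, using hypothetical stand-ins rather than the real cursor API:

    #include <cassert>

    static int  old_style_greater(void) { return 2; }    // any nonzero once meant "true"
    static bool new_style_greater(void) { return true; }

    int main(void) {
        int r = old_style_greater();
        assert(r != 0);            // the robust spelling for int-valued predicates
        bool right = new_style_greater();
        assert(right == true);     // bool converts to exactly 0 or 1, so == true is safe
        return 0;
    }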
diff --git a/ft/tests/le-cursor-walk.cc b/ft/tests/le-cursor-walk.cc
index a682427f783..4fb21936e4c 100644
--- a/ft/tests/le-cursor-walk.cc
+++ b/ft/tests/le-cursor-walk.cc
@@ -47,7 +47,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_create_cachetable(&ct, 0, ZERO_LSN, logger);
assert(error == 0);
toku_logger_set_cachetable(logger, ct);
- error = toku_logger_open_rollback(logger, ct, TRUE);
+ error = toku_logger_open_rollback(logger, ct, true);
assert(error == 0);
TOKUTXN txn = NULL;
@@ -58,7 +58,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, test_ft_cursor_keycompare);
assert(error == 0);
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -78,7 +78,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0);
}
- error = toku_txn_commit_txn(txn, TRUE, NULL, NULL);
+ error = toku_txn_commit_txn(txn, true, NULL, NULL);
assert(error == 0);
toku_txn_close_txn(txn);
@@ -87,7 +87,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert(error == 0);
- error = toku_logger_close_rollback(logger, FALSE);
+ error = toku_logger_close_rollback(logger, false);
assert(error == 0);
error = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert(error == 0);
diff --git a/ft/tests/log-test-maybe-trim.cc b/ft/tests/log-test-maybe-trim.cc
index be7d32fad06..eaa9e1868b8 100644
--- a/ft/tests/log-test-maybe-trim.cc
+++ b/ft/tests/log-test-maybe-trim.cc
@@ -27,11 +27,11 @@ test_main (int argc __attribute__((__unused__)),
r = toku_logger_open(dname, logger); assert(r == 0);
BYTESTRING hello = (BYTESTRING) { 5, (char *) "hello"};
LSN comment_lsn;
- r = toku_log_comment(logger, &comment_lsn, TRUE, 0, hello);
+ r = toku_log_comment(logger, &comment_lsn, true, 0, hello);
LSN begin_lsn;
- r = toku_log_begin_checkpoint(logger, &begin_lsn, TRUE, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &begin_lsn, true, 0, 0); assert(r == 0);
LSN end_lsn;
- r = toku_log_end_checkpoint(logger, &end_lsn, TRUE, begin_lsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, &end_lsn, true, begin_lsn, 0, 0, 0); assert(r == 0);
r = toku_logger_maybe_trim_log(logger, begin_lsn); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
diff --git a/ft/tests/log-test5.cc b/ft/tests/log-test5.cc
index ab0459355c7..26fa609d246 100644
--- a/ft/tests/log-test5.cc
+++ b/ft/tests/log-test5.cc
@@ -28,7 +28,7 @@ test_main (int argc __attribute__((__unused__)),
assert(r == 0);
r = toku_logger_set_lg_max(logger, LSIZE);
{
- u_int32_t n;
+ uint32_t n;
r = toku_logger_get_lg_max(logger, &n);
assert(n==LSIZE);
}
diff --git a/ft/tests/log-test6.cc b/ft/tests/log-test6.cc
index f9b80f55215..6ee0d99f0f5 100644
--- a/ft/tests/log-test6.cc
+++ b/ft/tests/log-test6.cc
@@ -34,7 +34,7 @@ test_main (int argc __attribute__((__unused__)),
assert(r == 0);
r = toku_logger_set_lg_max(logger, LSIZE);
{
- u_int32_t n;
+ uint32_t n;
r = toku_logger_get_lg_max(logger, &n);
assert(n==LSIZE);
}
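The u_int32_t/u_int64_t renames in these log tests swap BSD-style aliases (pulled in via sys/types.h on most platforms) for the C99 fixed-width types from stdint.h. A self-contained sketch, with illustrative identifiers:

    #include <stdint.h>      // uint32_t, uint64_t: standard since C99
    #include <inttypes.h>    // PRIu32/PRIu64 printf macros
    #include <stdio.h>

    int main(void) {
        uint32_t lg_max = 1u << 20;          // was: u_int32_t
        uint64_t timestamp = 1234567890ULL;  // was: u_int64_t
        printf("%" PRIu32 " %" PRIu64 "\n", lg_max, timestamp);
        return 0;
    }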
diff --git a/ft/tests/log-test7.cc b/ft/tests/log-test7.cc
index b90c3180d7e..8e717abe3bd 100644
--- a/ft/tests/log-test7.cc
+++ b/ft/tests/log-test7.cc
@@ -32,7 +32,7 @@ static void setup_logger(int which) {
assert(r == 0);
r = toku_logger_set_lg_max(logger[which], LSIZE);
{
- u_int32_t n;
+ uint32_t n;
r = toku_logger_get_lg_max(logger[which], &n);
assert(n==LSIZE);
}
diff --git a/ft/tests/logcursor-timestamp.cc b/ft/tests/logcursor-timestamp.cc
index 97a80372eef..58d70cd416f 100644
--- a/ft/tests/logcursor-timestamp.cc
+++ b/ft/tests/logcursor-timestamp.cc
@@ -7,7 +7,7 @@
#include "includes.h"
-static u_int64_t now(void) {
+static uint64_t now(void) {
struct timeval tv;
int r = gettimeofday(&tv, NULL);
assert(r == 0);
@@ -62,7 +62,7 @@ test_main (int argc, const char *argv[]) {
r = toku_logcursor_next(lc, &le);
assert(r == 0 && le->cmd == LT_comment);
assert(le->u.comment.comment.len == 5 && memcmp(le->u.comment.comment.data, "hello", 5) == 0);
- u_int64_t t = le->u.comment.timestamp;
+ uint64_t t = le->u.comment.timestamp;
r = toku_logcursor_next(lc, &le);
assert(r == 0 && le->cmd == LT_comment);
diff --git a/ft/tests/msnfilter.cc b/ft/tests/msnfilter.cc
index 465ae867c0b..46c5f4ea7a0 100644
--- a/ft/tests/msnfilter.cc
+++ b/ft/tests/msnfilter.cc
@@ -47,7 +47,7 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
MSN msn = next_dummymsn();
FT_MSG_S cmd = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} };
- u_int64_t workdone=0;
+ uint64_t workdone=0;
toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, &cmd, &workdone, NULL);
{
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair);
diff --git a/ft/tests/omt-test.cc b/ft/tests/omt-test.cc
index ecfc49dae13..e6f5a0b7c00 100644
--- a/ft/tests/omt-test.cc
+++ b/ft/tests/omt-test.cc
@@ -43,7 +43,7 @@ parse_args (int argc, const char *argv[]) {
/* End ".h like" stuff. */
struct value {
- u_int32_t number;
+ uint32_t number;
};
#define V(x) ((struct value *)(x))
@@ -67,7 +67,7 @@ enum create_type {
OMT omt;
TESTVALUE* values = NULL;
struct value* nums = NULL;
-u_int32_t length;
+uint32_t length;
static void
cleanup_globals (void) {
@@ -82,7 +82,7 @@ cleanup_globals (void) {
const unsigned int random_seed = 0xFEADACBA;
static void
-init_init_values (unsigned int seed, u_int32_t num_elements) {
+init_init_values (unsigned int seed, uint32_t num_elements) {
srandom(seed);
cleanup_globals();
@@ -95,8 +95,8 @@ init_init_values (unsigned int seed, u_int32_t num_elements) {
}
static void
-init_identity_values (unsigned int seed, u_int32_t num_elements) {
- u_int32_t i;
+init_identity_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
init_init_values(seed, num_elements);
@@ -107,27 +107,27 @@ init_identity_values (unsigned int seed, u_int32_t num_elements) {
}
static void
-init_distinct_sorted_values (unsigned int seed, u_int32_t num_elements) {
- u_int32_t i;
+init_distinct_sorted_values (unsigned int seed, uint32_t num_elements) {
+ uint32_t i;
init_init_values(seed, num_elements);
- u_int32_t number = 0;
+ uint32_t number = 0;
for (i = 0; i < length; i++) {
- number += (u_int32_t)(random() % 32) + 1;
+ number += (uint32_t)(random() % 32) + 1;
nums[i].number = number;
values[i] = (TESTVALUE)&nums[i];
}
}
static void
-init_distinct_random_values (unsigned int seed, u_int32_t num_elements) {
+init_distinct_random_values (unsigned int seed, uint32_t num_elements) {
init_distinct_sorted_values(seed, num_elements);
- u_int32_t i;
- u_int32_t choice;
- u_int32_t choices;
+ uint32_t i;
+ uint32_t choice;
+ uint32_t choices;
struct value temp;
for (i = 0; i < length - 1; i++) {
choices = length - i;
@@ -177,9 +177,9 @@ test_create_size (enum close_when_done do_close) {
static void
test_create_insert_at_almost_random (enum close_when_done do_close) {
- u_int32_t i;
+ uint32_t i;
int r;
- u_int32_t size = 0;
+ uint32_t size = 0;
test_create(KEEP_WHEN_DONE);
r = toku_omt_insert_at(omt, values[0], toku_omt_size(omt)+1);
@@ -205,9 +205,9 @@ test_create_insert_at_almost_random (enum close_when_done do_close) {
static void
test_create_insert_at_sequential (enum close_when_done do_close) {
- u_int32_t i;
+ uint32_t i;
int r;
- u_int32_t size = 0;
+ uint32_t size = 0;
test_create(KEEP_WHEN_DONE);
r = toku_omt_insert_at(omt, values[0], toku_omt_size(omt)+1);
@@ -250,7 +250,7 @@ test_create_from_sorted_array (enum create_type create_choice, enum close_when_d
else if (create_choice == INSERT_AT_ALMOST_RANDOM) {
test_create_insert_at_almost_random(KEEP_WHEN_DONE);
}
- else assert(FALSE);
+ else assert(false);
assert(omt!=NULL);
test_close(do_close);
@@ -264,8 +264,8 @@ test_create_from_sorted_array_size (enum create_type create_choice, enum close_w
}
static void
-test_fetch_verify (OMT omtree, TESTVALUE* val, u_int32_t len ) {
- u_int32_t i;
+test_fetch_verify (OMT omtree, TESTVALUE* val, uint32_t len ) {
+ uint32_t i;
int r;
TESTVALUE v = (TESTVALUE)&i;
TESTVALUE oldv = v;
@@ -302,7 +302,7 @@ test_create_fetch_verify (enum create_type create_choice, enum close_when_done d
static int iterate_helper_error_return = 1;
static int
-iterate_helper (TESTVALUE v, u_int32_t idx, void* extra) {
+iterate_helper (TESTVALUE v, uint32_t idx, void* extra) {
if (extra == NULL) return iterate_helper_error_return;
TESTVALUE* vals = (TESTVALUE *)extra;
assert(v != NULL);
@@ -312,7 +312,7 @@ iterate_helper (TESTVALUE v, u_int32_t idx, void* extra) {
}
static void
-test_iterate_verify (OMT omtree, TESTVALUE* vals, u_int32_t len) {
+test_iterate_verify (OMT omtree, TESTVALUE* vals, uint32_t len) {
int r;
iterate_helper_error_return = 0;
r = toku_omt_iterate(omtree, iterate_helper, (void*)vals);
@@ -336,19 +336,19 @@ test_create_iterate_verify (enum create_type create_choice, enum close_when_done
static void
-permute_array (u_int32_t* arr, u_int32_t len) {
+permute_array (uint32_t* arr, uint32_t len) {
//
// create a permutation of 0...size-1
//
- u_int32_t i = 0;
+ uint32_t i = 0;
for (i = 0; i < len; i++) {
arr[i] = i;
}
for (i = 0; i < len - 1; i++) {
- u_int32_t choices = len - i;
- u_int32_t choice = random() % choices;
+ uint32_t choices = len - i;
+ uint32_t choice = random() % choices;
if (choice != i) {
- u_int32_t temp = arr[i];
+ uint32_t temp = arr[i];
arr[i] = arr[choice];
arr[choice] = temp;
}
@@ -357,13 +357,13 @@ permute_array (u_int32_t* arr, u_int32_t len) {
static void
test_create_set_at (enum create_type create_choice, enum close_when_done do_close) {
- u_int32_t i = 0;
+ uint32_t i = 0;
struct value* old_nums = NULL;
MALLOC_N(length, old_nums);
assert(nums);
- u_int32_t* perm = NULL;
+ uint32_t* perm = NULL;
MALLOC_N(length, perm);
assert(perm);
@@ -388,9 +388,9 @@ test_create_set_at (enum create_type create_choice, enum close_when_done do_clos
r = toku_omt_set_at (omt, values[0], length+1);
CKERR2(r,EINVAL);
for (i = 0; i < length; i++) {
- u_int32_t choice = perm[i];
+ uint32_t choice = perm[i];
values[choice] = &nums[choice];
- nums[choice].number = (u_int32_t)random();
+ nums[choice].number = (uint32_t)random();
r = toku_omt_set_at (omt, values[choice], choice);
CKERR(r);
test_iterate_verify(omt, values, length);
@@ -420,9 +420,9 @@ insert_helper (TESTVALUE value, void* extra_insert) {
static void
test_create_insert (enum close_when_done do_close) {
- u_int32_t i = 0;
+ uint32_t i = 0;
- u_int32_t* perm = NULL;
+ uint32_t* perm = NULL;
MALLOC_N(length, perm);
assert(perm);
@@ -430,12 +430,12 @@ test_create_insert (enum close_when_done do_close) {
test_create(KEEP_WHEN_DONE);
int r;
- u_int32_t size = length;
+ uint32_t size = length;
length = 0;
while (length < size) {
- u_int32_t choice = perm[length];
+ uint32_t choice = perm[length];
TESTVALUE to_insert = &nums[choice];
- u_int32_t idx = UINT32_MAX;
+ uint32_t idx = UINT32_MAX;
assert(length==toku_omt_size(omt));
r = toku_omt_insert(omt, to_insert, insert_helper, to_insert, &idx);
@@ -475,7 +475,7 @@ test_create_insert (enum close_when_done do_close) {
static void
test_create_delete_at (enum create_type create_choice, enum close_when_done do_close) {
- u_int32_t i = 0;
+ uint32_t i = 0;
int r = ENOSYS;
test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
@@ -487,7 +487,7 @@ test_create_delete_at (enum create_type create_choice, enum close_when_done do_c
CKERR2(r,EINVAL);
while (length > 0) {
assert(length == toku_omt_size(omt));
- u_int32_t index_to_delete = random()%length;
+ uint32_t index_to_delete = random()%length;
r = toku_omt_delete_at(omt, index_to_delete);
CKERR(r);
for (i = index_to_delete+1; i < length; i++) {
@@ -510,7 +510,7 @@ test_create_delete_at (enum create_type create_choice, enum close_when_done do_c
static void
test_split_merge (enum create_type create_choice, enum close_when_done do_close) {
int r = ENOSYS;
- u_int32_t i = 0;
+ uint32_t i = 0;
OMT left_split = NULL;
OMT right_split = NULL;
test_create_from_sorted_array(create_choice, KEEP_WHEN_DONE);
@@ -571,7 +571,7 @@ test_split_merge (enum create_type create_choice, enum close_when_done do_close)
static void
init_values (enum rand_type rand_choice) {
- const u_int32_t test_size = 100;
+ const uint32_t test_size = 100;
if (rand_choice == TEST_RANDOM) {
init_distinct_random_values(random_seed, test_size);
}
@@ -581,7 +581,7 @@ init_values (enum rand_type rand_choice) {
else if (rand_choice == TEST_IDENTITY) {
init_identity_values( random_seed, test_size);
}
- else assert(FALSE);
+ else assert(false);
}
static void
@@ -611,8 +611,8 @@ test_create_array (enum create_type create_choice, enum rand_type rand_choice) {
}
typedef struct {
- u_int32_t first_zero;
- u_int32_t first_pos;
+ uint32_t first_zero;
+ uint32_t first_pos;
} h_extra;
@@ -623,24 +623,24 @@ test_heaviside (OMTVALUE v_omt, void* x) {
assert(v && x);
assert(extra->first_zero <= extra->first_pos);
- u_int32_t value = V(v)->number;
+ uint32_t value = V(v)->number;
if (value < extra->first_zero) return -1;
if (value < extra->first_pos) return 0;
return 1;
}
static void
-heavy_extra (h_extra* extra, u_int32_t first_zero, u_int32_t first_pos) {
+heavy_extra (h_extra* extra, uint32_t first_zero, uint32_t first_pos) {
extra->first_zero = first_zero;
extra->first_pos = first_pos;
}
static void
test_find_dir (int dir, void* extra, int (*h)(OMTVALUE, void*),
- int r_expect, BOOL idx_will_change, u_int32_t idx_expect,
- u_int32_t number_expect, BOOL UU(cursor_valid)) {
- u_int32_t idx = UINT32_MAX;
- u_int32_t old_idx = idx;
+ int r_expect, bool idx_will_change, uint32_t idx_expect,
+ uint32_t number_expect, bool UU(cursor_valid)) {
+ uint32_t idx = UINT32_MAX;
+ uint32_t old_idx = idx;
TESTVALUE omt_val;
int r;
@@ -707,9 +707,9 @@ test_find (enum create_type create_choice, enum close_when_done do_close) {
A
*/
heavy_extra(&extra, length, length);
- test_find_dir(-1, &extra, test_heaviside, 0, TRUE, length-1, length-1, TRUE);
- test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, TRUE, length, length, FALSE);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length-1, length-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, length, length, false);
/*
@@ -717,54 +717,54 @@ test_find (enum create_type create_choice, enum close_when_done do_close) {
B
*/
heavy_extra(&extra, 0, 0);
- test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(+1, &extra, test_heaviside, 0, TRUE, 0, 0, TRUE);
- test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, TRUE, 0, 0, FALSE);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 0, 0, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, 0, 0, false);
/*
0...0
C
*/
heavy_extra(&extra, 0, length);
- test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(0, &extra, test_heaviside, 0, TRUE, 0, 0, TRUE);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
/*
-...-0...0
AC
*/
heavy_extra(&extra, length/2, length);
- test_find_dir(-1, &extra, test_heaviside, 0, TRUE, length/2-1, length/2-1, TRUE);
- test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(0, &extra, test_heaviside, 0, TRUE, length/2, length/2, TRUE);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/2-1, length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(0, &extra, test_heaviside, 0, true, length/2, length/2, true);
/*
0...0+...+
C B
*/
heavy_extra(&extra, 0, length/2);
- test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, FALSE, 0, 0, FALSE);
- test_find_dir(+1, &extra, test_heaviside, 0, TRUE, length/2, length/2, TRUE);
- test_find_dir(0, &extra, test_heaviside, 0, TRUE, 0, 0, TRUE);
+ test_find_dir(-1, &extra, test_heaviside, DB_NOTFOUND, false, 0, 0, false);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, length/2, length/2, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, 0, 0, true);
/*
-...-+...+
AB
*/
heavy_extra(&extra, length/2, length/2);
- test_find_dir(-1, &extra, test_heaviside, 0, TRUE, length/2-1, length/2-1, TRUE);
- test_find_dir(+1, &extra, test_heaviside, 0, TRUE, length/2, length/2, TRUE);
- test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, TRUE, length/2, length/2, FALSE);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/2-1, length/2-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, length/2, length/2, true);
+ test_find_dir(0, &extra, test_heaviside, DB_NOTFOUND, true, length/2, length/2, false);
/*
-...-0...0+...+
AC B
*/
heavy_extra(&extra, length/3, 2*length/3);
- test_find_dir(-1, &extra, test_heaviside, 0, TRUE, length/3-1, length/3-1, TRUE);
- test_find_dir(+1, &extra, test_heaviside, 0, TRUE, 2*length/3, 2*length/3, TRUE);
- test_find_dir(0, &extra, test_heaviside, 0, TRUE, length/3, length/3, TRUE);
+ test_find_dir(-1, &extra, test_heaviside, 0, true, length/3-1, length/3-1, true);
+ test_find_dir(+1, &extra, test_heaviside, 0, true, 2*length/3, 2*length/3, true);
+ test_find_dir(0, &extra, test_heaviside, 0, true, length/3, length/3, true);
/* Cleanup */
test_close(do_close);
@@ -779,7 +779,7 @@ runtests_create_choice (enum create_type create_choice) {
}
static void
-test_clone(u_int32_t nelts)
+test_clone(uint32_t nelts)
// Test that each clone operation gives the right data back. If nelts is
// zero, also tests that you still get a valid OMT back and that the way
// to deallocate it still works.
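
Whatever clone entry point test_clone drives, the verification half reduces to comparing sizes and element-by-element fetches. A condensed sketch using only the OMT accessors that appear elsewhere in this diff:

    // Hedged sketch: check that a clone holds exactly the same
    // values as its source, via toku_omt_size / toku_omt_fetch.
    static void verify_omts_equal(OMT a, OMT b) {
        uint32_t len = toku_omt_size(a);
        assert(len == toku_omt_size(b));
        for (uint32_t idx = 0; idx < len; ++idx) {
            OMTVALUE va, vb;
            assert_zero(toku_omt_fetch(a, idx, &va));
            assert_zero(toku_omt_fetch(b, idx, &vb));
            assert(va == vb);  // same stored value at every index
        }
    }
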
diff --git a/ft/tests/orthopush-flush.cc b/ft/tests/orthopush-flush.cc
index 96a45bb32eb..e05c6a52d99 100644
--- a/ft/tests/orthopush-flush.cc
+++ b/ft/tests/orthopush-flush.cc
@@ -545,7 +545,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
insert_random_message_to_bn(t, child_blbs[i%8], &child_messages[i], xids_123, i%8);
total_size += child_blbs[i%8]->n_bytes_in_buffer;
if (i % 8 < 7) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
if (childkeys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key, keylen), &childkeys[i%8]) > 0) {
@@ -556,7 +556,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
int num_child_messages = i;
for (i = 0; i < num_child_messages; ++i) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
if (i % 8 < 7) {
@@ -615,7 +615,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
parentnode->max_msn_applied_to_node_on_disk = max_parent_msn;
struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL };
const struct pivot_bounds infinite_bounds = { .lower_bound_exclusive = NULL, .upper_bound_inclusive = NULL };
- BOOL msgs_applied;
+ bool msgs_applied;
maybe_apply_ancestors_messages_to_node(t, child, &ancestors, &infinite_bounds, &msgs_applied);
FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh,
@@ -645,8 +645,8 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
memset(child_messages_present, 0, sizeof child_messages_present);
for (int j = 0; j < 8; ++j) {
OMT omt = BLB_BUFFER(child, j);
- u_int32_t len = toku_omt_size(omt);
- for (u_int32_t idx = 0; idx < len; ++idx) {
+ uint32_t len = toku_omt_size(omt);
+ for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le;
DBT keydbt, valdbt;
{
@@ -654,7 +654,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
r = toku_omt_fetch(omt, idx, &v);
assert_zero(r);
CAST_FROM_VOIDP(le, v);
- u_int32_t keylen, vallen;
+ uint32_t keylen, vallen;
void *keyp = le_key_and_len(le, &keylen);
void *valp = le_latest_val_and_len(le, &vallen);
toku_fill_dbt(&keydbt, keyp, keylen);
@@ -676,7 +676,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
if (i >= num_child_messages) { continue; }
DBT childkeydbt, childvaldbt;
{
- u_int32_t keylen, vallen;
+ uint32_t keylen, vallen;
void *keyp = le_key_and_len(child_messages[i], &keylen);
void *valp = le_latest_val_and_len(child_messages[i], &vallen);
toku_fill_dbt(&childkeydbt, keyp, keylen);
@@ -770,7 +770,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
total_size -= child_blbs[i%8]->n_bytes_in_buffer;
insert_random_message_to_bn(t, child_blbs[i%8], &child_messages[i], xids_123, i%8);
total_size += child_blbs[i%8]->n_bytes_in_buffer;
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
if (childkeys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key, keylen), &childkeys[i%8]) > 0) {
@@ -780,7 +780,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
int num_child_messages = i;
for (i = 0; i < num_child_messages; ++i) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
assert(dummy_cmp(NULL, toku_fill_dbt(&keydbt, key, keylen), &childkeys[i%8]) <= 0);
@@ -840,7 +840,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
.lower_bound_exclusive = toku_init_dbt(&lbe),
.upper_bound_inclusive = toku_clone_dbt(&ubi, childkeys[7])
};
- BOOL msgs_applied;
+ bool msgs_applied;
maybe_apply_ancestors_messages_to_node(t, child, &ancestors, &bounds, &msgs_applied);
FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh,
@@ -957,7 +957,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
insert_same_message_to_bns(t, child1_blbs[i%8], child2_blbs[i%8], &child_messages[i], xids_123, i%8);
total_size += child1_blbs[i%8]->n_bytes_in_buffer;
if (i % 8 < 7) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
if (child1keys[i%8].size == 0 || dummy_cmp(NULL, toku_fill_dbt(&keydbt, key, keylen), &child1keys[i%8]) > 0) {
@@ -969,7 +969,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
int num_child_messages = i;
for (i = 0; i < num_child_messages; ++i) {
- u_int32_t keylen;
+ uint32_t keylen;
char *CAST_FROM_VOIDP(key, le_key_and_len(child_messages[i], &keylen));
DBT keydbt;
if (i % 8 < 7) {
@@ -1023,7 +1023,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
parentnode->max_msn_applied_to_node_on_disk = max_parent_msn;
struct ancestors ancestors = { .node = parentnode, .childnum = 0, .next = NULL };
const struct pivot_bounds infinite_bounds = { .lower_bound_exclusive = NULL, .upper_bound_inclusive = NULL };
- BOOL msgs_applied;
+ bool msgs_applied;
maybe_apply_ancestors_messages_to_node(t, child2, &ancestors, &infinite_bounds, &msgs_applied);
FIFO_ITERATE(parent_bnc->buffer, key, keylen, val, vallen, type, msn, xids, is_fresh,
@@ -1039,9 +1039,9 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
for (int j = 0; j < 8; ++j) {
OMT omt1 = BLB_BUFFER(child1, j);
OMT omt2 = BLB_BUFFER(child2, j);
- u_int32_t len = toku_omt_size(omt1);
+ uint32_t len = toku_omt_size(omt1);
assert(len == toku_omt_size(omt2));
- for (u_int32_t idx = 0; idx < len; ++idx) {
+ for (uint32_t idx = 0; idx < len; ++idx) {
LEAFENTRY le1, le2;
DBT key1dbt, val1dbt, key2dbt, val2dbt;
{
@@ -1049,7 +1049,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
r = toku_omt_fetch(omt1, idx, &v);
assert_zero(r);
CAST_FROM_VOIDP(le1, v);
- u_int32_t keylen, vallen;
+ uint32_t keylen, vallen;
void *keyp = le_key_and_len(le1, &keylen);
void *valp = le_latest_val_and_len(le1, &vallen);
toku_fill_dbt(&key1dbt, keyp, keylen);
@@ -1060,7 +1060,7 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
r = toku_omt_fetch(omt2, idx, &v);
assert_zero(r);
CAST_FROM_VOIDP(le2, v);
- u_int32_t keylen, vallen;
+ uint32_t keylen, vallen;
void *keyp = le_key_and_len(le2, &keylen);
void *valp = le_latest_val_and_len(le2, &vallen);
toku_fill_dbt(&key2dbt, keyp, keylen);
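
The verification loops above all share one idiom: fetch each LEAFENTRY out of a basement node's OMT, unpack the key and latest value through the le accessors, and wrap them in DBTs for comparison. Condensed, with the comparison itself elided:

    // Hedged sketch of the OMT-walk idiom used throughout this test.
    static void walk_basement_buffer(OMT omt) {
        uint32_t len = toku_omt_size(omt);
        for (uint32_t idx = 0; idx < len; ++idx) {
            OMTVALUE v;
            int r = toku_omt_fetch(omt, idx, &v);
            assert_zero(r);
            LEAFENTRY le;
            CAST_FROM_VOIDP(le, v);
            uint32_t keylen, vallen;
            void *keyp = le_key_and_len(le, &keylen);
            void *valp = le_latest_val_and_len(le, &vallen);
            DBT keydbt, valdbt;
            toku_fill_dbt(&keydbt, keyp, keylen);
            toku_fill_dbt(&valdbt, valp, vallen);
            // ... compare keydbt/valdbt against the expected message
        }
    }
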
diff --git a/ft/tests/pqueue-test.cc b/ft/tests/pqueue-test.cc
index 68e608452c5..a424e85cec7 100644
--- a/ft/tests/pqueue-test.cc
+++ b/ft/tests/pqueue-test.cc
@@ -21,7 +21,7 @@ static int test_compare(DB * UU(db), const DBT *dbta, const DBT *dbtb)
return 0;
}
-static inline DBT *dbt_init(DBT *dbt, void *data, u_int32_t size) {
+static inline DBT *dbt_init(DBT *dbt, void *data, uint32_t size) {
memset(dbt, 0, sizeof *dbt);
dbt->data = data;
dbt->size = size;
diff --git a/ft/tests/queue-test.cc b/ft/tests/queue-test.cc
index d9d58f66d15..29bfa75305b 100644
--- a/ft/tests/queue-test.cc
+++ b/ft/tests/queue-test.cc
@@ -14,15 +14,15 @@
static int verbose=1;
static int count_0 = 0;
-static u_int64_t e_max_weight=0, d_max_weight = 0; // max weight seen by enqueue thread and dequeue thread respectively.
+static uint64_t e_max_weight=0, d_max_weight = 0; // max weight seen by enqueue thread and dequeue thread respectively.
static void *start_0 (void *arg) {
QUEUE q = (QUEUE)arg;
void *item;
- u_int64_t weight;
+ uint64_t weight;
long count = 0;
while (1) {
- u_int64_t this_max_weight;
+ uint64_t this_max_weight;
int r=queue_deq(q, &item, &weight, &this_max_weight);
if (r==EOF) break;
assert(r==0);
@@ -36,15 +36,15 @@ static void *start_0 (void *arg) {
return NULL;
}
-static void enq (QUEUE q, long v, u_int64_t weight) {
- u_int64_t this_max_weight;
+static void enq (QUEUE q, long v, uint64_t weight) {
+ uint64_t this_max_weight;
int r = queue_enq(q, (void*)v, (weight==0)?0:1, &this_max_weight);
assert(r==0);
if (this_max_weight>e_max_weight) e_max_weight=this_max_weight;
//printf("E(%ld)=%ld %ld\n", v, this_max_weight, e_max_weight);
}
-static void queue_test_0 (u_int64_t weight)
+static void queue_test_0 (uint64_t weight)
// Test a queue that can hold WEIGHT items.
{
//printf("\n");
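
Both queue_enq and queue_deq report the maximum weight the queue has held so far, which is what the enqueue and dequeue threads fold into e_max_weight and d_max_weight. The round trip, condensed from the calls above:

    // Hedged sketch of the weighted enqueue/dequeue protocol.
    static void roundtrip(QUEUE q, long v) {
        uint64_t this_max_weight;
        int r = queue_enq(q, (void*)v, 1, &this_max_weight);  // weight 1
        assert(r == 0);
        void *item;
        uint64_t weight;
        r = queue_deq(q, &item, &weight, &this_max_weight);   // EOF once closed
        assert(r == 0);
        assert((long)item == v);
    }
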
diff --git a/ft/tests/recovery-bad-last-entry.cc b/ft/tests/recovery-bad-last-entry.cc
index b9bdf615e25..7b90c128276 100644
--- a/ft/tests/recovery-bad-last-entry.cc
+++ b/ft/tests/recovery-bad-last-entry.cc
@@ -37,14 +37,14 @@ run_test(void) {
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN beginlsn;
// all logs must contain a valid checkpoint
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
- r = toku_log_comment(logger, NULL, TRUE, 0, hello); assert(r == 0);
- r = toku_log_comment(logger, NULL, TRUE, 0, world); assert(r == 0);
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
- r = toku_log_comment(logger, NULL, TRUE, 0, hello); assert(r == 0);
- r = toku_log_comment(logger, NULL, TRUE, 0, there); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, hello); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, world); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, hello); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, there); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// redirect stderr
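
Each of these recovery tests synthesizes its log from the same skeleton: create and open a logger, write a begin/end checkpoint pair (recovery requires at least one complete checkpoint), add whatever entries the scenario needs, and close. The skeleton, condensed:

    // Hedged sketch: the minimal valid log these tests start from --
    // one begin/end checkpoint pair bracketing nothing.
    static void write_empty_checkpoint(const char *dir) {
        TOKULOGGER logger;
        int r = toku_logger_create(&logger);              assert(r == 0);
        r = toku_logger_open(dir, logger);                assert(r == 0);
        LSN beginlsn;
        r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0);
        assert(r == 0);
        r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0);
        assert(r == 0);
        r = toku_logger_close(&logger);                   assert(r == 0);
    }
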
diff --git a/ft/tests/recovery-cbegin-cend-hello.cc b/ft/tests/recovery-cbegin-cend-hello.cc
index 9ea65985c5d..069335afe79 100644
--- a/ft/tests/recovery-cbegin-cend-hello.cc
+++ b/ft/tests/recovery-cbegin-cend-hello.cc
@@ -26,8 +26,8 @@ run_test(void) {
// add begin checkpoint, end checkpoint
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, FALSE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// add hello
@@ -35,7 +35,7 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
- r = toku_log_comment(logger, NULL, TRUE, 0, hello);
+ r = toku_log_comment(logger, NULL, true, 0, hello);
r = toku_logger_close(&logger); assert(r == 0);
}
diff --git a/ft/tests/recovery-cbegin-cend.cc b/ft/tests/recovery-cbegin-cend.cc
index 464012d4ae7..0aaa47d115d 100644
--- a/ft/tests/recovery-cbegin-cend.cc
+++ b/ft/tests/recovery-cbegin-cend.cc
@@ -22,12 +22,12 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, FALSE, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, false, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// run recovery
diff --git a/ft/tests/recovery-cbegin.cc b/ft/tests/recovery-cbegin.cc
index 45e688907ff..dc6a9c0a488 100644
--- a/ft/tests/recovery-cbegin.cc
+++ b/ft/tests/recovery-cbegin.cc
@@ -22,9 +22,9 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// run recovery
diff --git a/ft/tests/recovery-cend-cbegin.cc b/ft/tests/recovery-cend-cbegin.cc
index 93105b4b216..a80ef7b9586 100644
--- a/ft/tests/recovery-cend-cbegin.cc
+++ b/ft/tests/recovery-cend-cbegin.cc
@@ -24,10 +24,10 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN firstbegin = ZERO_LSN;
- r = toku_log_begin_checkpoint(logger, &firstbegin, TRUE, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &firstbegin, true, 0, 0); assert(r == 0);
assert(firstbegin.lsn != ZERO_LSN.lsn);
- r = toku_log_end_checkpoint(logger, NULL, FALSE, firstbegin, 0, 0, 0); assert(r == 0);
- r = toku_log_begin_checkpoint(logger, NULL, TRUE, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, false, firstbegin, 0, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, NULL, true, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
if (!verbose) {
diff --git a/ft/tests/recovery-datadir-is-file.cc b/ft/tests/recovery-datadir-is-file.cc
index c800589e5f7..1d47b21258a 100644
--- a/ft/tests/recovery-datadir-is-file.cc
+++ b/ft/tests/recovery-datadir-is-file.cc
@@ -25,7 +25,7 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
- r = toku_log_comment(logger, NULL, TRUE, 0, hello);
+ r = toku_log_comment(logger, NULL, true, 0, hello);
r = toku_logger_close(&logger); assert(r == 0);
// redirect stderr
diff --git a/ft/tests/recovery-fopen-missing-file.cc b/ft/tests/recovery-fopen-missing-file.cc
index 79e7749bf9a..077a9a07957 100644
--- a/ft/tests/recovery-fopen-missing-file.cc
+++ b/ft/tests/recovery-fopen-missing-file.cc
@@ -24,13 +24,13 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
BYTESTRING iname = { (uint32_t) strlen("missing_tokudb_file"), (char *) "missing_tokudb_file" };
FILENUM filenum = {42};
uint32_t treeflags = 0;
- r = toku_log_fopen(logger, NULL, TRUE, iname, filenum, treeflags);
+ r = toku_log_fopen(logger, NULL, true, iname, filenum, treeflags);
r = toku_logger_close(&logger); assert(r == 0);
// redirect stderr
diff --git a/ft/tests/recovery-hello.cc b/ft/tests/recovery-hello.cc
index e2380c4e45f..749a586d7d7 100644
--- a/ft/tests/recovery-hello.cc
+++ b/ft/tests/recovery-hello.cc
@@ -24,13 +24,13 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
- r = toku_log_comment(logger, NULL, TRUE, 0, hello); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, hello); assert(r == 0);
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
- r = toku_log_comment(logger, NULL, TRUE, 0, hello); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, hello); assert(r == 0);
BYTESTRING there = { (uint32_t) strlen("there"), (char *) "there" };
- r = toku_log_comment(logger, NULL, TRUE, 0, there); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, there); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// redirect stderr
diff --git a/ft/tests/recovery-lsn-error-during-forward-scan.cc b/ft/tests/recovery-lsn-error-during-forward-scan.cc
index a254823786f..a73b086e967 100644
--- a/ft/tests/recovery-lsn-error-during-forward-scan.cc
+++ b/ft/tests/recovery-lsn-error-during-forward-scan.cc
@@ -43,8 +43,8 @@ run_test(void) {
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
LSN beginlsn;
- r = toku_log_begin_checkpoint(logger, &beginlsn, TRUE, 0, 0); assert(r == 0);
- r = toku_log_end_checkpoint(logger, NULL, TRUE, beginlsn, 0, 0, 0); assert(r == 0);
+ r = toku_log_begin_checkpoint(logger, &beginlsn, true, 0, 0); assert(r == 0);
+ r = toku_log_end_checkpoint(logger, NULL, true, beginlsn, 0, 0, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
@@ -53,7 +53,7 @@ run_test(void) {
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
- r = toku_log_comment(logger, NULL, TRUE, 0, hello); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, hello); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
@@ -62,7 +62,7 @@ run_test(void) {
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING there = { (uint32_t) strlen("there"), (char *) "there" };
- r = toku_log_comment(logger, NULL, TRUE, 0, there); assert(r == 0);
+ r = toku_log_comment(logger, NULL, true, 0, there); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
diff --git a/ft/tests/recovery-no-datadir.cc b/ft/tests/recovery-no-datadir.cc
index a69b9c01d6d..0e57a787337 100644
--- a/ft/tests/recovery-no-datadir.cc
+++ b/ft/tests/recovery-no-datadir.cc
@@ -24,7 +24,7 @@ run_test(void) {
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
BYTESTRING hello = { (uint32_t) strlen("hello"), (char *) "hello" };
- r = toku_log_comment(logger, NULL, TRUE, 0, hello);
+ r = toku_log_comment(logger, NULL, true, 0, hello);
r = toku_logger_close(&logger); assert(r == 0);
// redirect stderr
diff --git a/ft/tests/shortcut.cc b/ft/tests/shortcut.cc
index bca7266bdb8..6f39aecb3c9 100644
--- a/ft/tests/shortcut.cc
+++ b/ft/tests/shortcut.cc
@@ -25,7 +25,7 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute
r = toku_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_ft_handle(fname, 1, &brt, 1<<12, 1<<9, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, test_ft_cursor_keycompare); assert(r==0);
- r = toku_ft_cursor(brt, &cursor, NULL, FALSE, FALSE); assert(r==0);
+ r = toku_ft_cursor(brt, &cursor, NULL, false, false); assert(r==0);
int i;
for (i=0; i<1000; i++) {
diff --git a/ft/tests/test-assert.cc b/ft/tests/test-assert.cc
index ece234b012d..55ddf9cfc79 100644
--- a/ft/tests/test-assert.cc
+++ b/ft/tests/test-assert.cc
@@ -12,8 +12,8 @@ static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__
exit(1);
}
-static BOOL foo (void) {
- return TRUE;
+static bool foo (void) {
+ return true;
}
diff --git a/ft/tests/test-checkpoint-during-flush.cc b/ft/tests/test-checkpoint-during-flush.cc
index d0d5afa85e3..f274db95e29 100644
--- a/ft/tests/test-checkpoint-during-flush.cc
+++ b/ft/tests/test-checkpoint-during-flush.cc
@@ -21,8 +21,8 @@ enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
FT_HANDLE t;
-BOOL checkpoint_called;
-BOOL checkpoint_callback_called;
+bool checkpoint_called;
+bool checkpoint_callback_called;
toku_pthread_t checkpoint_tid;
@@ -39,11 +39,11 @@ static void merge_should_not_happen(struct flusher_advice* UU(fa),
FTNODE UU(child),
void* UU(extra))
{
- assert(FALSE);
+ assert(false);
}
static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
- assert(FALSE);
+ assert(false);
}
static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
@@ -58,7 +58,7 @@ static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extr
static void checkpoint_callback(void* UU(extra)) {
usleep(1*1024*1024);
- checkpoint_callback_called = TRUE;
+ checkpoint_callback_called = true;
}
@@ -73,13 +73,13 @@ static void *do_checkpoint(void *arg) {
static void flusher_callback(int state, void* extra) {
- BOOL after_child_pin = *(BOOL *)extra;
+ bool after_child_pin = *(bool *)extra;
if (verbose) {
printf("state %d\n", state);
}
if ((state == flt_flush_before_child_pin && !after_child_pin) ||
(state == ft_flush_aflter_child_pin && after_child_pin)) {
- checkpoint_called = TRUE;
+ checkpoint_called = true;
int r = toku_pthread_create(&checkpoint_tid, NULL, do_checkpoint, NULL);
assert_zero(r);
while (!checkpoint_callback_called) {
@@ -89,12 +89,12 @@ static void flusher_callback(int state, void* extra) {
}
static void
-doit (BOOL after_child_pin) {
+doit (bool after_child_pin) {
BLOCKNUM node_leaf, node_root;
int r;
- checkpoint_called = FALSE;
- checkpoint_callback_called = FALSE;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
toku_flusher_thread_set_callback(flusher_callback, &after_child_pin);
@@ -149,7 +149,7 @@ doit (BOOL after_child_pin) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -168,7 +168,7 @@ doit (BOOL after_child_pin) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -205,7 +205,7 @@ doit (BOOL after_child_pin) {
node_root,
toku_cachetable_hash(c_ft->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -226,7 +226,7 @@ doit (BOOL after_child_pin) {
node_leaf,
toku_cachetable_hash(c_ft->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -256,7 +256,7 @@ doit (BOOL after_child_pin) {
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
- doit(FALSE);
- doit(TRUE);
+ doit(false);
+ doit(true);
return 0;
}
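
The timing trick in this test (and in the during-merge, during-rebalance, and during-split variants that follow) is the flusher callback: it fires at named flush states, and when the state under test arrives it spawns a checkpoint thread and stalls the flusher until the checkpoint's own callback confirms it ran concurrently. The injection core, condensed (the wait-loop body is assumed to be a simple sleep-and-poll):

    // Hedged sketch: trip a checkpoint at a chosen flusher state,
    // then stall the flusher until the checkpoint has started.
    static void inject_checkpoint_at(int state, int desired_state) {
        if (state == desired_state) {
            checkpoint_called = true;
            int r = toku_pthread_create(&checkpoint_tid, NULL,
                                        do_checkpoint, NULL);
            assert_zero(r);
            while (!checkpoint_callback_called) {
                usleep(10*1024);  // assumed polling interval
            }
        }
    }
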
diff --git a/ft/tests/test-checkpoint-during-merge.cc b/ft/tests/test-checkpoint-during-merge.cc
index 284241dbaf0..31936dad736 100644
--- a/ft/tests/test-checkpoint-during-merge.cc
+++ b/ft/tests/test-checkpoint-during-merge.cc
@@ -21,8 +21,8 @@ enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
FT_HANDLE t;
-BOOL checkpoint_called;
-BOOL checkpoint_callback_called;
+bool checkpoint_called;
+bool checkpoint_callback_called;
toku_pthread_t checkpoint_tid;
@@ -34,7 +34,7 @@ dont_destroy_bn(void* UU(extra))
}
static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
- assert(FALSE);
+ assert(false);
}
static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
@@ -49,7 +49,7 @@ static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extr
static void checkpoint_callback(void* UU(extra)) {
usleep(1*1024*1024);
- checkpoint_callback_called = TRUE;
+ checkpoint_callback_called = true;
}
@@ -69,7 +69,7 @@ static void flusher_callback(int state, void* extra) {
printf("state %d\n", state);
}
if (state == desired_state) {
- checkpoint_called = TRUE;
+ checkpoint_called = true;
int r = toku_pthread_create(&checkpoint_tid, NULL, do_checkpoint, NULL);
assert_zero(r);
while (!checkpoint_callback_called) {
@@ -84,8 +84,8 @@ doit (int state) {
BLOCKNUM node_leaves[2];
int r;
- checkpoint_called = FALSE;
- checkpoint_callback_called = FALSE;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
toku_flusher_thread_set_callback(flusher_callback, &state);
@@ -153,10 +153,10 @@ doit (int state) {
// hack to get merge going
FTNODE node = NULL;
toku_pin_node_with_min_bfe(&node, node_leaves[0], t);
- BLB_SEQINSERT(node, node->n_children-1) = FALSE;
+ BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(t->ft, node);
toku_pin_node_with_min_bfe(&node, node_leaves[1], t);
- BLB_SEQINSERT(node, node->n_children-1) = FALSE;
+ BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(t->ft, node);
@@ -167,7 +167,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -185,7 +185,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -224,7 +224,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(c_ft->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -254,7 +254,7 @@ doit (int state) {
left_child,
toku_cachetable_hash(c_ft->ft->cf, left_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -270,7 +270,7 @@ doit (int state) {
right_child,
toku_cachetable_hash(c_ft->ft->cf, right_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -287,7 +287,7 @@ doit (int state) {
left_child,
toku_cachetable_hash(c_ft->ft->cf, left_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -299,7 +299,7 @@ doit (int state) {
toku_unpin_ftnode_off_client_thread(c_ft->ft, node);
}
else {
- assert(FALSE);
+ assert(false);
}
diff --git a/ft/tests/test-checkpoint-during-rebalance.cc b/ft/tests/test-checkpoint-during-rebalance.cc
index 730d7722494..82f148745d2 100644
--- a/ft/tests/test-checkpoint-during-rebalance.cc
+++ b/ft/tests/test-checkpoint-during-rebalance.cc
@@ -21,8 +21,8 @@ enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
FT_HANDLE t;
-BOOL checkpoint_called;
-BOOL checkpoint_callback_called;
+bool checkpoint_called;
+bool checkpoint_callback_called;
toku_pthread_t checkpoint_tid;
@@ -34,7 +34,7 @@ dont_destroy_bn(void* UU(extra))
}
static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
- assert(FALSE);
+ assert(false);
}
static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
@@ -49,7 +49,7 @@ static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extr
static void checkpoint_callback(void* UU(extra)) {
usleep(1*1024*1024);
- checkpoint_callback_called = TRUE;
+ checkpoint_callback_called = true;
}
@@ -69,7 +69,7 @@ static void flusher_callback(int state, void* extra) {
printf("state %d\n", state);
}
if (state == desired_state) {
- checkpoint_called = TRUE;
+ checkpoint_called = true;
int r = toku_pthread_create(&checkpoint_tid, NULL, do_checkpoint, NULL);
assert_zero(r);
while (!checkpoint_callback_called) {
@@ -84,8 +84,8 @@ doit (int state) {
BLOCKNUM node_leaves[2];
int r;
- checkpoint_called = FALSE;
- checkpoint_callback_called = FALSE;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
toku_flusher_thread_set_callback(flusher_callback, &state);
@@ -173,10 +173,10 @@ doit (int state) {
// hack to get merge going
FTNODE node = NULL;
toku_pin_node_with_min_bfe(&node, node_leaves[0], t);
- BLB_SEQINSERT(node, node->n_children-1) = FALSE;
+ BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(t->ft, node);
toku_pin_node_with_min_bfe(&node, node_leaves[1], t);
- BLB_SEQINSERT(node, node->n_children-1) = FALSE;
+ BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(t->ft, node);
@@ -187,7 +187,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -205,7 +205,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -244,7 +244,7 @@ doit (int state) {
node_root,
toku_cachetable_hash(c_ft->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -265,7 +265,7 @@ doit (int state) {
left_child,
toku_cachetable_hash(c_ft->ft->cf, left_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -281,7 +281,7 @@ doit (int state) {
right_child,
toku_cachetable_hash(c_ft->ft->cf, right_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
diff --git a/ft/tests/test-checkpoint-during-split.cc b/ft/tests/test-checkpoint-during-split.cc
index b6e2a82bef9..acdf39995a1 100644
--- a/ft/tests/test-checkpoint-during-split.cc
+++ b/ft/tests/test-checkpoint-during-split.cc
@@ -21,8 +21,8 @@ enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
FT_HANDLE t;
-BOOL checkpoint_called;
-BOOL checkpoint_callback_called;
+bool checkpoint_called;
+bool checkpoint_callback_called;
toku_pthread_t checkpoint_tid;
@@ -39,11 +39,11 @@ static void merge_should_not_happen(struct flusher_advice* UU(fa),
FTNODE UU(child),
void* UU(extra))
{
- assert(FALSE);
+ assert(false);
}
static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
- assert(FALSE);
+ assert(false);
}
static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
@@ -58,7 +58,7 @@ static void dummy_update_status(FTNODE UU(child), int UU(dirtied), void* UU(extr
static void checkpoint_callback(void* UU(extra)) {
usleep(1*1024*1024);
- checkpoint_callback_called = TRUE;
+ checkpoint_callback_called = true;
}
@@ -73,13 +73,13 @@ static void *do_checkpoint(void *arg) {
static void flusher_callback(int state, void* extra) {
- BOOL after_split = *(BOOL *)extra;
+ bool after_split = *(bool *)extra;
if (verbose) {
printf("state %d\n", state);
}
if ((state == flt_flush_before_split && !after_split) ||
(state == flt_flush_during_split && after_split)) {
- checkpoint_called = TRUE;
+ checkpoint_called = true;
int r = toku_pthread_create(&checkpoint_tid, NULL, do_checkpoint, NULL);
assert_zero(r);
while (!checkpoint_callback_called) {
@@ -89,12 +89,12 @@ static void flusher_callback(int state, void* extra) {
}
static void
-doit (BOOL after_split) {
+doit (bool after_split) {
BLOCKNUM node_leaf, node_root;
int r;
- checkpoint_called = FALSE;
- checkpoint_callback_called = FALSE;
+ checkpoint_called = false;
+ checkpoint_callback_called = false;
toku_flusher_thread_set_callback(flusher_callback, &after_split);
@@ -163,7 +163,7 @@ doit (BOOL after_split) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -181,7 +181,7 @@ doit (BOOL after_split) {
node_root,
toku_cachetable_hash(t->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -220,7 +220,7 @@ doit (BOOL after_split) {
node_root,
toku_cachetable_hash(c_ft->ft->cf, node_root),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -248,7 +248,7 @@ doit (BOOL after_split) {
left_child,
toku_cachetable_hash(c_ft->ft->cf, left_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -264,7 +264,7 @@ doit (BOOL after_split) {
right_child,
toku_cachetable_hash(c_ft->ft->cf, right_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -281,7 +281,7 @@ doit (BOOL after_split) {
left_child,
toku_cachetable_hash(c_ft->ft->cf, left_child),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -311,7 +311,7 @@ doit (BOOL after_split) {
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
- doit(FALSE);
- doit(TRUE);
+ doit(false);
+ doit(true);
return 0;
}
diff --git a/ft/tests/test-dirty-flushes-on-cleaner.cc b/ft/tests/test-dirty-flushes-on-cleaner.cc
index 9f4011db01d..c3dc7952c36 100644
--- a/ft/tests/test-dirty-flushes-on-cleaner.cc
+++ b/ft/tests/test-dirty-flushes-on-cleaner.cc
@@ -166,7 +166,7 @@ doit (void) {
node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -195,7 +195,7 @@ doit (void) {
node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -215,7 +215,7 @@ doit (void) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -239,7 +239,7 @@ doit (void) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
diff --git a/ft/tests/test-flushes-on-cleaner.cc b/ft/tests/test-flushes-on-cleaner.cc
index 62effeb43a0..6fcd86485c3 100644
--- a/ft/tests/test-flushes-on-cleaner.cc
+++ b/ft/tests/test-flushes-on-cleaner.cc
@@ -42,7 +42,7 @@ static int update_func(
static void
-doit (BOOL keep_other_bn_in_memory) {
+doit (bool keep_other_bn_in_memory) {
BLOCKNUM node_leaf;
BLOCKNUM node_internal, node_root;
@@ -171,7 +171,7 @@ doit (BOOL keep_other_bn_in_memory) {
node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -219,7 +219,7 @@ doit (BOOL keep_other_bn_in_memory) {
node_leaf,
toku_cachetable_hash(brt->ft->cf, node_leaf),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -244,7 +244,7 @@ doit (BOOL keep_other_bn_in_memory) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -268,7 +268,7 @@ doit (BOOL keep_other_bn_in_memory) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -302,7 +302,7 @@ doit (BOOL keep_other_bn_in_memory) {
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
- doit(FALSE);
- doit(TRUE);
+ doit(false);
+ doit(true);
return 0;
}
diff --git a/ft/tests/test-ft-overflow.cc b/ft/tests/test-ft-overflow.cc
index 809864fa053..a92f978660a 100644
--- a/ft/tests/test-ft-overflow.cc
+++ b/ft/tests/test-ft-overflow.cc
@@ -18,14 +18,14 @@ static void
test_overflow (void) {
FT_HANDLE t;
CACHETABLE ct;
- u_int32_t nodesize = 1<<20;
+ uint32_t nodesize = 1<<20;
int r;
unlink(fname);
r = toku_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_ft_handle(fname, 1, &t, nodesize, nodesize / 8, TOKU_DEFAULT_COMPRESSION_METHOD, ct, null_txn, toku_builtin_compare_fun); assert(r==0);
DBT k,v;
- u_int32_t vsize = nodesize/8;
+ uint32_t vsize = nodesize/8;
char buf[vsize];
memset(buf, 'a', vsize);
int i;
diff --git a/ft/tests/test-ft-txns.h b/ft/tests/test-ft-txns.h
index 36e7eccca0a..dc382ff3045 100644
--- a/ft/tests/test-ft-txns.h
+++ b/ft/tests/test-ft-txns.h
@@ -35,7 +35,7 @@ test_setup(TOKULOGGER *loggerp, CACHETABLE *ctp) {
toku_logger_set_cachetable(logger, ct);
- r = toku_logger_open_rollback(logger, ct, TRUE);
+ r = toku_logger_open_rollback(logger, ct, true);
CKERR(r);
r = toku_checkpoint(ct, logger, NULL, NULL, NULL, NULL, STARTUP_CHECKPOINT);
diff --git a/ft/tests/test-leafentry-nested.cc b/ft/tests/test-leafentry-nested.cc
index 72e6ebbb4a8..bbdb90c32c3 100644
--- a/ft/tests/test-leafentry-nested.cc
+++ b/ft/tests/test-leafentry-nested.cc
@@ -22,7 +22,7 @@ verify_ule_equal(ULE a, ULE b) {
assert(memcmp(a->keyp, b->keyp, a->keylen) == 0);
assert(a->num_cuxrs == b->num_cuxrs);
assert(a->num_puxrs == b->num_puxrs);
- u_int32_t i;
+ uint32_t i;
for (i = 0; i < (a->num_cuxrs + a->num_puxrs); i++) {
assert(a->uxrs[i].type == b->uxrs[i].type);
assert(a->uxrs[i].xid == b->uxrs[i].xid);
@@ -56,9 +56,9 @@ verify_le_equal(LEAFENTRY a, LEAFENTRY b) {
}
static void
-fillrandom(u_int8_t buf[MAX_SIZE], u_int32_t length) {
+fillrandom(uint8_t buf[MAX_SIZE], uint32_t length) {
assert(length < MAX_SIZE);
- u_int32_t i;
+ uint32_t i;
for (i = 0; i < length; i++) {
buf[i] = random() & 0xFF;
}
@@ -186,9 +186,9 @@ le_verify_accessors(LEAFENTRY le, ULE ule, size_t pre_calculated_memsize) {
size_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
void *key = ule->keyp;
- u_int32_t keylen = ule->keylen;
+ uint32_t keylen = ule->keylen;
void *latest_val = ule->uxrs[num_uxrs -1].type == XR_DELETE ? NULL : ule->uxrs[num_uxrs -1].valp;
- u_int32_t latest_vallen = ule->uxrs[num_uxrs -1].type == XR_DELETE ? 0 : ule->uxrs[num_uxrs -1].vallen;
+ uint32_t latest_vallen = ule->uxrs[num_uxrs -1].type == XR_DELETE ? 0 : ule->uxrs[num_uxrs -1].vallen;
{
int i;
for (i = num_uxrs - 1; i >= 0; i--) {
@@ -196,7 +196,7 @@ le_verify_accessors(LEAFENTRY le, ULE ule, size_t pre_calculated_memsize) {
goto found_insert;
}
}
- assert(FALSE);
+ assert(false);
}
found_insert:;
TXNID outermost_uncommitted_xid = ule->num_puxrs == 0 ? TXNID_NONE : ule->uxrs[ule->num_cuxrs].xid;
@@ -207,7 +207,7 @@ found_insert:;
assert(memsize == pre_calculated_memsize);
assert(memsize == leafentry_memsize(le));
{
- u_int32_t test_keylen;
+ uint32_t test_keylen;
void* test_keyp = le_key_and_len(le, &test_keylen);
if (key != NULL) assert(test_keyp != key);
assert(test_keylen == keylen);
@@ -216,7 +216,7 @@ found_insert:;
assert(le_keylen(le) == test_keylen);
}
{
- u_int32_t test_vallen;
+ uint32_t test_vallen;
void* test_valp = le_latest_val_and_len(le, &test_vallen);
if (latest_val != NULL) assert(test_valp != latest_val);
assert(test_vallen == latest_vallen);
@@ -239,10 +239,10 @@ test_le_pack_committed (void) {
ULE_S ule;
ule.uxrs = ule.uxrs_static;
- u_int8_t key[MAX_SIZE];
- u_int8_t val[MAX_SIZE];
- u_int32_t keysize;
- u_int32_t valsize;
+ uint8_t key[MAX_SIZE];
+ uint8_t val[MAX_SIZE];
+ uint32_t keysize;
+ uint32_t valsize;
for (keysize = 0; keysize < MAX_SIZE; keysize += (random() % MAX_SIZE) + 1) {
for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
fillrandom(key, keysize);
@@ -281,17 +281,17 @@ test_le_pack_committed (void) {
}
static void
-test_le_pack_uncommitted (u_int8_t committed_type, u_int8_t prov_type, int num_placeholders) {
+test_le_pack_uncommitted (uint8_t committed_type, uint8_t prov_type, int num_placeholders) {
ULE_S ule;
ule.uxrs = ule.uxrs_static;
assert(num_placeholders >= 0);
- u_int8_t key[MAX_SIZE];
- u_int8_t cval[MAX_SIZE];
- u_int8_t pval[MAX_SIZE];
- u_int32_t keysize;
- u_int32_t cvalsize;
- u_int32_t pvalsize;
+ uint8_t key[MAX_SIZE];
+ uint8_t cval[MAX_SIZE];
+ uint8_t pval[MAX_SIZE];
+ uint32_t keysize;
+ uint32_t cvalsize;
+ uint32_t pvalsize;
for (keysize = 0; keysize < MAX_SIZE; keysize += (random() % MAX_SIZE) + 1) {
for (cvalsize = 0; cvalsize < MAX_SIZE; cvalsize += (random() % MAX_SIZE) + 1) {
pvalsize = (cvalsize + random()) % MAX_SIZE;
@@ -479,7 +479,7 @@ generate_provpair_for(ULE ule, FT_MSG msg) {
ule->num_cuxrs = 1;
ule->num_puxrs = xids_get_num_xids(xids);
- u_int32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
ule->keylen = msg->u.id.key->size;
ule->keyp = msg->u.id.key->data;
ule->uxrs[0].type = XR_DELETE;
@@ -507,10 +507,10 @@ test_le_empty_apply(void) {
DBT key;
DBT val;
- u_int8_t keybuf[MAX_SIZE];
- u_int8_t valbuf[MAX_SIZE];
- u_int32_t keysize;
- u_int32_t valsize;
+ uint8_t keybuf[MAX_SIZE];
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t keysize;
+ uint32_t valsize;
uint32_t nesting_level;
for (keysize = 0; keysize < MAX_SIZE; keysize += (random() % MAX_SIZE) + 1) {
for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
@@ -569,7 +569,7 @@ generate_provdel_for(ULE ule, FT_MSG msg) {
ule->num_cuxrs = 1;
ule->num_puxrs = xids_get_num_xids(xids);
- u_int32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
ule->keylen = msg->u.id.key->size;
ule->keyp = msg->u.id.key->data;
ule->uxrs[0].type = XR_INSERT;
@@ -595,7 +595,7 @@ generate_both_for(ULE ule, DBT *oldval, FT_MSG msg) {
ule->num_cuxrs = 1;
ule->num_puxrs = xids_get_num_xids(xids);
- u_int32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
+ uint32_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
ule->keylen = msg->u.id.key->size;
ule->keyp = msg->u.id.key->data;
ule->uxrs[0].type = XR_INSERT;
@@ -624,10 +624,10 @@ test_le_committed_apply(void) {
DBT key;
DBT val;
- u_int8_t keybuf[MAX_SIZE];
- u_int8_t valbuf[MAX_SIZE];
- u_int32_t keysize;
- u_int32_t valsize;
+ uint8_t keybuf[MAX_SIZE];
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t keysize;
+ uint32_t valsize;
uint32_t nesting_level;
for (keysize = 0; keysize < MAX_SIZE; keysize += (random() % MAX_SIZE) + 1) {
for (valsize = 0; valsize < MAX_SIZE; valsize += (random() % MAX_SIZE) + 1) {
@@ -668,8 +668,8 @@ test_le_committed_apply(void) {
}
{
- u_int8_t valbuf2[MAX_SIZE];
- u_int32_t valsize2 = random() % MAX_SIZE;
+ uint8_t valbuf2[MAX_SIZE];
+ uint32_t valsize2 = random() % MAX_SIZE;
fillrandom(valbuf2, valsize2);
DBT val2;
toku_fill_dbt(&val2, valbuf2, valsize2);
@@ -682,8 +682,8 @@ test_le_committed_apply(void) {
{
//INSERT_NO_OVERWRITE will not change a committed insert
ULE_S ule_expected = ule_initial;
- u_int8_t valbuf2[MAX_SIZE];
- u_int32_t valsize2 = random() % MAX_SIZE;
+ uint8_t valbuf2[MAX_SIZE];
+ uint32_t valsize2 = random() % MAX_SIZE;
fillrandom(valbuf2, valsize2);
DBT val2;
toku_fill_dbt(&val2, valbuf2, valsize2);
@@ -708,10 +708,10 @@ static void test_le_optimize(void) {
DBT val;
ULE_S ule_initial;
ULE_S ule_expected;
- u_int8_t keybuf[MAX_SIZE];
- u_int32_t keysize=8;
- u_int8_t valbuf[MAX_SIZE];
- u_int32_t valsize=8;
+ uint8_t keybuf[MAX_SIZE];
+ uint32_t keysize=8;
+ uint8_t valbuf[MAX_SIZE];
+ uint32_t valsize=8;
ule_initial.uxrs = ule_initial.uxrs_static;
ule_expected.uxrs = ule_expected.uxrs_static;
TXNID optimize_txnid = 1000;
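
le_verify_accessors (partially shown above) pins down the packing contract: a packed LEAFENTRY must echo back the ULE's key and latest value through the accessors, as a copy rather than an alias. The core checks, condensed:

    // Hedged sketch of the accessor round-trip checked above.
    static void check_key_val(LEAFENTRY le, void *key, uint32_t keylen,
                              void *latest_val, uint32_t latest_vallen) {
        uint32_t test_keylen;
        void *test_keyp = le_key_and_len(le, &test_keylen);
        if (key != NULL) {
            assert(test_keyp != key);  // packed copy, not an alias
            assert(memcmp(test_keyp, key, keylen) == 0);
        }
        assert(test_keylen == keylen);
        assert(le_keylen(le) == test_keylen);

        uint32_t test_vallen;
        void *test_valp = le_latest_val_and_len(le, &test_vallen);
        assert(test_vallen == latest_vallen);
        if (latest_val != NULL) {
            assert(test_valp != latest_val);
            assert(memcmp(test_valp, latest_val, latest_vallen) == 0);
        }
    }
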
diff --git a/ft/tests/test-merges-on-cleaner.cc b/ft/tests/test-merges-on-cleaner.cc
index 9ef48680944..263db4b38e0 100644
--- a/ft/tests/test-merges-on-cleaner.cc
+++ b/ft/tests/test-merges-on-cleaner.cc
@@ -142,7 +142,7 @@ doit (void) {
FTNODE node;
toku_pin_node_with_min_bfe(&node, node_leaf[1], brt);
// hack to get merge going
- BLB_SEQINSERT(node, node->n_children-1) = FALSE;
+ BLB_SEQINSERT(node, node->n_children-1) = false;
toku_unpin_ftnode(brt->ft, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date
@@ -158,7 +158,7 @@ doit (void) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
@@ -181,7 +181,7 @@ doit (void) {
node_internal,
toku_cachetable_hash(brt->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
diff --git a/ft/tests/test-pick-child-to-flush.cc b/ft/tests/test-pick-child-to-flush.cc
index 7839e8d9923..6f443d0300e 100644
--- a/ft/tests/test-pick-child-to-flush.cc
+++ b/ft/tests/test-pick-child-to-flush.cc
@@ -39,7 +39,7 @@ static int child_to_flush(FT UU(h), FTNODE parent, void* UU(extra)) {
return 0;
}
else {
- assert(FALSE);
+ assert(false);
}
return curr_child_to_flush;
}
@@ -63,11 +63,11 @@ static void merge_should_not_happen(struct flusher_advice* UU(fa),
FTNODE UU(child),
void* UU(extra))
{
- assert(FALSE);
+ assert(false);
}
static bool recursively_flush_should_not_happen(FTNODE UU(child), void* UU(extra)) {
- assert(FALSE);
+ assert(false);
}
static bool always_flush(FTNODE UU(child), void* UU(extra)) {
diff --git a/ft/tests/test.h b/ft/tests/test.h
index 83e3e8e9c00..c54f09d24cf 100644
--- a/ft/tests/test.h
+++ b/ft/tests/test.h
@@ -124,10 +124,10 @@ def_flush (CACHEFILE f __attribute__((__unused__)),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
- BOOL w __attribute__((__unused__)),
- BOOL keep __attribute__((__unused__)),
- BOOL c __attribute__((__unused__)),
- BOOL UU(is_clone)
+ bool w __attribute__((__unused__)),
+ bool keep __attribute__((__unused__)),
+ bool c __attribute__((__unused__)),
+ bool UU(is_clone)
) {
}
@@ -156,12 +156,12 @@ def_pe_callback (
return 0;
}
-static UU() BOOL def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
- return FALSE;
+static UU() bool def_pf_req_callback(void* UU(ftnode_pv), void* UU(read_extraargs)) {
+ return false;
}
static UU() int def_pf_callback(void* UU(ftnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
- assert(FALSE);
+ assert(false);
return 0;
}
@@ -169,7 +169,7 @@ static UU() int
def_fetch (CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
- u_int32_t fullhash __attribute__((__unused__)),
+ uint32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
void **dd __attribute__((__unused__)),
PAIR_ATTR *sizep __attribute__((__unused__)),
@@ -187,11 +187,11 @@ static UU() int
def_cleaner_callback(
void* UU(ftnode_pv),
BLOCKNUM UU(blocknum),
- u_int32_t UU(fullhash),
+ uint32_t UU(fullhash),
void* UU(extraargs)
)
{
- assert(FALSE);
+ assert(false);
return 0;
}
diff --git a/ft/tests/test1308a.cc b/ft/tests/test1308a.cc
index ecc1d1f337a..c99332171c3 100644
--- a/ft/tests/test1308a.cc
+++ b/ft/tests/test1308a.cc
@@ -23,12 +23,12 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute_
int fd;
{
- static u_int64_t buf [BUFSIZE]; // make this static because it's too big to fit on the stack.
+ static uint64_t buf [BUFSIZE]; // make this static because it's too big to fit on the stack.
fd = open(FNAME, O_CREAT+O_RDWR+O_BINARY, 0777);
assert(fd>=0);
memset(buf, 0, sizeof(buf));
- u_int64_t i;
+ uint64_t i;
for (i=0; i<(1LL<<32); i+=BUFSIZE) {
toku_os_full_write(fd, buf, BUFSIZE);
}
diff --git a/ft/tests/test3856.cc b/ft/tests/test3856.cc
index 859782c1150..e81ada7ea29 100644
--- a/ft/tests/test3856.cc
+++ b/ft/tests/test3856.cc
@@ -61,12 +61,12 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute_
FT_CURSOR c;
char lkey[100],rkey[100];
DBT lk, rk;
- r = toku_ft_cursor(t, &c, null_txn, FALSE, FALSE); assert(r == 0);
+ r = toku_ft_cursor(t, &c, null_txn, false, false); assert(r == 0);
snprintf(lkey, 100, "hello%d", i);
snprintf(rkey, 100, "hello%d", i + 100);
toku_ft_cursor_set_range_lock(c, toku_fill_dbt(&lk, lkey, 1+strlen(lkey)),
toku_fill_dbt(&rk, rkey, 1+strlen(rkey)),
- FALSE, FALSE);
+ false, false);
r = toku_ft_cursor_set(c, &lk, found, NULL); assert(r == 0);
for (int j = 0; j < 100; ++j) {
r = toku_ft_cursor_next(c, found, NULL); assert(r == 0);
diff --git a/ft/tests/test3884.cc b/ft/tests/test3884.cc
index e20ee281f94..d672d9e0847 100644
--- a/ft/tests/test3884.cc
+++ b/ft/tests/test3884.cc
@@ -23,7 +23,7 @@ static const int keylen = sizeof(long);
static const int vallen = 64 - sizeof(long) - (sizeof(((LEAFENTRY)NULL)->type) // overhead from LE_CLEAN_MEMSIZE
+sizeof(((LEAFENTRY)NULL)->keylen)
+sizeof(((LEAFENTRY)NULL)->u.clean.vallen));
-#define dummy_msn_3884 ((MSN) { (u_int64_t) 3884 * MIN_MSN.msn })
+#define dummy_msn_3884 ((MSN) { (uint64_t) 3884 * MIN_MSN.msn })
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
@@ -34,7 +34,7 @@ static int omt_long_cmp(OMTVALUE p, void *q)
LEAFENTRY CAST_FROM_VOIDP(a, p);
LEAFENTRY CAST_FROM_VOIDP(b, q);
void *ak, *bk;
- u_int32_t al, bl;
+ uint32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
assert(al == sizeof(long) && bl == sizeof(long));
@@ -172,7 +172,7 @@ test_split_on_boundary(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884);
@@ -245,7 +245,7 @@ test_split_with_everything_on_the_left(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
@@ -320,7 +320,7 @@ test_split_on_boundary_of_last_node(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
@@ -388,7 +388,7 @@ test_split_at_begin(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
@@ -452,7 +452,7 @@ test_split_at_end(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
@@ -506,7 +506,7 @@ test_split_odd_nodes(void)
FTNODE nodea, nodeb;
DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries
- ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
+ ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884);
diff --git a/ft/tests/test4115.cc b/ft/tests/test4115.cc
index baef08a6f8a..0f694a2df99 100644
--- a/ft/tests/test4115.cc
+++ b/ft/tests/test4115.cc
@@ -38,9 +38,9 @@ static void open_ft_and_ct (bool unlink_old) {
}
static void test_4115 (void) {
- u_int64_t limit=30000;
+ uint64_t limit=30000;
open_ft_and_ct(true);
- for (u_int64_t i=0; i<limit; i++) {
+ for (uint64_t i=0; i<limit; i++) {
char key[100],val[100];
snprintf(key, 100, "%08llu", (unsigned long long)2*i+1);
snprintf(val, 100, "%08llu", (unsigned long long)2*i+1);
diff --git a/ft/tests/test4244.cc b/ft/tests/test4244.cc
index 10c2d67b21e..808869be13c 100644
--- a/ft/tests/test4244.cc
+++ b/ft/tests/test4244.cc
@@ -51,7 +51,7 @@ doit (void) {
assert(r==0);
// make a 1MB val
- u_int32_t big_val_size = 1000000;
+ uint32_t big_val_size = 1000000;
char* XCALLOC_N(big_val_size, big_val);
DBT k,v;
memset(&k, 0, sizeof(k));
@@ -77,7 +77,7 @@ doit (void) {
node_internal,
toku_cachetable_hash(t->ft->cf, node_internal),
&bfe,
- TRUE,
+ true,
0,
NULL,
&node
diff --git a/ft/tests/test_block_allocator_merge.cc b/ft/tests/test_block_allocator_merge.cc
index 45fbf950ff2..41f02bd8c6f 100644
--- a/ft/tests/test_block_allocator_merge.cc
+++ b/ft/tests/test_block_allocator_merge.cc
@@ -10,9 +10,9 @@
int verbose = 0;
static void
-print_array (u_int64_t n, const struct block_allocator_blockpair a[/*n*/]) {
+print_array (uint64_t n, const struct block_allocator_blockpair a[/*n*/]) {
printf("{");
- for (u_int64_t i=0; i<n; i++) printf(" %016lx", (long)a[i].offset);
+ for (uint64_t i=0; i<n; i++) printf(" %016lx", (long)a[i].offset);
printf("}\n");
}
@@ -26,8 +26,8 @@ compare_blockpairs (const void *av, const void *bv) {
}
static void
-test_merge (u_int64_t an, const struct block_allocator_blockpair a[/*an*/],
- u_int64_t bn, const struct block_allocator_blockpair b[/*bn*/]) {
+test_merge (uint64_t an, const struct block_allocator_blockpair a[/*an*/],
+ uint64_t bn, const struct block_allocator_blockpair b[/*bn*/]) {
if (verbose>1) { printf("a:"); print_array(an, a); }
if (verbose>1) { printf("b:"); print_array(bn, b); }
struct block_allocator_blockpair *MALLOC_N(an+bn, q);
@@ -36,10 +36,10 @@ test_merge (u_int64_t an, const struct block_allocator_blockpair a[/*an*/],
fprintf(stderr, "malloc failed, continuing\n");
goto malloc_failed;
}
- for (u_int64_t i=0; i<an; i++) {
+ for (uint64_t i=0; i<an; i++) {
q[i] = m[i] = a[i];
}
- for (u_int64_t i=0; i<bn; i++) {
+ for (uint64_t i=0; i<bn; i++) {
q[an+i] = b[i];
}
if (verbose) printf("qsort\n");
@@ -49,7 +49,7 @@ test_merge (u_int64_t an, const struct block_allocator_blockpair a[/*an*/],
block_allocator_merge_blockpairs_into(an, m, bn, b);
if (verbose) printf("compare\n");
if (verbose>1) { printf("m:"); print_array(an+bn, m); }
- for (u_int64_t i=0; i<an+bn; i++) {
+ for (uint64_t i=0; i<an+bn; i++) {
assert(q[i].offset == m[i].offset);
}
malloc_failed:
@@ -57,17 +57,17 @@ test_merge (u_int64_t an, const struct block_allocator_blockpair a[/*an*/],
toku_free(m);
}
-static u_int64_t
-compute_a (u_int64_t i, int mode) {
- if (mode==0) return (((u_int64_t)random()) << 32) + i;
+static uint64_t
+compute_a (uint64_t i, int mode) {
+ if (mode==0) return (((uint64_t)random()) << 32) + i;
if (mode==1) return 2*i;
if (mode==2) return i;
if (mode==3) return (1LL<<50) + i;
abort();
}
-static u_int64_t
-compute_b (u_int64_t i, int mode) {
- if (mode==0) return (((u_int64_t)random()) << 32) + i;
+static uint64_t
+compute_b (uint64_t i, int mode) {
+ if (mode==0) return (((uint64_t)random()) << 32) + i;
if (mode==1) return 2*i+1;
if (mode==2) return (1LL<<50) + i;
if (mode==3) return i;
@@ -76,7 +76,7 @@ compute_b (u_int64_t i, int mode) {
static void
-test_merge_n_m (u_int64_t n, u_int64_t m, int mode)
+test_merge_n_m (uint64_t n, uint64_t m, int mode)
{
struct block_allocator_blockpair *MALLOC_N(n, na);
struct block_allocator_blockpair *MALLOC_N(m, ma);
@@ -85,11 +85,11 @@ test_merge_n_m (u_int64_t n, u_int64_t m, int mode)
goto malloc_failed;
}
if (verbose) printf("Filling a[%" PRIu64 "]\n", n);
- for (u_int64_t i=0; i<n; i++) {
+ for (uint64_t i=0; i<n; i++) {
na[i].offset = compute_a(i, mode);
}
if (verbose) printf("Filling b[%" PRIu64 "]\n", m);
- for (u_int64_t i=0; i<m; i++) {
+ for (uint64_t i=0; i<m; i++) {
if (verbose && i % (1+m/10) == 0) { printf("."); fflush(stdout); }
ma[i].offset = compute_b(i, mode);
}
@@ -104,22 +104,22 @@ test_merge_n_m (u_int64_t n, u_int64_t m, int mode)
static void
test_big_merge (void) {
- u_int64_t G = 1024LL * 1024LL * 1024LL;
+ uint64_t G = 1024LL * 1024LL * 1024LL;
if (toku_os_get_phys_memory_size() < 40 * G) {
fprintf(stderr, "Skipping big merge because there is only %4.1fGiB physical memory\n", toku_os_get_phys_memory_size()/(1024.0*1024.0*1024.0));
} else {
- u_int64_t twoG = 2*G;
+ uint64_t twoG = 2*G;
- u_int64_t an = twoG;
- u_int64_t bn = 1;
+ uint64_t an = twoG;
+ uint64_t bn = 1;
struct block_allocator_blockpair *MALLOC_N(an+bn, a);
assert(a);
struct block_allocator_blockpair *MALLOC_N(bn, b);
assert(b);
- for (u_int64_t i=0; i<an; i++) a[i].offset=i+1;
+ for (uint64_t i=0; i<an; i++) a[i].offset=i+1;
b[0].offset = 0;
block_allocator_merge_blockpairs_into(an, a, bn, b);
- for (u_int64_t i=0; i<an+bn; i++) assert(a[i].offset == i);
+ for (uint64_t i=0; i<an+bn; i++) assert(a[i].offset == i);
toku_free(a);
toku_free(b);
}
@@ -133,7 +133,7 @@ int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__un
test_merge_n_m(1000000, 1000000, 0);
// Cannot run this on my laptop, or even on pointy
#if 0
- u_int64_t too_big = 1024LL * 1024LL * 1024LL * 2;
+ uint64_t too_big = 1024LL * 1024LL * 1024LL * 2;
test_merge_n_m(too_big, too_big);
test_merge_n_m(1, too_big, 0);
#endif
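
test_merge validates block_allocator_merge_blockpairs_into against a qsort oracle: copy a into the front of the destination, merge b into it in place, and demand the result match sorting the plain concatenation. The oracle check, condensed:

    // Hedged sketch of the merge-vs-qsort oracle used above.
    static void check_merge(uint64_t an, const struct block_allocator_blockpair a[],
                            uint64_t bn, const struct block_allocator_blockpair b[]) {
        struct block_allocator_blockpair *MALLOC_N(an+bn, q);  // oracle copy
        struct block_allocator_blockpair *MALLOC_N(an+bn, m);  // merge dest
        assert(q && m);
        for (uint64_t i = 0; i < an; i++) q[i] = m[i] = a[i];
        for (uint64_t i = 0; i < bn; i++) q[an+i] = b[i];
        qsort(q, an+bn, sizeof(*q), compare_blockpairs);
        block_allocator_merge_blockpairs_into(an, m, bn, b);
        for (uint64_t i = 0; i < an+bn; i++) assert(q[i].offset == m[i].offset);
        toku_free(q);
        toku_free(m);
    }
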
diff --git a/ft/tests/test_logcursor.cc b/ft/tests/test_logcursor.cc
index f23fb59cd86..218b8e7ee73 100644
--- a/ft/tests/test_logcursor.cc
+++ b/ft/tests/test_logcursor.cc
@@ -168,8 +168,8 @@ int create_logfiles() {
TXNID txnid = 0;
TXNID cp_txnid = 0;
- u_int32_t num_fassociate = 0;
- u_int32_t num_xstillopen = 0;
+ uint32_t num_fassociate = 0;
+ uint32_t num_xstillopen = 0;
bs_aname.len = 4; bs_aname.data=(char *)"a.db";
bs_bname.len = 4; bs_bname.data=(char *)"b.db";
diff --git a/ft/tests/test_partitioned_counter.cc b/ft/tests/test_partitioned_counter.cc
index 630ecb52e3c..0f387dad3ff 100644
--- a/ft/tests/test_partitioned_counter.cc
+++ b/ft/tests/test_partitioned_counter.cc
@@ -33,7 +33,6 @@
*/
#include <pthread.h>
-#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/ft/tests/x1764-test.cc b/ft/tests/x1764-test.cc
index 2497593debe..812ea269eff 100644
--- a/ft/tests/x1764-test.cc
+++ b/ft/tests/x1764-test.cc
@@ -8,7 +8,7 @@
#include "test.h"
static void
test0 (void) {
- u_int32_t c = x1764_memory("", 0);
+ uint32_t c = x1764_memory("", 0);
assert(c==~(0U));
struct x1764 cs;
x1764_init(&cs);
@@ -19,12 +19,12 @@ test0 (void) {
static void
test1 (void) {
- u_int64_t v=0x123456789abcdef0ULL;
- u_int32_t c;
+ uint64_t v=0x123456789abcdef0ULL;
+ uint32_t c;
int i;
for (i=0; i<=8; i++) {
- u_int64_t expect64 = (i==8) ? v : v&((1LL<<(8*i))-1);
- u_int32_t expect = expect64 ^ (expect64>>32);
+ uint64_t expect64 = (i==8) ? v : v&((1LL<<(8*i))-1);
+ uint32_t expect = expect64 ^ (expect64>>32);
c = x1764_memory(&v, i);
//printf("i=%d c=%08x expect=%08x\n", i, c, expect);
assert(c==~expect);
@@ -42,7 +42,7 @@ test2 (void) {
int j;
for (j=i; j<=N; j++) {
// checksum from i (inclusive to j (exclusive)
- u_int32_t c = x1764_memory(&v[i], j-i);
+ uint32_t c = x1764_memory(&v[i], j-i);
// Now compute the checksum incrementally with various strides.
int stride;
for (stride=1; stride<=j-i; stride++) {
@@ -53,7 +53,7 @@ test2 (void) {
x1764_add(&s, &v[k], stride);
}
x1764_add(&s, &v[k], j-k);
- u_int32_t c2 = x1764_finish(&s);
+ uint32_t c2 = x1764_finish(&s);
assert(c2==c);
}
// Now use some random strides.
@@ -68,7 +68,7 @@ test2 (void) {
k+=stride;
}
x1764_add(&s, &v[k], j-k);
- u_int32_t c2 = x1764_finish(&s);
+ uint32_t c2 = x1764_finish(&s);
assert(c2==c);
}
}
@@ -85,8 +85,8 @@ test3 (void)
for (int off=0; off<32; off++) {
if (verbose) {printf("."); fflush(stdout);}
for (int len=0; len+off<datalen; len++) {
- u_int32_t reference_sum = x1764_memory_simple(data+off, len);
- u_int32_t fast_sum = x1764_memory (data+off, len);
+ uint32_t reference_sum = x1764_memory_simple(data+off, len);
+ uint32_t fast_sum = x1764_memory (data+off, len);
assert(reference_sum==fast_sum);
}
}
diff --git a/ft/tests/xid_lsn_independent.cc b/ft/tests/xid_lsn_independent.cc
index c9d995b9616..0ffdc53c5c9 100644
--- a/ft/tests/xid_lsn_independent.cc
+++ b/ft/tests/xid_lsn_independent.cc
@@ -24,7 +24,7 @@ static void do_txn(TOKULOGGER logger, bool readonly) {
if (!readonly) {
toku_maybe_log_begin_txn_for_write_operation(txn);
}
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txn);
@@ -46,7 +46,7 @@ static void test_xid_lsn_independent(int N) {
r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun);
CKERR(r);
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txn);
@@ -70,11 +70,11 @@ static void test_xid_lsn_independent(int N) {
CKERR(r);
// Verify the txnid has gone up only by one (even though many log entries were done)
invariant(txn2->txnid64 == xid_first + 1);
- r = toku_txn_commit_txn(txn2, FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txn2, false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txn2);
}
- r = toku_txn_commit_txn(txn, FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txn, false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txn);
{
@@ -84,7 +84,7 @@ static void test_xid_lsn_independent(int N) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_ROOT);
CKERR(r);
invariant(txn3->txnid64 == xid_first + 2);
- r = toku_txn_commit_txn(txn3, FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txn3, false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txn3);
}
@@ -204,7 +204,7 @@ static void test_xid_lsn_independent_parents(int N) {
invariant(txns[i]->begin_was_logged);
}
for (int i = N-1; i >= 0; i--) {
- r = toku_txn_commit_txn(txns[i], FALSE, NULL, NULL);
+ r = toku_txn_commit_txn(txns[i], false, NULL, NULL);
CKERR(r);
toku_txn_close_txn(txns[i]);
diff --git a/ft/txn.cc b/ft/txn.cc
index 5f72d70a1fd..a7687b7bc84 100644
--- a/ft/txn.cc
+++ b/ft/txn.cc
@@ -63,7 +63,7 @@ toku_txn_unlock(TOKUTXN txn)
toku_mutex_unlock(&txn->txn_lock);
}
-u_int64_t
+uint64_t
toku_txn_get_id(TOKUTXN txn)
{
return txn->txnid64;
@@ -159,9 +159,9 @@ toku_txn_create_txn (
.live_root_txn_list = nullptr,
.xids = xids,
.begin_was_logged = false,
- .checkpoint_needed_before_commit = FALSE,
- .do_fsync = FALSE,
- .force_fsync_on_commit = FALSE,
+ .checkpoint_needed_before_commit = false,
+ .do_fsync = false,
+ .force_fsync_on_commit = false,
.do_fsync_lsn = ZERO_LSN,
.xa_xid = {0},
.progress_poll_fun = NULL,
@@ -258,7 +258,7 @@ int toku_txn_commit_txn(TOKUTXN txn, int nosync,
void
toku_txn_require_checkpoint_on_commit(TOKUTXN txn) {
- txn->checkpoint_needed_before_commit = TRUE;
+ txn->checkpoint_needed_before_commit = true;
}
struct xcommit_info {
@@ -266,7 +266,7 @@ struct xcommit_info {
TOKUTXN txn;
};
-BOOL toku_txn_requires_checkpoint(TOKUTXN txn) {
+bool toku_txn_requires_checkpoint(TOKUTXN txn) {
return (!txn->parent && txn->checkpoint_needed_before_commit);
}
@@ -324,7 +324,7 @@ int toku_txn_abort_with_lsn(TOKUTXN txn, LSN oplsn,
txn->progress_poll_fun = poll;
txn->progress_poll_fun_extra = poll_extra;
int r;
- txn->do_fsync = FALSE;
+ txn->do_fsync = false;
if (!toku_txn_is_read_only(txn)) {
r = toku_log_xabort(txn->logger, &txn->do_fsync_lsn, 0, txn, txn->txnid64);
@@ -373,7 +373,7 @@ void toku_txn_get_prepared_xa_xid (TOKUTXN txn, TOKU_XA_XID *xid) {
copy_xid(xid, &txn->xa_xid);
}
-int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, u_int32_t flags) {
+int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags) {
return toku_txn_manager_recover_txn(
logger->txn_manager,
preplist,
@@ -383,7 +383,7 @@ int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist prepl
);
}
-int toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, BOOL do_fsync) {
+int toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, bool do_fsync) {
int r = 0;
if (logger && do_fsync) {
r = toku_logger_fsync_if_lsn_not_fsynced(logger, do_fsync_lsn);
@@ -391,7 +391,7 @@ int toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, BOOL do_fsync)
return r;
}
-void toku_txn_get_fsync_info(TOKUTXN ttxn, BOOL* do_fsync, LSN* do_fsync_lsn) {
+void toku_txn_get_fsync_info(TOKUTXN ttxn, bool* do_fsync, LSN* do_fsync_lsn) {
*do_fsync = ttxn->do_fsync;
*do_fsync_lsn = ttxn->do_fsync_lsn;
}
@@ -401,7 +401,7 @@ void toku_txn_close_txn(TOKUTXN txn) {
toku_txn_destroy_txn(txn);
}
-static int remove_txn (OMTVALUE hv, u_int32_t UU(idx), void *txnv)
+static int remove_txn (OMTVALUE hv, uint32_t UU(idx), void *txnv)
// Effect: This function is called on every open FT that a transaction used.
// This function removes the transaction from that FT.
{
@@ -454,7 +454,7 @@ XIDS toku_txn_get_xids (TOKUTXN txn) {
}
void toku_txn_force_fsync_on_commit(TOKUTXN txn) {
- txn->force_fsync_on_commit = TRUE;
+ txn->force_fsync_on_commit = true;
}
TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) {
@@ -471,7 +471,7 @@ bool toku_is_txn_in_live_root_txn_list(const xid_omt_t &live_root_txn_list, TXNI
int r = live_root_txn_list.find_zero<TXNID, toku_find_xid_by_xid>(xid, &txnid, nullptr);
if (r==0) {
invariant(txnid == xid);
- retval = TRUE;
+ retval = true;
}
else {
invariant(r==DB_NOTFOUND);
diff --git a/ft/txn.h b/ft/txn.h
index a7b9d3f42b9..07a391a4d86 100644
--- a/ft/txn.h
+++ b/ft/txn.h
@@ -12,7 +12,7 @@
void toku_txn_lock(TOKUTXN txn);
void toku_txn_unlock(TOKUTXN txn);
-u_int64_t toku_txn_get_id(TOKUTXN txn);
+uint64_t toku_txn_get_id(TOKUTXN txn);
int toku_txn_begin_txn (
DB_TXN *container_db_txn,
@@ -44,7 +44,7 @@ int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info);
int toku_txn_commit_txn (TOKUTXN txn, int nosync,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
-BOOL toku_txn_requires_checkpoint(TOKUTXN txn);
+bool toku_txn_requires_checkpoint(TOKUTXN txn);
int toku_txn_commit_with_lsn(TOKUTXN txn, int nosync, LSN oplsn,
TXN_PROGRESS_POLL_FUNCTION poll, void *poll_extra);
@@ -59,9 +59,9 @@ int toku_txn_prepare_txn (TOKUTXN txn, TOKU_XA_XID *xid) __attribute__((warn_unu
void toku_txn_get_prepared_xa_xid (TOKUTXN, TOKU_XA_XID *);
// Effect: Fill in the XID information for a transaction. The caller allocates the XID and the function fills in values.
-int toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, BOOL do_fsync);
+int toku_txn_maybe_fsync_log(TOKULOGGER logger, LSN do_fsync_lsn, bool do_fsync);
-void toku_txn_get_fsync_info(TOKUTXN ttxn, BOOL* do_fsync, LSN* do_fsync_lsn);
+void toku_txn_get_fsync_info(TOKUTXN ttxn, bool* do_fsync, LSN* do_fsync_lsn);
// Complete and destroy a txn
void toku_txn_close_txn(TOKUTXN txn);
@@ -91,7 +91,7 @@ typedef enum {
} txn_status_entry;
typedef struct {
- BOOL initialized;
+ bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[TXN_STATUS_NUM_ROWS];
} TXN_STATUS_S, *TXN_STATUS;
@@ -109,7 +109,7 @@ struct tokulogger_preplist {
TOKU_XA_XID xid;
DB_TXN *txn;
};
-int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, u_int32_t flags);
+int toku_logger_recover_txn (TOKULOGGER logger, struct tokulogger_preplist preplist[/*count*/], long count, /*out*/ long *retp, uint32_t flags);
void toku_maybe_log_begin_txn_for_write_operation(TOKUTXN txn);
diff --git a/ft/txn_manager.cc b/ft/txn_manager.cc
index 1feb262c7a4..37fddcb91a3 100644
--- a/ft/txn_manager.cc
+++ b/ft/txn_manager.cc
@@ -14,10 +14,10 @@
#include "omt-tmpl.h"
#include "rollback.h"
-BOOL garbage_collection_debug = FALSE;
+bool garbage_collection_debug = false;
-static BOOL is_txnid_live(TXN_MANAGER txn_manager, TXNID txnid) {
+static bool is_txnid_live(TXN_MANAGER txn_manager, TXNID txnid) {
TOKUTXN result = NULL;
toku_txn_manager_id2txn_unlocked(txn_manager, txnid, &result);
return (result != NULL);
@@ -156,7 +156,7 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
for (i = 0; i < num_live_txns; i++) {
TOKUTXN txn = live_txns[i];
- BOOL expect = txn->snapshot_txnid64 == txn->txnid64;
+ bool expect = txn->snapshot_txnid64 == txn->txnid64;
{
//verify pair->xid2 is in snapshot_xids
r = txn_manager->snapshot_txnids.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid64, nullptr, nullptr);
@@ -369,7 +369,7 @@ int toku_txn_manager_start_txn(
txn->live_root_txn_list = parent->live_root_txn_list;
}
else {
- assert(FALSE);
+ assert(false);
}
}
}
@@ -469,7 +469,7 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
{
//Remove txn from list (omt) of live transactions
TOKUTXN txnagain;
- u_int32_t idx;
+ uint32_t idx;
r = txn_manager->live_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx);
invariant_zero(r);
invariant(txn==txnagain);
@@ -497,7 +497,7 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
if (txn->parent==NULL) {
TXNID xid;
- u_int32_t idx;
+ uint32_t idx;
//Remove txn from list of live root txns
r = txn_manager->live_root_txns.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid64, &xid, &idx);
invariant_zero(r);
@@ -598,7 +598,7 @@ exit:
return ret_val;
}
-u_int32_t toku_txn_manager_num_live_txns(TXN_MANAGER txn_manager) {
+uint32_t toku_txn_manager_num_live_txns(TXN_MANAGER txn_manager) {
int ret_val = 0;
toku_mutex_lock(&txn_manager->txn_manager_lock);
ret_val = txn_manager->live_txns.size();
@@ -692,7 +692,7 @@ int toku_txn_manager_recover_txn (
struct tokulogger_preplist preplist[/*count*/],
long count,
long *retp, /*out*/
- u_int32_t flags
+ uint32_t flags
)
{
int ret_val = 0;
diff --git a/ft/txn_manager.h b/ft/txn_manager.h
index 28f3a23fcd8..f09dcdb96d6 100644
--- a/ft/txn_manager.h
+++ b/ft/txn_manager.h
@@ -95,7 +95,7 @@ int toku_txn_manager_recover_txn(
struct tokulogger_preplist preplist[/*count*/],
long count,
long *retp, /*out*/
- u_int32_t flags
+ uint32_t flags
);
void toku_txn_manager_pin_live_txn_unlocked(TXN_MANAGER txn_manager, TOKUTXN txn);
diff --git a/ft/ule.cc b/ft/ule.cc
index 1b6a7ef4e58..3914bc190b1 100644
--- a/ft/ule.cc
+++ b/ft/ule.cc
@@ -120,18 +120,18 @@ const UXR_S committed_delete = {
static void msg_init_empty_ule(ULE ule, FT_MSG msg);
static void msg_modify_ule(ULE ule, FT_MSG msg);
-static void ule_init_empty_ule(ULE ule, u_int32_t keylen, void * keyp);
+static void ule_init_empty_ule(ULE ule, uint32_t keylen, void * keyp);
static void ule_do_implicit_promotions(ULE ule, XIDS xids);
static void ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index);
static void ule_promote_provisional_innermost_to_committed(ULE ule);
-static void ule_apply_insert(ULE ule, XIDS xids, u_int32_t vallen, void * valp);
+static void ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp);
static void ule_apply_delete(ULE ule, XIDS xids);
static void ule_prepare_for_new_uxr(ULE ule, XIDS xids);
static void ule_apply_abort(ULE ule, XIDS xids);
static void ule_apply_broadcast_commit_all (ULE ule);
static void ule_apply_commit(ULE ule, XIDS xids);
-static void ule_push_insert_uxr(ULE ule, BOOL is_committed, TXNID xid, u_int32_t vallen, void * valp);
-static void ule_push_delete_uxr(ULE ule, BOOL is_committed, TXNID xid);
+static void ule_push_insert_uxr(ULE ule, bool is_committed, TXNID xid, uint32_t vallen, void * valp);
+static void ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid);
static void ule_push_placeholder_uxr(ULE ule, TXNID xid);
static UXR ule_get_innermost_uxr(ULE ule);
static UXR ule_get_first_empty_uxr(ULE ule);
@@ -141,9 +141,9 @@ static TXNID ule_get_xid(ULE ule, uint32_t index);
static void ule_remove_innermost_placeholders(ULE ule);
static void ule_add_placeholders(ULE ule, XIDS xids);
static void ule_optimize(ULE ule, XIDS xids);
-static inline BOOL uxr_type_is_insert(u_int8_t type);
-static inline BOOL uxr_type_is_delete(u_int8_t type);
-static inline BOOL uxr_type_is_placeholder(u_int8_t type);
+static inline bool uxr_type_is_insert(uint8_t type);
+static inline bool uxr_type_is_delete(uint8_t type);
+static inline bool uxr_type_is_placeholder(uint8_t type);
static inline size_t uxr_pack_txnid(UXR uxr, uint8_t *p);
static inline size_t uxr_pack_type_and_length(UXR uxr, uint8_t *p);
static inline size_t uxr_pack_length_and_bit(UXR uxr, uint8_t *p);
@@ -187,16 +187,16 @@ get_next_older_txnid(TXNID xc, const xid_omt_t &omt) {
}
//
-// This function returns TRUE if live transaction TL1 is allowed to read a value committed by
+// This function returns true if live transaction TL1 is allowed to read a value committed by
// transaction xc, false otherwise.
//
-static BOOL
+static bool
xid_reads_committed_xid(TXNID tl1, TXNID xc, const xid_omt_t &snapshot_txnids, const rx_omt_t &referenced_xids) {
- BOOL rval;
- if (tl1 < xc) rval = FALSE; //cannot read a newer txn
+ bool rval;
+ if (tl1 < xc) rval = false; //cannot read a newer txn
else {
TXNID x = toku_get_youngest_live_list_txnid_for(xc, snapshot_txnids, referenced_xids);
- if (x == TXNID_NONE) rval = TRUE; //Not in ANY live list, tl1 can read it.
+ if (x == TXNID_NONE) rval = true; //Not in ANY live list, tl1 can read it.
else rval = tl1 > x; //Newer than the 'newest one that has it in live list'
// we know tl1 > xc
// we know x > xc
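The rule in the comment above reduces to three cases. A standalone restatement, with the youngest-live-list lookup (toku_get_youngest_live_list_txnid_for in the real code) abstracted into a caller-supplied functor; TXNID_NONE is modeled as 0 here, and nothing below is the ft implementation itself:

#include <cstdint>

typedef uint64_t txnid_t;
static const txnid_t NO_TXNID = 0;   // stand-in for TXNID_NONE

// youngest(xc) must return the youngest snapshot txn that still holds xc in
// its live-root list, or NO_TXNID if no snapshot does.
template <typename YoungestFn>
static bool reads_committed(txnid_t tl1, txnid_t xc, YoungestFn youngest) {
    if (tl1 < xc) return false;        // cannot read a newer txn
    txnid_t x = youngest(xc);
    if (x == NO_TXNID) return true;    // xc is in no live list; visible to tl1
    return tl1 > x;                    // must be newer than the newest reader
}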
@@ -211,8 +211,8 @@ static void
garbage_collection(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &referenced_xids, const xid_omt_t &live_root_txns) {
if (ule->num_cuxrs == 1) goto done;
// will fail if num_cuxrs is too large
- BOOL necessary_static[MAX_TRANSACTION_RECORDS];
- BOOL *necessary;
+ bool necessary_static[MAX_TRANSACTION_RECORDS];
+ bool *necessary;
necessary = necessary_static;
if (ule->num_cuxrs >= MAX_TRANSACTION_RECORDS) {
XMALLOC_N(ule->num_cuxrs, necessary);
@@ -221,9 +221,9 @@ garbage_collection(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &refe
uint32_t curr_committed_entry;
curr_committed_entry = ule->num_cuxrs - 1;
- while (TRUE) {
+ while (true) {
// mark the curr_committed_entry as necessary
- necessary[curr_committed_entry] = TRUE;
+ necessary[curr_committed_entry] = true;
if (curr_committed_entry == 0) break; //nothing left
// find the youngest live transaction that reads something
@@ -240,7 +240,7 @@ garbage_collection(ULE ule, const xid_omt_t &snapshot_xids, const rx_omt_t &refe
// As a result, we must mark what is just below xc as necessary and move on.
// This issue was found while testing flusher threads, and was fixed for #3979
//
- BOOL is_xc_live = toku_is_txn_in_live_root_txn_list(live_root_txns, xc);
+ bool is_xc_live = toku_is_txn_in_live_root_txn_list(live_root_txns, xc);
if (is_xc_live) {
curr_committed_entry--;
continue;
@@ -404,7 +404,7 @@ garbage_collect_leafentry(LEAFENTRY old_leaf_entry,
//
static void
msg_init_empty_ule(ULE ule, FT_MSG msg) {
- u_int32_t keylen = ft_msg_get_keylen(msg);
+ uint32_t keylen = ft_msg_get_keylen(msg);
void *keyp = ft_msg_get_key(msg);
ule_init_empty_ule(ule, keylen, keyp);
}
@@ -439,7 +439,7 @@ msg_modify_ule(ULE ule, FT_MSG msg) {
// show why this is necessary. We need to update the key with the exact
// bytes of the message
update_ule_key(ule, msg);
- u_int32_t vallen = ft_msg_get_vallen(msg);
+ uint32_t vallen = ft_msg_get_vallen(msg);
invariant(IS_VALID_LEN(vallen));
void * valp = ft_msg_get_val(msg);
ule_apply_insert(ule, xids, vallen, valp);
@@ -465,10 +465,10 @@ msg_modify_ule(ULE ule, FT_MSG msg) {
break;
case FT_UPDATE:
case FT_UPDATE_BROADCAST_ALL:
- assert(FALSE); // These messages don't get this far. Instead they get translated (in setval_fun in do_update) into FT_INSERT messages.
+ assert(false); // These messages don't get this far. Instead they get translated (in setval_fun in do_update) into FT_INSERT messages.
break;
default:
- assert(FALSE /* illegal FT_MSG.type */);
+ assert(false /* illegal FT_MSG.type */);
break;
}
}
@@ -602,12 +602,12 @@ le_unpack(ULE ule, LEAFENTRY le) {
}
break;
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
size_t memsize = le_memsize_from_ule(ule);
- assert(p == ((u_int8_t*)le) + memsize);
+ assert(p == ((uint8_t*)le) + memsize);
#endif
}
@@ -746,7 +746,7 @@ found_insert:;
new_leafentry->keylen = toku_htod32(ule->keylen);
//p always points to first unused byte after leafentry we are packing
- u_int8_t *p;
+ uint8_t *p;
invariant(ule->num_cuxrs>0);
//Type specific data
if (ule->num_cuxrs == 1 && ule->num_puxrs == 0) {
@@ -776,7 +776,7 @@ found_insert:;
// check should be "< MAX_TRANSACTION_RECORDS" or
// "< MAX_TRANSACTION_RECORDS - 1"
invariant(ule->num_puxrs < MAX_TRANSACTION_RECORDS);
- new_leafentry->u.mvcc.num_pxrs = (u_int8_t)ule->num_puxrs;
+ new_leafentry->u.mvcc.num_pxrs = (uint8_t)ule->num_puxrs;
//Store actual key.
memcpy(new_leafentry->u.mvcc.key_xrs, ule->keyp, ule->keylen);
@@ -995,7 +995,7 @@ leafentry_memsize (LEAFENTRY le) {
break;
}
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
@@ -1017,19 +1017,19 @@ leafentry_disksize (LEAFENTRY le) {
return leafentry_memsize(le);
}
-BOOL
+bool
le_is_clean(LEAFENTRY le) {
uint8_t type = le->type;
uint32_t rval;
switch (type) {
case LE_CLEAN:
- rval = TRUE;
+ rval = true;
break;
case LE_MVCC:;
- rval = FALSE;
+ rval = false;
break;
default:
- invariant(FALSE);
+ invariant(false);
}
return rval;
}
@@ -1064,7 +1064,7 @@ int le_latest_is_del(LEAFENTRY le) {
break;
}
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
@@ -1084,7 +1084,7 @@ int le_latest_is_del(LEAFENTRY le) {
// It is used to determine if a broadcast commit/abort message (look in ft-ops.c) should be applied to this leafentry
// If the outermost transactions match, then the broadcast commit/abort should be applied
//
-BOOL
+bool
le_has_xids(LEAFENTRY le, XIDS xids) {
//Read num_uxrs
uint32_t num_xids = xids_get_num_xids(xids);
@@ -1092,19 +1092,19 @@ le_has_xids(LEAFENTRY le, XIDS xids) {
TXNID xid = xids_get_xid(xids, 0);
invariant(xid!=TXNID_NONE);
- BOOL rval = (le_outermost_uncommitted_xid(le) == xid);
+ bool rval = (le_outermost_uncommitted_xid(le) == xid);
return rval;
}
-u_int32_t
+uint32_t
le_latest_keylen (LEAFENTRY le) {
- u_int32_t rval;
+ uint32_t rval;
rval = le_latest_is_del(le) ? 0 : le_keylen(le);
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
UXR uxr = ule_get_innermost_uxr(&ule);
- u_int32_t slow_rval;
+ uint32_t slow_rval;
if (uxr_is_insert(uxr)) {
slow_rval = ule.keylen;
}
@@ -1118,7 +1118,7 @@ le_latest_keylen (LEAFENTRY le) {
}
void*
-le_latest_val_and_len (LEAFENTRY le, u_int32_t *len) {
+le_latest_val_and_len (LEAFENTRY le, uint32_t *len) {
uint32_t keylen = toku_dtoh32(le->keylen);
uint8_t type = le->type;
void *valp;
@@ -1157,14 +1157,14 @@ le_latest_val_and_len (LEAFENTRY le, u_int32_t *len) {
}
break;
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
UXR uxr = ule_get_innermost_uxr(&ule);
void *slow_valp;
- u_int32_t slow_len;
+ uint32_t slow_len;
if (uxr_is_insert(uxr)) {
slow_valp = uxr->valp;
slow_len = uxr->vallen;
@@ -1198,9 +1198,9 @@ le_latest_val (LEAFENTRY le) {
}
//needed to be fast for statistics.
-u_int32_t
+uint32_t
le_latest_vallen (LEAFENTRY le) {
- u_int32_t rval;
+ uint32_t rval;
uint32_t keylen = toku_dtoh32(le->keylen);
uint8_t type = le->type;
uint8_t *p;
@@ -1234,13 +1234,13 @@ le_latest_vallen (LEAFENTRY le) {
}
break;
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
UXR uxr = ule_get_innermost_uxr(&ule);
- u_int32_t slow_rval;
+ uint32_t slow_rval;
if (uxr_is_insert(uxr))
slow_rval = uxr->vallen;
else
@@ -1253,7 +1253,7 @@ le_latest_vallen (LEAFENTRY le) {
//Return key and keylen unconditionally
void*
-le_key_and_len (LEAFENTRY le, u_int32_t *len) {
+le_key_and_len (LEAFENTRY le, uint32_t *len) {
*len = toku_dtoh32(le->keylen);
uint8_t type = le->type;
@@ -1266,13 +1266,13 @@ le_key_and_len (LEAFENTRY le, u_int32_t *len) {
keyp = le->u.mvcc.key_xrs;
break;
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
void *slow_keyp;
- u_int32_t slow_len;
+ uint32_t slow_len;
slow_keyp = ule.keyp;
slow_len = ule.keylen;
assert(slow_keyp == le_key(le));
@@ -1299,7 +1299,7 @@ le_key (LEAFENTRY le) {
rval = le->u.mvcc.key_xrs;
break;
default:
- invariant(FALSE);
+ invariant(false);
}
#if ULE_DEBUG
ULE_S ule;
@@ -1311,13 +1311,13 @@ le_key (LEAFENTRY le) {
return rval;
}
-u_int32_t
+uint32_t
le_keylen (LEAFENTRY le) {
- u_int32_t rval = toku_dtoh32(le->keylen);
+ uint32_t rval = toku_dtoh32(le->keylen);
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
- u_int32_t slow_rval = ule.keylen;
+ uint32_t slow_rval = ule.keylen;
assert(rval==slow_rval);
ule_cleanup(&ule);
#endif
@@ -1325,7 +1325,7 @@ le_keylen (LEAFENTRY le) {
}
-u_int64_t
+uint64_t
le_outermost_uncommitted_xid (LEAFENTRY le) {
uint64_t rval = TXNID_NONE;
@@ -1400,7 +1400,7 @@ print_leafentry (FILE *outf, LEAFENTRY le) {
// ule constructor
// Note that transaction 0 is explicit in the ule
static void
-ule_init_empty_ule(ULE ule, u_int32_t keylen, void * keyp) {
+ule_init_empty_ule(ULE ule, uint32_t keylen, void * keyp) {
ule->num_cuxrs = 1;
ule->num_puxrs = 0;
ule->keylen = keylen;
@@ -1481,10 +1481,10 @@ ule_promote_provisional_innermost_to_committed(ULE ule) {
ule->num_puxrs = 0; //Discard all provisional uxrs.
if (uxr_is_delete(old_innermost_uxr)) {
- ule_push_delete_uxr(ule, TRUE, old_outermost_uncommitted_uxr->xid);
+ ule_push_delete_uxr(ule, true, old_outermost_uncommitted_uxr->xid);
}
else {
- ule_push_insert_uxr(ule, TRUE,
+ ule_push_insert_uxr(ule, true,
old_outermost_uncommitted_uxr->xid,
old_innermost_uxr->vallen,
old_innermost_uxr->valp);
@@ -1505,10 +1505,10 @@ ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) {
TXNID new_innermost_xid = ule->uxrs[index].xid;
ule->num_puxrs = index - ule->num_cuxrs; //Discard old uxr at index (and everything inner)
if (uxr_is_delete(old_innermost_uxr)) {
- ule_push_delete_uxr(ule, FALSE, new_innermost_xid);
+ ule_push_delete_uxr(ule, false, new_innermost_xid);
}
else {
- ule_push_insert_uxr(ule, FALSE,
+ ule_push_insert_uxr(ule, false,
new_innermost_xid,
old_innermost_uxr->vallen,
old_innermost_uxr->valp);
@@ -1523,7 +1523,7 @@ ule_promote_provisional_innermost_to_index(ULE ule, uint32_t index) {
// Purpose is to apply an insert message to this leafentry:
static void
-ule_apply_insert(ULE ule, XIDS xids, u_int32_t vallen, void * valp) {
+ule_apply_insert(ULE ule, XIDS xids, uint32_t vallen, void * valp) {
ule_prepare_for_new_uxr(ule, xids);
TXNID this_xid = xids_get_innermost_xid(xids); // xid of transaction doing this insert
ule_push_insert_uxr(ule, this_xid == TXNID_NONE, this_xid, vallen, valp);
@@ -1612,7 +1612,7 @@ void ule_apply_commit(ULE ule, XIDS xids) {
// Purpose is to record an insert for this transaction (and set type correctly).
static void
-ule_push_insert_uxr(ULE ule, BOOL is_committed, TXNID xid, u_int32_t vallen, void * valp) {
+ule_push_insert_uxr(ULE ule, bool is_committed, TXNID xid, uint32_t vallen, void * valp) {
UXR uxr = ule_get_first_empty_uxr(ule);
if (is_committed) {
invariant(ule->num_puxrs==0);
@@ -1631,7 +1631,7 @@ ule_push_insert_uxr(ULE ule, BOOL is_committed, TXNID xid, u_int32_t vallen, voi
// is the root transaction, then truly delete the leafentry by marking the
// ule as empty.
static void
-ule_push_delete_uxr(ULE ule, BOOL is_committed, TXNID xid) {
+ule_push_delete_uxr(ULE ule, bool is_committed, TXNID xid) {
UXR uxr = ule_get_first_empty_uxr(ule);
if (is_committed) {
invariant(ule->num_puxrs==0);
@@ -1739,7 +1739,7 @@ ule_add_placeholders(ULE ule, XIDS xids) {
TXNID this_xid = xids_get_innermost_xid(xids); // xid of this transaction
invariant(this_xid!=TXNID_NONE);
if (ica_xid != this_xid) { // placeholders are needed only if this transaction is not the ICA
- u_int8_t index = outermost_xid_not_in_ule(ule, xids);
+ uint8_t index = outermost_xid_not_in_ule(ule, xids);
TXNID current_msg_xid = xids_get_xid(xids, index);
while (current_msg_xid != this_xid) { // Placeholder for each transaction before this transaction
ule_push_placeholder_uxr(ule, current_msg_xid);
@@ -1812,35 +1812,35 @@ ule_get_innermost_numbytes(ULE ule) {
// This layer of abstraction (uxr_xxx) understands uxr and nothing else.
//
-static inline BOOL
-uxr_type_is_insert(u_int8_t type) {
- BOOL rval = (BOOL)(type == XR_INSERT);
+static inline bool
+uxr_type_is_insert(uint8_t type) {
+ bool rval = (bool)(type == XR_INSERT);
return rval;
}
-BOOL
+bool
uxr_is_insert(UXR uxr) {
return uxr_type_is_insert(uxr->type);
}
-static inline BOOL
-uxr_type_is_delete(u_int8_t type) {
- BOOL rval = (BOOL)(type == XR_DELETE);
+static inline bool
+uxr_type_is_delete(uint8_t type) {
+ bool rval = (bool)(type == XR_DELETE);
return rval;
}
-BOOL
+bool
uxr_is_delete(UXR uxr) {
return uxr_type_is_delete(uxr->type);
}
-static inline BOOL
-uxr_type_is_placeholder(u_int8_t type) {
- BOOL rval = (BOOL)(type == XR_PLACEHOLDER);
+static inline bool
+uxr_type_is_placeholder(uint8_t type) {
+ bool rval = (bool)(type == XR_PLACEHOLDER);
return rval;
}
-BOOL
+bool
uxr_is_placeholder(UXR uxr) {
return uxr_type_is_placeholder(uxr->type);
}
@@ -1896,7 +1896,7 @@ ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
//
// Iterates over "possible" TXNIDs in a leafentry's stack, until one is accepted by 'f'. If the value
-// associated with the accepted TXNID is not an insert, then set *is_emptyp to TRUE, otherwise FALSE
+// associated with the accepted TXNID is not an insert, then set *is_delp to true, otherwise false
// The "possible" TXNIDs are:
// if provisionals exist, then the first possible TXNID is the outermost provisional.
// The next possible TXNIDs are the committed TXNIDs, from most recently committed to T_0.
@@ -1909,7 +1909,7 @@ ule_verify_xids(ULE ule, uint32_t interesting, TXNID *xids) {
// context - parameter for f
//
int
-le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, BOOL *is_delp, TOKUTXN context) {
+le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, bool *is_delp, TOKUTXN context) {
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
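The walk that le_iterate_is_del performs, per the comment block above: candidates are the outermost provisional record (if any) followed by the committed records from most recently committed down to T_0, and the first candidate accepted by the filter decides the deletedness. A simplified sketch; the real LE_ITERATE_CALLBACK/TOKUTXN protocol and its error codes are reduced to a bool-returning functor here:

#include <cstdint>

typedef uint64_t txnid_t;
struct xr_view { txnid_t xid; bool is_insert; };

// candidates[0..n) must already be ordered: outermost provisional first (if
// present), then committed records youngest to oldest (T_0 last).
template <typename AcceptFn>
static int iterate_is_del(const xr_view *candidates, int n,
                          AcceptFn accept, bool *is_delp) {
    for (int i = 0; i < n; i++) {
        if (accept(candidates[i].xid)) {
            *is_delp = !candidates[i].is_insert;  // non-insert => deleted
            return 0;
        }
    }
    return -1;  // sketch only: no candidate accepted
}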
@@ -1918,7 +1918,7 @@ le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, BOOL *is_delp, TOKUTXN co
//Read the keylen
uint8_t type = le->type;
int r;
- BOOL is_del = FALSE;
+ bool is_del = false;
switch (type) {
case LE_CLEAN: {
r = 0;
@@ -1969,7 +1969,7 @@ le_iterate_is_del(LEAFENTRY le, LE_ITERATE_CALLBACK f, BOOL *is_delp, TOKUTXN co
#endif
break;
default:
- invariant(FALSE);
+ invariant(false);
}
cleanup:
#if ULE_DEBUG
@@ -1995,7 +1995,7 @@ cleanup:
// context - parameter for f
//
int
-le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, u_int32_t *vallenp, TOKUTXN context) {
+le_iterate_val(LEAFENTRY le, LE_ITERATE_CALLBACK f, void** valpp, uint32_t *vallenp, TOKUTXN context) {
#if ULE_DEBUG
ULE_S ule;
le_unpack(&ule, le);
@@ -2088,7 +2088,7 @@ verify_is_empty:;
}
break;
default:
- invariant(FALSE);
+ invariant(false);
}
cleanup:
#if ULE_DEBUG
@@ -2155,23 +2155,23 @@ le_committed_mvcc(uint8_t *key, uint32_t keylen,
// This is an on-disk format. static_asserts verify everything is packed and aligned correctly.
struct __attribute__ ((__packed__)) leafentry_13 {
struct leafentry_committed_13 {
- u_int8_t key_val[0]; //Actual key, then actual val
+ uint8_t key_val[0]; //Actual key, then actual val
};
static_assert(0 == sizeof(leafentry_committed_13), "wrong size");
static_assert(0 == __builtin_offsetof(leafentry_committed_13, key_val), "wrong offset");
struct __attribute__ ((__packed__)) leafentry_provisional_13 {
- u_int8_t innermost_type;
+ uint8_t innermost_type;
TXNID xid_outermost_uncommitted;
- u_int8_t key_val_xrs[0]; //Actual key,
+ uint8_t key_val_xrs[0]; //Actual key,
//then actual innermost inserted val,
//then transaction records.
};
static_assert(9 == sizeof(leafentry_provisional_13), "wrong size");
static_assert(9 == __builtin_offsetof(leafentry_provisional_13, key_val_xrs), "wrong offset");
- u_int8_t num_xrs;
- u_int32_t keylen;
- u_int32_t innermost_inserted_vallen;
+ uint8_t num_xrs;
+ uint32_t keylen;
+ uint32_t innermost_inserted_vallen;
union __attribute__ ((__packed__)) {
struct leafentry_committed_13 comm;
struct leafentry_provisional_13 prov;
@@ -2204,7 +2204,7 @@ le_memsize_from_ule_13 (ULE ule) {
+ule->keylen //actual key
+1*num_uxrs //types
+8*(num_uxrs-1); //txnids
- u_int8_t i;
+ uint8_t i;
for (i = 0; i < num_uxrs; i++) {
UXR uxr = &ule->uxrs[i];
if (uxr_is_insert(uxr)) {
@@ -2237,9 +2237,9 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
ule->keylen = toku_dtoh32(le->keylen);
//Read the vallen of innermost insert
- u_int32_t vallen_of_innermost_insert = toku_dtoh32(le->innermost_inserted_vallen);
+ uint32_t vallen_of_innermost_insert = toku_dtoh32(le->innermost_inserted_vallen);
- u_int8_t *p;
+ uint8_t *p;
if (num_xrs == 1) {
//Unpack a 'committed leafentry' (No uncommitted transactions exist)
ule->keyp = le->u.comm.key_val;
@@ -2255,7 +2255,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
//Unpack a 'provisional leafentry' (Uncommitted transactions exist)
//Read in type.
- u_int8_t innermost_type = le->u.prov.innermost_type;
+ uint8_t innermost_type = le->u.prov.innermost_type;
assert(!uxr_type_is_placeholder(innermost_type));
//Read in xid
@@ -2265,12 +2265,12 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
ule->keyp = le->u.prov.key_val_xrs;
//Read pointer to innermost inserted val (immediately after key)
- u_int8_t *valp_of_innermost_insert = &le->u.prov.key_val_xrs[ule->keylen];
+ uint8_t *valp_of_innermost_insert = &le->u.prov.key_val_xrs[ule->keylen];
//Point p to immediately after 'header'
p = &le->u.prov.key_val_xrs[ule->keylen + vallen_of_innermost_insert];
- BOOL found_innermost_insert = FALSE;
+ bool found_innermost_insert = false;
int i; //Index in ULE.uxrs[]
//Loop inner to outer
for (i = num_xrs - 1; i >= 0; i--) {
@@ -2306,7 +2306,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
if (uxr_is_insert(uxr)) {
if (found_innermost_insert) {
//Not the innermost insert. Load vallen/valp
- uxr->vallen = toku_dtoh32(*(u_int32_t*)p);
+ uxr->vallen = toku_dtoh32(*(uint32_t*)p);
p += 4;
uxr->valp = p;
@@ -2316,7 +2316,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
//Innermost insert, load the vallen/valp previously read from header
uxr->vallen = vallen_of_innermost_insert;
uxr->valp = valp_of_innermost_insert;
- found_innermost_insert = TRUE;
+ found_innermost_insert = true;
}
}
}
@@ -2324,7 +2324,7 @@ le_unpack_13(ULE ule, LEAFENTRY_13 le) {
}
#if ULE_DEBUG
size_t memsize = le_memsize_from_ule_13(ule);
- assert(p == ((u_int8_t*)le) + memsize);
+ assert(p == ((uint8_t*)le) + memsize);
#endif
}
diff --git a/ft/ule.h b/ft/ule.h
index e4f91f872a9..f1b8053c040 100644
--- a/ft/ule.h
+++ b/ft/ule.h
@@ -34,9 +34,9 @@ int ule_is_provisional(ULEHANDLE ule, uint64_t ith);
void *ule_get_key(ULEHANDLE ule);
uint32_t ule_get_keylen(ULEHANDLE ule);
-BOOL uxr_is_insert(UXRHANDLE uxr);
-BOOL uxr_is_delete(UXRHANDLE uxr);
-BOOL uxr_is_placeholder(UXRHANDLE uxr);
+bool uxr_is_insert(UXRHANDLE uxr);
+bool uxr_is_delete(UXRHANDLE uxr);
+bool uxr_is_placeholder(UXRHANDLE uxr);
void *uxr_get_val(UXRHANDLE uxr);
uint32_t uxr_get_vallen(UXRHANDLE uxr);
TXNID uxr_get_txnid(UXRHANDLE uxr);
diff --git a/ft/wbuf.h b/ft/wbuf.h
index 777218d09af..a5d4756afb7 100644
--- a/ft/wbuf.h
+++ b/ft/wbuf.h
@@ -50,7 +50,7 @@ static inline void wbuf_nocrc_char (struct wbuf *w, unsigned char ch) {
}
/* Write a character. */
-static inline void wbuf_nocrc_u_int8_t (struct wbuf *w, u_int8_t ch) {
+static inline void wbuf_nocrc_uint8_t (struct wbuf *w, uint8_t ch) {
assert(w->ndone<w->size);
w->buf[w->ndone++]=ch;
}
@@ -64,7 +64,7 @@ static inline void wbuf_char (struct wbuf *w, unsigned char ch) {
static void wbuf_network_int (struct wbuf *w, int32_t i) __attribute__((__unused__));
static void wbuf_network_int (struct wbuf *w, int32_t i) {
assert(w->ndone + 4 <= w->size);
- *(u_int32_t*)(&w->buf[w->ndone]) = toku_htonl(i);
+ *(uint32_t*)(&w->buf[w->ndone]) = toku_htonl(i);
x1764_add(&w->checksum, &w->buf[w->ndone], 4);
w->ndone += 4;
}
@@ -83,7 +83,7 @@ static inline void wbuf_nocrc_int (struct wbuf *w, int32_t i) {
w->buf[w->ndone+2] = i>>8;
w->buf[w->ndone+3] = i>>0;
#else
- *(u_int32_t*)(&w->buf[w->ndone]) = toku_htod32(i);
+ *(uint32_t*)(&w->buf[w->ndone]) = toku_htod32(i);
#endif
w->ndone += 4;
#endif
@@ -94,15 +94,15 @@ static inline void wbuf_int (struct wbuf *w, int32_t i) {
x1764_add(&w->checksum, &w->buf[w->ndone-4], 4);
}
-static inline void wbuf_nocrc_uint (struct wbuf *w, u_int32_t i) {
+static inline void wbuf_nocrc_uint (struct wbuf *w, uint32_t i) {
wbuf_nocrc_int(w, (int32_t)i);
}
-static inline void wbuf_uint (struct wbuf *w, u_int32_t i) {
+static inline void wbuf_uint (struct wbuf *w, uint32_t i) {
wbuf_int(w, (int32_t)i);
}
-static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, bytevec bytes_bv, u_int32_t nbytes) {
+static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) {
const unsigned char *bytes = (const unsigned char *) bytes_bv;
#if 0
{ int i; for (i=0; i<nbytes; i++) wbuf_nocrc_char(w, bytes[i]); }
@@ -113,42 +113,42 @@ static inline void wbuf_nocrc_literal_bytes(struct wbuf *w, bytevec bytes_bv, u_
#endif
}
-static inline void wbuf_literal_bytes(struct wbuf *w, bytevec bytes_bv, u_int32_t nbytes) {
+static inline void wbuf_literal_bytes(struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) {
wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes);
x1764_add(&w->checksum, &w->buf[w->ndone-nbytes], nbytes);
}
-static void wbuf_nocrc_bytes (struct wbuf *w, bytevec bytes_bv, u_int32_t nbytes) {
+static void wbuf_nocrc_bytes (struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) {
wbuf_nocrc_uint(w, nbytes);
wbuf_nocrc_literal_bytes(w, bytes_bv, nbytes);
}
-static void wbuf_bytes (struct wbuf *w, bytevec bytes_bv, u_int32_t nbytes) {
+static void wbuf_bytes (struct wbuf *w, bytevec bytes_bv, uint32_t nbytes) {
wbuf_uint(w, nbytes);
wbuf_literal_bytes(w, bytes_bv, nbytes);
}
-static void wbuf_nocrc_ulonglong (struct wbuf *w, u_int64_t ull) {
- wbuf_nocrc_uint(w, (u_int32_t)(ull>>32));
- wbuf_nocrc_uint(w, (u_int32_t)(ull&0xFFFFFFFF));
+static void wbuf_nocrc_ulonglong (struct wbuf *w, uint64_t ull) {
+ wbuf_nocrc_uint(w, (uint32_t)(ull>>32));
+ wbuf_nocrc_uint(w, (uint32_t)(ull&0xFFFFFFFF));
}
-static void wbuf_ulonglong (struct wbuf *w, u_int64_t ull) {
- wbuf_uint(w, (u_int32_t)(ull>>32));
- wbuf_uint(w, (u_int32_t)(ull&0xFFFFFFFF));
+static void wbuf_ulonglong (struct wbuf *w, uint64_t ull) {
+ wbuf_uint(w, (uint32_t)(ull>>32));
+ wbuf_uint(w, (uint32_t)(ull&0xFFFFFFFF));
}
-static inline void wbuf_nocrc_u_int64_t(struct wbuf *w, u_int64_t ull) {
+static inline void wbuf_nocrc_uint64_t(struct wbuf *w, uint64_t ull) {
wbuf_nocrc_ulonglong(w, ull);
}
-static inline void wbuf_u_int64_t(struct wbuf *w, u_int64_t ull) {
+static inline void wbuf_uint64_t(struct wbuf *w, uint64_t ull) {
wbuf_ulonglong(w, ull);
}
-static inline void wbuf_nocrc_BOOL (struct wbuf *w, BOOL b) {
- wbuf_nocrc_u_int8_t(w, (u_int8_t)(b ? 1 : 0));
+static inline void wbuf_nocrc_bool (struct wbuf *w, bool b) {
+ wbuf_nocrc_uint8_t(w, (uint8_t)(b ? 1 : 0));
}
static inline void wbuf_nocrc_BYTESTRING (struct wbuf *w, BYTESTRING v) {
@@ -159,20 +159,20 @@ static inline void wbuf_BYTESTRING (struct wbuf *w, BYTESTRING v) {
wbuf_bytes(w, v.data, v.len);
}
-static inline void wbuf_u_int8_t (struct wbuf *w, u_int8_t v) {
+static inline void wbuf_uint8_t (struct wbuf *w, uint8_t v) {
wbuf_char(w, v);
}
-static inline void wbuf_nocrc_u_int32_t (struct wbuf *w, u_int32_t v) {
+static inline void wbuf_nocrc_uint32_t (struct wbuf *w, uint32_t v) {
wbuf_nocrc_uint(w, v);
}
-static inline void wbuf_u_int32_t (struct wbuf *w, u_int32_t v) {
+static inline void wbuf_uint32_t (struct wbuf *w, uint32_t v) {
wbuf_uint(w, v);
}
static inline void wbuf_DISKOFF (struct wbuf *w, DISKOFF off) {
- wbuf_ulonglong(w, (u_int64_t)off);
+ wbuf_ulonglong(w, (uint64_t)off);
}
static inline void wbuf_BLOCKNUM (struct wbuf *w, BLOCKNUM b) {
@@ -191,9 +191,9 @@ static inline void wbuf_TXNID (struct wbuf *w, TXNID tid) {
}
static inline void wbuf_nocrc_XIDP (struct wbuf *w, XIDP xid) {
- wbuf_nocrc_u_int32_t(w, xid->formatID);
- wbuf_nocrc_u_int8_t(w, xid->gtrid_length);
- wbuf_nocrc_u_int8_t(w, xid->bqual_length);
+ wbuf_nocrc_uint32_t(w, xid->formatID);
+ wbuf_nocrc_uint8_t(w, xid->gtrid_length);
+ wbuf_nocrc_uint8_t(w, xid->bqual_length);
wbuf_nocrc_literal_bytes(w, xid->data, xid->gtrid_length+xid->bqual_length);
}
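Two layout details from the hunks above are worth spelling out: wbuf_ulonglong emits a 64-bit value as two 32-bit writes with the high half first, and wbuf_nocrc_bool emits a bool as a single 0/1 byte. A minimal standalone append buffer mirroring those layouts (toku_htod32 is replaced by a plain memcpy here, so the per-word byte order is host order rather than the disk order the real wbuf uses):

#include <cstdint>
#include <cstring>
#include <vector>

struct minibuf { std::vector<uint8_t> bytes; };

static void put_u32(minibuf *w, uint32_t v) {
    uint8_t tmp[4];
    memcpy(tmp, &v, 4);                 // stand-in for toku_htod32
    w->bytes.insert(w->bytes.end(), tmp, tmp + 4);
}
static void put_u64(minibuf *w, uint64_t v) {
    put_u32(w, (uint32_t)(v >> 32));            // high half first
    put_u32(w, (uint32_t)(v & 0xFFFFFFFF));     // then low half
}
static void put_bool(minibuf *w, bool b) {
    w->bytes.push_back((uint8_t)(b ? 1 : 0));   // one byte, as wbuf_nocrc_bool
}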
diff --git a/ft/x1764-speedup/x1764-speedup-test.cc b/ft/x1764-speedup/x1764-speedup-test.cc
index f6330a6cbb8..8b7157a339f 100644
--- a/ft/x1764-speedup/x1764-speedup-test.cc
+++ b/ft/x1764-speedup/x1764-speedup-test.cc
@@ -9,19 +9,19 @@
#include <sys/time.h>
#include <sys/types.h>
-u_int64_t x1764_simple (const u_int64_t *buf, size_t len)
+uint64_t x1764_simple (const uint64_t *buf, size_t len)
{
- u_int64_t sum=0;
+ uint64_t sum=0;
for (size_t i=0; i<len ;i++) {
sum = sum*17 + buf[i];
}
return sum;
}
-u_int64_t x1764_2x (const u_int64_t *buf, size_t len)
+uint64_t x1764_2x (const uint64_t *buf, size_t len)
{
assert(len%2==0);
- u_int64_t suma=0, sumb=0;
+ uint64_t suma=0, sumb=0;
for (size_t i=0; i<len ;i+=2) {
suma = suma*(17L*17L) + buf[i];
sumb = sumb*(17L*17L) + buf[i+1];
@@ -29,23 +29,23 @@ u_int64_t x1764_2x (const u_int64_t *buf, size_t len)
return suma*17+sumb;
}
-u_int64_t x1764_3x (const u_int64_t *buf, size_t len)
+uint64_t x1764_3x (const uint64_t *buf, size_t len)
{
assert(len%3==0);
- u_int64_t suma=0, sumb=0, sumc=0;
+ uint64_t suma=0, sumb=0, sumc=0;
for (size_t i=0; i<len ;i+=3) {
suma = suma*(17LL*17LL*17LL) + buf[i];
sumb = sumb*(17LL*17LL*17LL) + buf[i+1];
sumc = sumc*(17LL*17LL*17LL) + buf[i+2];
}
- u_int64_t r = suma*17L*17L + sumb*17L + sumc;
+ uint64_t r = suma*17L*17L + sumb*17L + sumc;
return r;
}
-u_int64_t x1764_4x (const u_int64_t *buf, size_t len)
+uint64_t x1764_4x (const uint64_t *buf, size_t len)
{
assert(len%4==0);
- u_int64_t suma=0, sumb=0, sumc=0, sumd=0;
+ uint64_t suma=0, sumb=0, sumc=0, sumd=0;
for (size_t i=0; i<len ;i+=4) {
suma = suma*(17LL*17LL*17LL*17LL) + buf[i];
sumb = sumb*(17LL*17LL*17LL*17LL) + buf[i+1];
@@ -67,13 +67,13 @@ int main (int argc, char *argv[]) {
struct timeval start,end,end2,end3,end4;
for (int i=0; i<size; i++) data[i]=i*i+j;
gettimeofday(&start, 0);
- u_int64_t s = x1764_simple((u_int64_t*)data, size/sizeof(u_int64_t));
+ uint64_t s = x1764_simple((uint64_t*)data, size/sizeof(uint64_t));
gettimeofday(&end, 0);
- u_int64_t s2 = x1764_2x((u_int64_t*)data, size/sizeof(u_int64_t));
+ uint64_t s2 = x1764_2x((uint64_t*)data, size/sizeof(uint64_t));
gettimeofday(&end2, 0);
- u_int64_t s3 = x1764_3x((u_int64_t*)data, size/sizeof(u_int64_t));
+ uint64_t s3 = x1764_3x((uint64_t*)data, size/sizeof(uint64_t));
gettimeofday(&end3, 0);
- u_int64_t s4 = x1764_4x((u_int64_t*)data, size/sizeof(u_int64_t));
+ uint64_t s4 = x1764_4x((uint64_t*)data, size/sizeof(uint64_t));
gettimeofday(&end4, 0);
assert(s==s2);
assert(s==s3);
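The speedup variants above all compute the same value because the mod-2^64 polynomial sum S = sum_i buf[i]*17^(n-1-i) can be split into k interleaved lanes that step by 17^k and then be recombined with powers of 17. A runnable check of that identity for the 4-lane case, independent of the ft sources:

#include <cassert>
#include <cstddef>
#include <cstdint>

static uint64_t sum_simple(const uint64_t *buf, size_t len) {
    uint64_t s = 0;
    for (size_t i = 0; i < len; i++) s = s*17 + buf[i];
    return s;
}

static uint64_t sum_4x(const uint64_t *buf, size_t len) {
    assert(len % 4 == 0);
    const uint64_t k4 = 17ULL*17*17*17;   // lane step: 17^4
    uint64_t a = 0, b = 0, c = 0, d = 0;
    for (size_t i = 0; i < len; i += 4) {
        a = a*k4 + buf[i];
        b = b*k4 + buf[i+1];
        c = c*k4 + buf[i+2];
        d = d*k4 + buf[i+3];
    }
    return a*17*17*17 + b*17*17 + c*17 + d;   // recombine the lanes
}

int main(void) {
    uint64_t v[8];
    for (int i = 0; i < 8; i++) v[i] = 0x0123456789abcdefULL * (i + 1);
    assert(sum_simple(v, 8) == sum_4x(v, 8));
    return 0;
}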
diff --git a/ft/x1764.cc b/ft/x1764.cc
index 6181633ea08..4790288e3d9 100644
--- a/ft/x1764.cc
+++ b/ft/x1764.cc
@@ -9,10 +9,10 @@
#define PRINT 0
-u_int32_t x1764_memory_simple (const void *buf, int len)
+uint32_t x1764_memory_simple (const void *buf, int len)
{
- const u_int64_t *CAST_FROM_VOIDP(lbuf, buf);
- u_int64_t c=0;
+ const uint64_t *CAST_FROM_VOIDP(lbuf, buf);
+ uint64_t c=0;
while (len>=8) {
c = c*17 + *lbuf;
if (PRINT) printf("%d: c=%016" PRIx64 " sum=%016" PRIx64 "\n", __LINE__, *lbuf, c);
@@ -20,41 +20,41 @@ u_int32_t x1764_memory_simple (const void *buf, int len)
len-=8;
}
if (len>0) {
- const u_int8_t *cbuf=(u_int8_t*)lbuf;
+ const uint8_t *cbuf=(uint8_t*)lbuf;
int i;
- u_int64_t input=0;
+ uint64_t input=0;
for (i=0; i<len; i++) {
- input |= ((u_int64_t)(cbuf[i]))<<(8*i);
+ input |= ((uint64_t)(cbuf[i]))<<(8*i);
}
c = c*17 + input;
}
return ~((c&0xFFFFFFFF) ^ (c>>32));
}
-u_int32_t x1764_memory (const void *vbuf, int len)
+uint32_t x1764_memory (const void *vbuf, int len)
{
- const u_int8_t *CAST_FROM_VOIDP(buf, vbuf);
- int len_4_words = 4*sizeof(u_int64_t);
- u_int64_t suma=0, sumb=0, sumc=0, sumd=0;
+ const uint8_t *CAST_FROM_VOIDP(buf, vbuf);
+ int len_4_words = 4*sizeof(uint64_t);
+ uint64_t suma=0, sumb=0, sumc=0, sumd=0;
while (len >= len_4_words) {
- suma = suma*(17LL*17LL*17LL*17LL) + *(u_int64_t*)(buf +0*sizeof(u_int64_t));
- sumb = sumb*(17LL*17LL*17LL*17LL) + *(u_int64_t*)(buf +1*sizeof(u_int64_t));
- sumc = sumc*(17LL*17LL*17LL*17LL) + *(u_int64_t*)(buf +2*sizeof(u_int64_t));
- sumd = sumd*(17LL*17LL*17LL*17LL) + *(u_int64_t*)(buf +3*sizeof(u_int64_t));
+ suma = suma*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +0*sizeof(uint64_t));
+ sumb = sumb*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +1*sizeof(uint64_t));
+ sumc = sumc*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +2*sizeof(uint64_t));
+ sumd = sumd*(17LL*17LL*17LL*17LL) + *(uint64_t*)(buf +3*sizeof(uint64_t));
buf += len_4_words;
len -= len_4_words;
}
- u_int64_t sum = suma*17L*17L*17L + sumb*17L*17L + sumc*17L + sumd;
+ uint64_t sum = suma*17L*17L*17L + sumb*17L*17L + sumc*17L + sumd;
assert(len>=0);
- while ((u_int64_t)len>=sizeof(u_int64_t)) {
- sum = sum*17 + *(u_int64_t*)buf;
- buf+=sizeof(u_int64_t);
- len-=sizeof(u_int64_t);
+ while ((uint64_t)len>=sizeof(uint64_t)) {
+ sum = sum*17 + *(uint64_t*)buf;
+ buf+=sizeof(uint64_t);
+ len-=sizeof(uint64_t);
}
if (len>0) {
- u_int64_t tailsum = 0;
+ uint64_t tailsum = 0;
for (int i=0; i<len; i++) {
- tailsum |= ((u_int64_t)(buf[i]))<<(8*i);
+ tailsum |= ((uint64_t)(buf[i]))<<(8*i);
}
sum = sum*17 + tailsum;
}
@@ -74,7 +74,7 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
const unsigned char *CAST_FROM_VOIDP(cbuf, vbuf);
// Special case short inputs
if (len==1) {
- u_int64_t input = l->input | ((u_int64_t)(*cbuf))<<(8*n_input_bytes);
+ uint64_t input = l->input | ((uint64_t)(*cbuf))<<(8*n_input_bytes);
n_input_bytes++;
if (n_input_bytes==8) {
l->sum = l->sum*17 + input;
@@ -86,8 +86,8 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
}
return;
} else if (len==2) {
- u_int64_t input = l->input;
- u_int64_t thisv = ((u_int64_t)(*(u_int16_t*)cbuf));
+ uint64_t input = l->input;
+ uint64_t thisv = ((uint64_t)(*(uint16_t*)cbuf));
if (n_input_bytes==7) {
l->sum = l->sum*17 + (input | (thisv<<(8*7)));
l->input = thisv>>8;
@@ -103,14 +103,14 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
return;
}
- u_int64_t sum;
+ uint64_t sum;
//assert(len>=0);
if (n_input_bytes) {
- u_int64_t input = l->input;
+ uint64_t input = l->input;
if (len>=8) {
sum = l->sum;
while (len>=8) {
- u_int64_t thisv = *(u_int64_t*)cbuf;
+ uint64_t thisv = *(uint64_t*)cbuf;
input |= thisv<<(8*n_input_bytes);
sum = sum*17 + input;
if (PRINT) printf("%d: input=%016" PRIx64 " sum=%016" PRIx64 "\n", __LINE__, input, sum);
@@ -124,7 +124,7 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
l->sum = sum;
}
if (len>=4) {
- u_int64_t thisv = *(u_int32_t*)cbuf;
+ uint64_t thisv = *(uint32_t*)cbuf;
if (n_input_bytes<4) {
input |= thisv<<(8*n_input_bytes);
if (PRINT) printf("%d: input=%016" PRIx64 "\n", __LINE__, input);
@@ -143,7 +143,7 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
}
//assert(n_input_bytes<=8);
while (n_input_bytes<8 && len) {
- input |= ((u_int64_t)(*cbuf))<<(8*n_input_bytes);
+ input |= ((uint64_t)(*cbuf))<<(8*n_input_bytes);
n_input_bytes++;
cbuf++;
len--;
@@ -163,44 +163,44 @@ void x1764_add (struct x1764 *l, const void *vbuf, int len) {
}
//assert(len>=0);
while (len>=8) {
- sum = sum*17 + *(u_int64_t*)cbuf;
+ sum = sum*17 + *(uint64_t*)cbuf;
cbuf+=8;
len -=8;
}
l->sum = sum;
n_input_bytes = 0;
- u_int64_t input;
+ uint64_t input;
l->n_input_bytes = len;
// Surprisingly, the loop is the fastest on Bradley's laptop.
if (1) {
int i;
input=0;
for (i=0; i<len; i++) {
- input |= ((u_int64_t)(cbuf[i]))<<(8*i);
+ input |= ((uint64_t)(cbuf[i]))<<(8*i);
}
} else if (0) {
switch (len) {
- case 7: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(u_int16_t*)(cbuf+4)))<<32) | (((u_int64_t)(*(cbuf+4)))<<48); break;
- case 6: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(u_int16_t*)(cbuf+4)))<<32); break;
- case 5: input = ((u_int64_t)(*(u_int32_t*)(cbuf))) | (((u_int64_t)(*(cbuf+4)))<<32); break;
- case 4: input = ((u_int64_t)(*(u_int32_t*)(cbuf))); break;
- case 3: input = ((u_int64_t)(*(u_int16_t*)(cbuf))) | (((u_int64_t)(*(cbuf+2)))<<16); break;
- case 2: input = ((u_int64_t)(*(u_int16_t*)(cbuf))); break;
- case 1: input = ((u_int64_t)(*cbuf)); break;
+ case 7: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32) | (((uint64_t)(*(cbuf+4)))<<48); break;
+ case 6: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(uint16_t*)(cbuf+4)))<<32); break;
+ case 5: input = ((uint64_t)(*(uint32_t*)(cbuf))) | (((uint64_t)(*(cbuf+4)))<<32); break;
+ case 4: input = ((uint64_t)(*(uint32_t*)(cbuf))); break;
+ case 3: input = ((uint64_t)(*(uint16_t*)(cbuf))) | (((uint64_t)(*(cbuf+2)))<<16); break;
+ case 2: input = ((uint64_t)(*(uint16_t*)(cbuf))); break;
+ case 1: input = ((uint64_t)(*cbuf)); break;
case 0: input = 0; break;
default: abort();
}
} else {
input=0;
int i=0;
- if (len>=4) { input = ((u_int64_t)(*(u_int32_t*)(cbuf))); cbuf+=4; len-=4; i=4;}
- if (len>=2) { input |= ((u_int64_t)(*(u_int16_t*)(cbuf)))<<(i*8); cbuf+=2; len-=2; i+=2; }
- if (len>=1) { input |= ((u_int64_t)(*(u_int8_t *)(cbuf)))<<(i*8); /*cbuf+=1; len-=1; i++;*/ }
+ if (len>=4) { input = ((uint64_t)(*(uint32_t*)(cbuf))); cbuf+=4; len-=4; i=4;}
+ if (len>=2) { input |= ((uint64_t)(*(uint16_t*)(cbuf)))<<(i*8); cbuf+=2; len-=2; i+=2; }
+ if (len>=1) { input |= ((uint64_t)(*(uint8_t *)(cbuf)))<<(i*8); /*cbuf+=1; len-=1; i++;*/ }
}
l->input = input;
if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
}
-u_int32_t x1764_finish (struct x1764 *l) {
+uint32_t x1764_finish (struct x1764 *l) {
if (PRINT) printf("%d: n_input_bytes=%d\n", __LINE__, l->n_input_bytes);
int len = l->n_input_bytes;
if (len>0) {
diff --git a/ft/x1764.h b/ft/x1764.h
index 67752ebec6e..f72c06c3e5a 100644
--- a/ft/x1764.h
+++ b/ft/x1764.h
@@ -15,17 +15,17 @@
// If any odd bytes are left at the end, they are filled in at the low end.
-u_int32_t x1764_memory (const void *buf, int len);
+uint32_t x1764_memory (const void *buf, int len);
// Effect: Compute x1764 on the bytes of buf. Return the 32 bit answer.
-u_int32_t x1764_memory_simple (const void *buf, int len);
+uint32_t x1764_memory_simple (const void *buf, int len);
// Effect: Same as x1764_memory, but not highly optimized (more likely to be correct). Useful for testing the optimized version.
// For incrementally computing an x1764, use the following interfaces.
struct x1764 {
- u_int64_t sum;
- u_int64_t input;
+ uint64_t sum;
+ uint64_t input;
int n_input_bytes;
};
@@ -35,7 +35,7 @@ void x1764_init(struct x1764 *l);
void x1764_add (struct x1764 *l, const void *vbuf, int len);
// Effect: Add more bytes to *l.
-u_int32_t x1764_finish (struct x1764 *l);
+uint32_t x1764_finish (struct x1764 *l);
// Effect: Return the final 32-bit result.
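The two interfaces declared above agree however the input is split; that is exactly what test2 in ft/tests/x1764-test.cc asserts with its stride loops. A short usage sketch, assuming x1764.h and its implementation are on the include and link paths:

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "x1764.h"

static void checksum_in_two_pieces(void) {
    const char data[] = "hello, checksum";
    int len = (int)strlen(data);
    uint32_t whole = x1764_memory(data, len);    // one-shot
    struct x1764 cs;                             // incremental
    x1764_init(&cs);
    x1764_add(&cs, data, 5);                     // any split point works
    x1764_add(&cs, data + 5, len - 5);
    assert(x1764_finish(&cs) == whole);
}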
diff --git a/ft/xids-internal.h b/ft/xids-internal.h
index afc1331fba4..f3d37fd08f0 100644
--- a/ft/xids-internal.h
+++ b/ft/xids-internal.h
@@ -20,7 +20,7 @@
#endif
typedef struct __attribute__((__packed__)) xids_t {
- u_int8_t num_xids; // maximum value of MAX_TRANSACTION_RECORDS - 1 ...
+ uint8_t num_xids; // maximum value of MAX_TRANSACTION_RECORDS - 1 ...
// ... because transaction 0 is implicit
TXNID ids[];
} XIDS_S;
diff --git a/ft/xids.cc b/ft/xids.cc
index 86803c1ca30..b25cfabfed7 100644
--- a/ft/xids.cc
+++ b/ft/xids.cc
@@ -69,7 +69,7 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
// xids_p points to an xids that is an exact copy of parent_xids, but with room for one more xid.
int rval;
invariant(parent_xids);
- u_int32_t num_child_xids = parent_xids->num_xids + 1;
+ uint32_t num_child_xids = parent_xids->num_xids + 1;
invariant(num_child_xids > 0);
invariant(num_child_xids <= MAX_TRANSACTION_RECORDS);
if (num_child_xids == MAX_TRANSACTION_RECORDS) rval = EINVAL;
@@ -91,7 +91,7 @@ xids_finalize_with_child(XIDS xids, TXNID this_xid) {
// - All error checking (except that this_xid is higher than its parent) is already complete
invariant(this_xid > xids_get_innermost_xid(xids));
TXNID this_xid_disk = toku_htod64(this_xid);
- u_int32_t num_child_xids = ++xids->num_xids;
+ uint32_t num_child_xids = ++xids->num_xids;
xids->ids[num_child_xids - 1] = this_xid_disk;
}
@@ -111,11 +111,11 @@ xids_create_child(XIDS parent_xids, // xids list for parent transaction
void
xids_create_from_buffer(struct rbuf *rb, // buffer from which the xids list is read
XIDS * xids_p) { // xids list created
- u_int8_t num_xids = rbuf_char(rb);
+ uint8_t num_xids = rbuf_char(rb);
invariant(num_xids < MAX_TRANSACTION_RECORDS);
XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(sizeof(*xids) + num_xids*sizeof(xids->ids[0])));
xids->num_xids = num_xids;
- u_int8_t index;
+ uint8_t index;
for (index = 0; index < xids->num_xids; index++) {
rbuf_TXNID(rb, &xids->ids[index]);
if (index > 0)
@@ -136,7 +136,7 @@ xids_destroy(XIDS *xids_p) {
// If requesting an xid out of range (which will be the case if xids array is empty)
// then return 0, the xid of the root transaction.
TXNID
-xids_get_xid(XIDS xids, u_int8_t index) {
+xids_get_xid(XIDS xids, uint8_t index) {
invariant(index < xids_get_num_xids(xids));
TXNID rval = xids->ids[index];
rval = toku_dtoh64(rval);
@@ -145,9 +145,9 @@ xids_get_xid(XIDS xids, u_int8_t index) {
// This function assumes that target_xid IS in the list
// of xids.
-u_int8_t
+uint8_t
xids_find_index_of_xid(XIDS xids, TXNID target_xid) {
- u_int8_t index = 0; // search outer to inner
+ uint8_t index = 0; // search outer to inner
TXNID current_xid = xids_get_xid(xids, index);
while (current_xid != target_xid) {
invariant(current_xid < target_xid);
@@ -157,9 +157,9 @@ xids_find_index_of_xid(XIDS xids, TXNID target_xid) {
return index;
}
-u_int8_t
+uint8_t
xids_get_num_xids(XIDS xids) {
- u_int8_t rval = xids->num_xids;
+ uint8_t rval = xids->num_xids;
return rval;
}
@@ -170,7 +170,7 @@ xids_get_innermost_xid(XIDS xids) {
TXNID rval = TXNID_NONE;
if (xids_get_num_xids(xids)) {
// if clause above makes this cast ok
- u_int8_t innermost_xid = (u_int8_t)(xids_get_num_xids(xids)-1);
+ uint8_t innermost_xid = (uint8_t)(xids_get_num_xids(xids)-1);
rval = xids_get_xid(xids, innermost_xid);
}
return rval;
@@ -191,18 +191,18 @@ xids_cpy(XIDS target, XIDS source) {
}
// return size in bytes
-u_int32_t
+uint32_t
xids_get_size(XIDS xids){
- u_int32_t rval;
- u_int8_t num_xids = xids->num_xids;
+ uint32_t rval;
+ uint8_t num_xids = xids->num_xids;
rval = sizeof(*xids) + num_xids * sizeof(xids->ids[0]);
return rval;
}
-u_int32_t
+uint32_t
xids_get_serialize_size(XIDS xids){
- u_int32_t rval;
- u_int8_t num_xids = xids->num_xids;
+ uint32_t rval;
+ uint8_t num_xids = xids->num_xids;
rval = 1 + //num xids
8 * num_xids;
return rval;
@@ -216,8 +216,8 @@ toku_calc_more_murmur_xids (struct x1764 *mm, XIDS xids) {
x1764_add(mm, &xids->num_xids, 1);
TXNID zero = 0;
x1764_add(mm, &zero, 8);
- u_int8_t index;
- u_int8_t num_xids = xids_get_num_xids(xids);
+ uint8_t index;
+ uint8_t num_xids = xids_get_num_xids(xids);
for (index = 0; index < num_xids; index++) {
TXNID current_xid = xids_get_xid(xids, index);
x1764_add(mm, &current_xid, 8);
@@ -232,7 +232,7 @@ xids_get_end_of_array(XIDS xids) {
void wbuf_xids(struct wbuf *wb, XIDS xids) {
wbuf_char(wb, (unsigned char)xids->num_xids);
- u_int8_t index;
+ uint8_t index;
for (index = 0; index < xids->num_xids; index++) {
wbuf_TXNID(wb, xids->ids[index]);
}
@@ -240,7 +240,7 @@ void wbuf_xids(struct wbuf *wb, XIDS xids) {
void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids) {
wbuf_nocrc_char(wb, (unsigned char)xids->num_xids);
- u_int8_t index;
+ uint8_t index;
for (index = 0; index < xids->num_xids; index++) {
wbuf_nocrc_TXNID(wb, xids->ids[index]);
}
@@ -248,7 +248,7 @@ void wbuf_nocrc_xids(struct wbuf *wb, XIDS xids) {
void
xids_fprintf(FILE* fp, XIDS xids) {
- u_int8_t index;
+ uint8_t index;
unsigned num_xids = xids_get_num_xids(xids);
fprintf(fp, "[|%u| ", num_xids);
for (index = 0; index < xids_get_num_xids(xids); index++) {
diff --git a/ft/xids.h b/ft/xids.h
index b30121520e0..8e211bd08ad 100644
--- a/ft/xids.h
+++ b/ft/xids.h
@@ -43,19 +43,19 @@ void xids_create_from_buffer(struct rbuf *rb, XIDS * xids_p);
void xids_destroy(XIDS *xids_p);
-TXNID xids_get_xid(XIDS xids, u_int8_t index);
+TXNID xids_get_xid(XIDS xids, uint8_t index);
-u_int8_t xids_find_index_of_xid(XIDS xids, TXNID target_xid);
+uint8_t xids_find_index_of_xid(XIDS xids, TXNID target_xid);
-u_int8_t xids_get_num_xids(XIDS xids);
+uint8_t xids_get_num_xids(XIDS xids);
TXNID xids_get_innermost_xid(XIDS xids);
TXNID xids_get_outermost_xid(XIDS xids);
// return size in bytes
-u_int32_t xids_get_size(XIDS xids);
+uint32_t xids_get_size(XIDS xids);
-u_int32_t xids_get_serialize_size(XIDS xids);
+uint32_t xids_get_serialize_size(XIDS xids);
void toku_calc_more_murmur_xids (struct x1764 *mm, XIDS xids);
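The xids API above manages a packed stack of nested-transaction ids: a one-byte count plus eight bytes per id (xids_get_serialize_size returns 1 + 8*num_xids), with the root transaction 0 left implicit and children required to be strictly younger than their parents. A simplified fixed-capacity sketch of that discipline; the real code heap-allocates each list and keeps ids in disk byte order:

#include <cassert>
#include <cstdint>

typedef uint64_t txnid_t;
static const int MAX_RECORDS = 16;   // stand-in for MAX_TRANSACTION_RECORDS

struct xids_stack {
    uint8_t num_xids;                // txn 0 is implicit, never stored
    txnid_t ids[MAX_RECORDS - 1];
};

static txnid_t innermost(const xids_stack &x) {
    return x.num_xids ? x.ids[x.num_xids - 1] : 0;   // 0 plays TXNID_NONE
}

static void push_child(xids_stack &x, txnid_t child) {
    assert(x.num_xids + 1 < MAX_RECORDS);    // mirrors the EINVAL check
    assert(child > innermost(x));            // children are strictly younger
    x.ids[x.num_xids++] = child;
}

static uint32_t serialize_size(const xids_stack &x) {
    return 1 + 8u * x.num_xids;              // count byte + 8 bytes per xid
}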
diff --git a/ft/ybt.h b/ft/ybt.h
index 7c3683cd4da..fbf58d6071e 100644
--- a/ft/ybt.h
+++ b/ft/ybt.h
@@ -19,7 +19,7 @@ DBT *toku_fill_dbt(DBT *dbt, bytevec k, ITEMLEN len);
DBT *toku_copyref_dbt(DBT *dst, const DBT src);
DBT *toku_clone_dbt(DBT *dst, const DBT src);
int toku_dbt_set(ITEMLEN len, bytevec val, DBT *d, struct simple_dbt *sdbt);
-int toku_dbt_set_value(DBT *, bytevec *val, ITEMLEN vallen, void **staticptrp, BOOL ybt1_disposable);
+int toku_dbt_set_value(DBT *, bytevec *val, ITEMLEN vallen, void **staticptrp, bool ybt1_disposable);
void toku_sdbt_cleanup(struct simple_dbt *sdbt);