Diffstat (limited to 'test/tcl/test114.tcl')
-rw-r--r-- | test/tcl/test114.tcl | 707
1 file changed, 439 insertions, 268 deletions
diff --git a/test/tcl/test114.tcl b/test/tcl/test114.tcl
index 088a426e..f5a6051b 100644
--- a/test/tcl/test114.tcl
+++ b/test/tcl/test114.tcl
@@ -1,16 +1,16 @@
 # See the file LICENSE for redistribution information.
 #
-# Copyright (c) 2005, 2012 Oracle and/or its affiliates.  All rights reserved.
+# Copyright (c) 2005, 2015 Oracle and/or its affiliates.  All rights reserved.
 #
 # $Id$
 #
 # TEST	test114
-# TEST	Test database compaction with overflows.
+# TEST	Test database compaction with overflow or duplicate pages.
 # TEST
 # TEST	Populate a database.  Remove a high proportion of entries.
 # TEST	Dump and save contents.  Compact the database, dump again,
 # TEST	and make sure we still have the same contents.
-# TEST	Add back some entries, delete more entries (this time by 
+# TEST	Add back some entries, delete more entries (this time by
 # TEST	cursor), dump, compact, and do the before/after check again.
 
 proc test114 { method {nentries 10000} {tnum "114"} args } {
@@ -23,11 +23,11 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
 		return
 	}
 
-	# Skip for fixed-length methods because we won't encounter 
-	# overflows.
+	# Skip for fixed-length methods because we won't encounter
+	# overflows or duplicates.
 	if { [is_fixed_length $method] == 1 } {
-		puts "Skipping test$tnum for fixed-length method $method." 
-		return 
+		puts "Skipping test$tnum for fixed-length method $method."
+		return
 	}
 
 	# We run with a small page size to force overflows.  Skip
@@ -40,10 +40,18 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
 
 	set args [convert_args $method $args]
 	set omethod [convert_method $method]
+	set npart 0
+	set nodump 0
 	if { [is_partition_callback $args] == 1 } {
 		set nodump 1
-	} else {
-		set nodump 0
+		set partindx [lsearch -exact $args "-partition_callback"]
+		set npart [lindex $args [expr $partindx + 1]]
+	}
+	if { $npart == 0 && [is_partitioned $args] == 1 } {
+		set partindx [lsearch -exact $args "-partition"]
+		incr partindx
+		set partkey [lindex $args $partindx]
+		set npart [expr [llength $partkey] + 1]
 	}
 
 	# If we are using an env, then testfile should just be the db name.
@@ -53,7 +61,7 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
 	if { $eindex == -1 } {
 		set basename $testdir/test$tnum
 		set env NULL
-		append args " -cachesize { 0 500000 0 }"
+		append args " -cachesize { 0 10000000 0 }"
 	} else {
 		set basename test$tnum
 		incr eindex
@@ -61,318 +69,481 @@ proc test114 { method {nentries 10000} {tnum "114"} args } {
 		set txnenv [is_txnenv $env]
 		if { $txnenv == 1 } {
 			append args " -auto_commit"
+			#
+			# Cut nentries to 1000 for transactional environment
+			# to run the test a bit faster.
+			#
+			if { $nentries > 1000 } {
+				set nentries 1000
+			}
 		}
 		set testdir [get_home $env]
 	}
-	puts "Test$tnum: ($method $args) Database compaction with overflows."
 	set t1 $testdir/t1
 	set t2 $testdir/t2
 	set splitopts { "" "-revsplitoff" }
+	set pgtype { "overflow" "unsorted duplicate" "sorted duplicate" }
 	set txn ""
 
-	if { [is_record_based $method] == 1 } {
-		set checkfunc test001_recno.check
-	} else {
-		set checkfunc test001.check
-	}
+	foreach pgt $pgtype {
+		if { $pgt != "overflow" } {
+			# -dup and -dupsort are only supported by btree
+			# and hash.  And it is an error to specify -recnum
+			# and -dup/-dupsort at the same time.
+			if { [is_btree $method] != 1 && \
+			    [is_hash $method] != 1 } {
+				puts "Skipping $method for compaction\
+				    with $pgt since it does not\
+				    support duplicates."
+				continue
+			}
 
-	cleanup $testdir $env
-	foreach splitopt $splitopts {
-		set testfile $basename.db
-		if { $splitopt == "-revsplitoff" } {
-			set testfile $basename.rev.db
-			if { [is_record_based $method] == 1 } {
-				puts "Skipping\
-				    -revsplitoff option for method $method."
+			# Compression requires -dupsort.
+			if { $pgt != "sorted duplicate" && \
+			    [is_compressed $args] == 1 } {
+				puts "Skipping compression for\
+				    compaction with $pgt."
 				continue
 			}
 		}
-		set did [open $dict]
-		if { $env != "NULL" } {
-			set testdir [get_home $env]
-		}
-		puts "\tTest$tnum.a: Create and populate database ($splitopt)."
-		set pagesize 512
-		set db [eval {berkdb_open -create -pagesize $pagesize \
-		    -mode 0644} $splitopt $args $omethod $testfile]
-		error_check_good dbopen [is_valid_db $db] TRUE
-
-		set count 0
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		while { [gets $did str] != -1 && $count < $nentries } {
-			if { [is_record_based $method] == 1 } {
-				set key [expr $count + 1]
-			} else {
-				set key $str
-			}
-			set str [repeat $alphabet 100]
-
-			set ret [eval \
-			    {$db put} $txn {$key [chop_data $method $str]}]
-			error_check_good put $ret 0
-			incr count
-
-		}
-		if { $txnenv == 1 } {
-			error_check_good txn_commit [$t commit] 0
-		}
-		close $did
-		error_check_good db_sync [$db sync] 0
+		puts "Test$tnum:\
+		    ($method $args) Database compaction with $pgt."
+		foreach splitopt $splitopts {
+			set testfile $basename.db
+			if { $npart != 0 } {
+				set partpfx $testdir/__dbp.test${tnum}.db.
+			}
+			if { $splitopt == "-revsplitoff" } {
+				set testfile $basename.rev.db
+				if { $npart != 0 } {
+					set partpfx \
+					    $testdir/__dbp.test${tnum}.rev.db.
+				}
+				if { [is_btree $omethod] != 1 && \
+				    [is_hash $omethod] != 1 && \
+				    [is_rbtree $omethod] != 1 } {
+					puts "Skipping -revsplitoff\
+					    option for method $method."
+					continue
+				}
+			}
+			set did [open $dict]
+			if { $env != "NULL" } {
+				set testdir [get_home $env]
+			}
 
-		if { $env != "NULL" } {
-			set testdir [get_home $env]
-			set filename $testdir/$testfile
-		} else {
-			set filename $testfile
-		}
-		set size1 [file size $filename]
-		set count1 [stat_field $db stat "Page count"]
+			cleanup $testdir $env
+			puts "\tTest$tnum.a:\
+			    Create and populate database ($splitopt)."
+			set flags $args
+			if { $pgt == "unsorted duplicate" } {
+				append flags " -dup"
+			} elseif { $pgt == "sorted duplicate" } {
+				append flags " -dupsort"
+			}
+			set pagesize 512
+			set db [eval {berkdb_open -create -pagesize $pagesize \
+			    -mode 0644} $splitopt $flags $omethod $testfile]
+			error_check_good dbopen [is_valid_db $db] TRUE
 
-		puts "\tTest$tnum.b: Delete most entries from database."
-		set did [open $dict]
-		set count [expr $nentries - 1]
-		set n 57
-
-		# Leave every nth item.  Since rrecno renumbers, we
-		# delete starting at nentries and working down to 0.
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		while { [gets $did str] != -1 && $count > 0 } {
-			if { [is_record_based $method] == 1 } {
-				set key [expr $count + 1]
-			} else {
-				set key $str
-			}
-
-			if { [expr $count % $n] != 0 } {
-				set ret [eval {$db del} $txn {$key}]
-				error_check_good del $ret 0
-			}
-			incr count -1
-		}
-		if { $txnenv == 1 } {
-			error_check_good t_commit [$t commit] 0
-		}
-		error_check_good db_sync [$db sync] 0
-
-		# Now that the delete is done we ought to have a
-		# lot of pages on the free list.
-		if { [is_hash $method] == 1 } {
-			set free1 [stat_field $db stat "Free pages"]
-		} else {
-			set free1 [stat_field $db stat "Pages on freelist"]
-		}
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			#
+			# For overflow case, repeat the string 100 times to get
+			# a big data and then insert it in to the database
+			# so that overflow pages are created.  For duplicate
+			# case, insert 10 duplicates of each key in order to
+			# have off-page duplicates.
+			#
+			if { $pgt == "overflow" } {
+				set start 100
+				set end 100
+			} else {
+				set start 1
+				set end 10
+			}
+			set count 0
+			set keycnt 0
+			while { [gets $did str] != -1 && $count < $nentries } {
+				if { [is_record_based $method] == 1 } {
+					set key [expr $keycnt + 1]
+				} else {
+					set key $str
+				}
+				for { set i $start } \
+				    { $i <= $end && $count < $nentries } \
+				    { incr i ; incr count} {
+					if { $pgt == "overflow" } {
+						set str [repeat $alphabet $i]
+					} else {
+						set str "${i}.$alphabet"
+					}
+					set ret [eval {$db put} $txn \
+					    {$key [chop_data $method $str]}]
+					error_check_good put $ret 0
+				}
+				incr keycnt
+			}
+			if { $txnenv == 1 } {
+				error_check_good txn_commit [$t commit] 0
+			}
+			close $did
+			error_check_good db_sync [$db sync] 0
 
-		puts "\tTest$tnum.c: Do a dump_file on contents."
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		dump_file $db $txn $t1
-		if { $txnenv == 1 } {
-			error_check_good txn_commit [$t commit] 0
-		}
+			if { $env != "NULL" } {
+				set testdir [get_home $env]
+				set filename $testdir/$testfile
+			} else {
+				set filename $testfile
+			}
+			#
+			# Check that we have the expected type of pages
+			# in the database.
+			#
+			if { $pgt == "overflow" } {
+				set ovf [stat_field $db stat "Overflow pages"]
+				error_check_good \
+				    overflow_pages [expr $ovf > 0] 1
+			} else {
+				set dup [stat_field $db stat "Duplicate pages"]
+				error_check_good \
+				    duplicate_pages [expr $dup > 0] 1
+			}
+
+			puts "\tTest$tnum.b:\
+			    Delete most entries from database."
+			set did [open $dict]
+			if { $count != $keycnt } {
+				set count [expr $keycnt - 1]
+			} else {
+				set count [expr $nentries - 1]
+			}
+			set n 57
+
+			# Leave every nth item.  Since rrecno renumbers, we
+			# delete starting at nentries and working down to 0.
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			while { [gets $did str] != -1 && $count >= 0 } {
+				if { [is_record_based $method] == 1 } {
+					set key [expr $count + 1]
+				} else {
+					set key $str
+				}
+
+				if { [expr $count % $n] != 0 } {
+					set ret [eval {$db del} $txn {$key}]
+					error_check_good del $ret 0
+				}
+				incr count -1
+			}
+			if { $txnenv == 1 } {
+				error_check_good t_commit [$t commit] 0
+			}
+			error_check_good db_sync [$db sync] 0
 
-		puts "\tTest$tnum.d: Compact and verify database."
-		for {set commit 0} {$commit <= $txnenv} {incr commit} {
-			if { $txnenv == 1 } {
-				set t [$env txn]
-				error_check_good txn [is_valid_txn $t $env] TRUE
-				set txn "-txn $t"
-			}
-			if {[catch {eval {$db compact} $txn {-freespace}} ret] } {
-				error "FAIL: db compact: $ret"
-			}
-			if { $txnenv == 1 } {
-				if { $commit == 0 } {
-					puts "\tTest$tnum.d: Aborting."
-					error_check_good txn_abort [$t abort] 0
-				} else {
-					puts "\tTest$tnum.d: Committing."
-					error_check_good txn_commit [$t commit] 0
-				}
-			}
-			error_check_good db_sync [$db sync] 0
-			error_check_good verify_dir \
-			    [ verify_dir $testdir "" 0 0 $nodump] 0
-		}
+			#
+			# Get the db file size.  We should look at the
+			# partitioned file if it is a partitioned db.
+			#
+			set size1 [file size $filename]
+			if { $npart != 0 } {
+				for { set i 0 } { $i < $npart } { incr i } {
+					incr size1 [file size ${partpfx}00${i}]
+				}
+			}
+			set count1 [stat_field $db stat "Page count"]
 
-		set size2 [file size $filename]
-		set count2 [stat_field $db stat "Page count"]
-		if { [is_hash $method] == 1 } {
-			set free2 [stat_field $db stat "Free pages"]
-		} else {
-			set free2 [stat_field $db stat "Pages on freelist"]
-		}
+			# Now that the delete is done we ought to have a
+			# lot of pages on the free list.
+			if { [is_hash $method] == 1 } {
+				set free1 [stat_field $db stat "Free pages"]
+			} else {
+				set free1 \
+				    [stat_field $db stat "Pages on freelist"]
+			}
 
-		# Reduction in on-disk size should be substantial.
-#### We should look at the partitioned files #####
-if { [is_partitioned $args] == 0 } {
-	set reduction .80
-	error_check_good \
-	    file_size [expr [expr $size1 * $reduction] > $size2] 1
-}
+			puts "\tTest$tnum.c: Do a dump_file on contents."
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			dump_file $db $txn $t1
+			if { $txnenv == 1 } {
+				error_check_good txn_commit [$t commit] 0
+			}
 
-		# The number of free pages should be reduced
-		# now that we've compacted with -freespace.
-		error_check_good pages_returned [expr $free1 > $free2] 1
+			puts "\tTest$tnum.d: Compact and verify database."
+			for {set commit 0} {$commit <= $txnenv} {incr commit} {
+				if { $txnenv == 1 } {
+					set t [$env txn]
+					error_check_good txn \
+					    [is_valid_txn $t $env] TRUE
+					set txn "-txn $t"
+				}
+				if { [catch {eval {$db compact} \
+				    $txn {-freespace}} ret] } {
+					error "FAIL: db compact: $ret"
+				}
+				if { $txnenv == 1 } {
+					if { $commit == 0 } {
+						puts "\tTest$tnum.d: Aborting."
+						error_check_good \
+						    txn_abort [$t abort] 0
+					} else {
+						puts "\tTest$tnum.d: Committing."
+						error_check_good \
+						    txn_commit [$t commit] 0
+					}
+				}
+				error_check_good db_sync [$db sync] 0
+				error_check_good verify_dir \
+				    [ verify_dir $testdir "" 0 0 $nodump] 0
+			}
 
-		# Page count should be reduced for all methods except maybe
-		# record-based non-queue methods.  Even with recno, the
-		# page count may not increase.
-		error_check_good page_count_reduced [expr $count1 > $count2] 1
+			set size2 [file size $filename]
+			if { $npart != 0 } {
+				for { set i 0 } { $i < $npart } { incr i } {
+					incr size2 [file size ${partpfx}00${i}]
+				}
+			}
+			set count2 [stat_field $db stat "Page count"]
+			if { [is_hash $method] == 1 } {
+				set free2 [stat_field $db stat "Free pages"]
+			} else {
+				set free2 \
+				    [stat_field $db stat "Pages on freelist"]
+			}
 
-		puts "\tTest$tnum.e: Contents are the same after compaction."
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		dump_file $db $txn $t2
-		if { $txnenv == 1 } {
-			error_check_good txn_commit [$t commit] 0
-		}
+			#
+			# The file size and the number of pages in the database
+			# should never increase.  Since only the empty pages
+			# in the end of the file can be returned to the file
+			# system, the file size and the number of pages may
+			# remain the same.  In this case, the number of pages in
+			# the free list should never decrease.
+			#
+			error_check_good file_size [expr $size2 <= $size1] 1
+			error_check_good page_count [expr $count2 <= $count1] 1
+			if { $size2 == $size1 } {
+				error_check_good page_count $count2 $count1
+				error_check_good pages_returned \
+				    [expr $free2 >= $free1] 1
+			} else {
+				error_check_good page_count \
+				    [expr $count2 < $count1] 1
+			}
 
-		if { [is_hash $method] != 0 } {
-			filesort $t1 $t1.sort
-			filesort $t2 $t2.sort
-			error_check_good filecmp [filecmp $t1.sort $t2.sort] 0
-		} else {
-			error_check_good filecmp [filecmp $t1 $t2] 0
-		}
+			puts "\tTest$tnum.e:\
+			    Contents are the same after compaction."
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			dump_file $db $txn $t2
+			if { $txnenv == 1 } {
+				error_check_good txn_commit [$t commit] 0
+			}
 
-		puts "\tTest$tnum.f: Add more entries to database."
-		# Use integers as keys instead of strings, just to mix it up
-		# a little.
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		for { set i 1 } { $i < $nentries } { incr i } {
-			set key $i
-			set str [repeat $alphabet 100]
-			set ret [eval \
-			    {$db put} $txn {$key [chop_data $method $str]}]
-			error_check_good put $ret 0
-		}
-		if { $txnenv == 1 } {
-			error_check_good t_commit [$t commit] 0
-		}
-		error_check_good db_sync [$db sync] 0
+			if { [is_hash $method] != 0 } {
+				filesort $t1 $t1.sort
+				filesort $t2 $t2.sort
+				error_check_good filecmp \
+				    [filecmp $t1.sort $t2.sort] 0
+			} else {
+				error_check_good filecmp [filecmp $t1 $t2] 0
+			}
 
-		set size3 [file size $filename]
-		set count3 [stat_field $db stat "Page count"]
+			puts "\tTest$tnum.f: Add more entries to database."
+			# Use integers as keys instead of strings, just to mix
+			# it up a little.
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			set count 1
+			set keycnt 1
+			while { $count <= $nentries } {
+				set key $keycnt
+				for { set i $start } \
+				    { $i <= $end && $count <= $nentries } \
+				    { incr i ; incr count} {
+					if { $pgt == "overflow" } {
+						set str [repeat $alphabet $i]
+					} else {
+						set str "${i}.$alphabet"
+					}
+					set ret [eval {$db put} $txn \
+					    {$key [chop_data $method $str]}]
+					error_check_good put $ret 0
+				}
+				incr keycnt
+			}
+			if { $txnenv == 1 } {
+				error_check_good t_commit [$t commit] 0
+			}
+			error_check_good db_sync [$db sync] 0
+			close $did
+
+			#
+			# Check that we have the expected type of pages
+			# in the database.
+			#
+			if { $pgt == "overflow" } {
+				set ovf [stat_field $db stat "Overflow pages"]
+				error_check_good \
+				    overflow_pages [expr $ovf > 0] 1
+			} else {
+				set dup [stat_field $db stat "Duplicate pages"]
+				error_check_good \
+				    duplicate_pages [expr $dup > 0] 1
+			}
 
-		puts "\tTest$tnum.g: Remove more entries, this time by cursor."
-		set count 0
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		set dbc [eval {$db cursor} $txn]
+			puts "\tTest$tnum.g:\
+			    Remove more entries, this time by cursor."
+			set count 0
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			set dbc [eval {$db cursor} $txn]
 
-		# Leave every nth item.
-		for { set dbt [$dbc get -first] } { [llength $dbt] > 0 }\
-		    { set dbt [$dbc get -next] ; incr count } {
-			if { [expr $count % $n] != 0 } {
-				error_check_good dbc_del [$dbc del] 0
-			}
-		}
-		error_check_good cursor_close [$dbc close] 0
-		if { $txnenv == 1 } {
-			error_check_good t_commit [$t commit] 0
-		}
-		error_check_good db_sync [$db sync] 0
-		if { [is_hash $method] == 1 } {
-			set free3 [stat_field $db stat "Free pages"]
-		} else {
-			set free3 [stat_field $db stat "Pages on freelist"]
-		}
+			# Leave every nth item.
+			for { set dbt [$dbc get -first] } \
+			    { [llength $dbt] > 0 } \
+			    { set dbt [$dbc get -next] ; incr count } {
+				if { [expr $count % $n] != 0 } {
+					error_check_good dbc_del [$dbc del] 0
+				}
+			}
+			error_check_good cursor_close [$dbc close] 0
+			if { $txnenv == 1 } {
+				error_check_good t_commit [$t commit] 0
+			}
+			error_check_good db_sync [$db sync] 0
 
-		puts "\tTest$tnum.h: Save contents."
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		dump_file $db $txn $t1
-		if { $txnenv == 1 } {
-			error_check_good t_commit [$t commit] 0
-		}
+			set size3 [file size $filename]
+			if { $npart != 0 } {
+				for { set i 0 } { $i < $npart } { incr i } {
+					incr size3 [file size ${partpfx}00${i}]
+				}
+			}
+			set count3 [stat_field $db stat "Page count"]
+			if { [is_hash $method] == 1 } {
+				set free3 [stat_field $db stat "Free pages"]
+			} else {
+				set free3 \
+				    [stat_field $db stat "Pages on freelist"]
+			}
 
-		puts "\tTest$tnum.i: Compact and verify database again."
-		for {set commit 0} {$commit <= $txnenv} {incr commit} {
-			if { $txnenv == 1 } {
-				set t [$env txn]
-				error_check_good txn [is_valid_txn $t $env] TRUE
-				set txn "-txn $t"
-			}
-			if {[catch {eval {$db compact} $txn {-freespace}} ret] } {
-				error "FAIL: db compact: $ret"
-			}
-			if { $txnenv == 1 } {
-				if { $commit == 0 } {
-					puts "\tTest$tnum.i: Aborting."
-					error_check_good txn_abort [$t abort] 0
-				} else {
-					puts "\tTest$tnum.i: Committing."
-					error_check_good txn_commit [$t commit] 0
-				}
-			}
-			error_check_good db_sync [$db sync] 0
-			error_check_good verify_dir \
-			    [ verify_dir $testdir "" 0 0 $nodump] 0
-		}
+			puts "\tTest$tnum.h: Save contents."
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			dump_file $db $txn $t1
+			if { $txnenv == 1 } {
+				error_check_good t_commit [$t commit] 0
+			}
+
+			puts "\tTest$tnum.i:\
+			    Compact and verify database again."
+			for {set commit 0} {$commit <= $txnenv} {incr commit} {
+				if { $txnenv == 1 } {
+					set t [$env txn]
+					error_check_good txn \
+					    [is_valid_txn $t $env] TRUE
+					set txn "-txn $t"
+				}
+				if { [catch {eval \
+				    {$db compact} $txn {-freespace}} ret] } {
+					error "FAIL: db compact: $ret"
+				}
+				if { $txnenv == 1 } {
+					if { $commit == 0 } {
+						puts "\tTest$tnum.i: Aborting."
+						error_check_good \
+						    txn_abort [$t abort] 0
+					} else {
+						puts "\tTest$tnum.i: Committing."
+						error_check_good \
+						    txn_commit [$t commit] 0
+					}
+				}
+				error_check_good db_sync [$db sync] 0
+				error_check_good verify_dir \
+				    [ verify_dir $testdir "" 0 0 $nodump] 0
+			}
 
-		set size4 [file size $filename]
-		set count4 [stat_field $db stat "Page count"]
-		if { [is_hash $method] == 1 } {
-			set free4 [stat_field $db stat "Free pages"]
-		} else {
-			set free4 [stat_field $db stat "Pages on freelist"]
-		}
+			set size4 [file size $filename]
+			if { $npart != 0 } {
+				for { set i 0 } { $i < $npart } { incr i } {
+					incr size4 [file size ${partpfx}00${i}]
+				}
+			}
+			set count4 [stat_field $db stat "Page count"]
+			if { [is_hash $method] == 1 } {
+				set free4 [stat_field $db stat "Free pages"]
+			} else {
+				set free4 \
+				    [stat_field $db stat "Pages on freelist"]
+			}
 
-#### We should look at the partitioned files #####
-if { [is_partitioned $args] == 0 } {
-	error_check_good \
-	    file_size [expr [expr $size3 * $reduction] > $size4] 1
-}
-
-		error_check_good pages_returned [expr $free3 > $free4] 1
-		error_check_good \
-		    page_count_reduced [expr $count3 > $count4] 1
+			error_check_good file_size [expr $size4 <= $size3] 1
+			error_check_good page_count [expr $count4 <= $count3] 1
+			if { $size4 == $size3 } {
+				error_check_good page_count $count4 $count3
+				error_check_good pages_returned \
+				    [expr $free4 >= $free3] 1
+			} else {
+				error_check_good page_count \
+				    [expr $count4 < $count3] 1
+			}
 
-		puts "\tTest$tnum.j: Contents are the same after compaction."
-		if { $txnenv == 1 } {
-			set t [$env txn]
-			error_check_good txn [is_valid_txn $t $env] TRUE
-			set txn "-txn $t"
-		}
-		dump_file $db $txn $t2
-		if { $txnenv == 1 } {
-			error_check_good t_commit [$t commit] 0
-		}
-		if { [is_hash $method] != 0 } {
-			filesort $t1 $t1.sort
-			filesort $t2 $t2.sort
-			error_check_good filecmp [filecmp $t1.sort $t2.sort] 0
-		} else {
-			error_check_good filecmp [filecmp $t1 $t2] 0
-		}
+			puts "\tTest$tnum.j:\
+			    Contents are the same after compaction."
+			if { $txnenv == 1 } {
+				set t [$env txn]
+				error_check_good txn \
+				    [is_valid_txn $t $env] TRUE
+				set txn "-txn $t"
+			}
+			dump_file $db $txn $t2
+			if { $txnenv == 1 } {
+				error_check_good t_commit [$t commit] 0
+			}
+			if { [is_hash $method] != 0 } {
+				filesort $t1 $t1.sort
+				filesort $t2 $t2.sort
+				error_check_good filecmp \
+				    [filecmp $t1.sort $t2.sort] 0
+			} else {
+				error_check_good filecmp [filecmp $t1 $t2] 0
+			}
 
-		error_check_good db_close [$db close] 0
-		close $did
+			error_check_good db_close [$db close] 0
+		}
 	}
 }
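The invariant that replaces the old fixed 80% reduction check can be read in isolation: after "$db compact -freespace", the file size and page count may only shrink or stay the same, and when nothing could be truncated from the end of the file, the pages freed by compaction must at least remain on the free list. The following minimal Tcl sketch illustrates that check outside the test harness. It is illustrative only: it assumes the Berkeley DB Tcl bindings are loaded, "mydb.db" is a hypothetical database name, and "stat_field" is a helper defined in the suite's testutils.tcl rather than part of the core API.

	# Minimal sketch of the compaction invariant asserted above,
	# assuming the Berkeley DB Tcl package is loaded and stat_field
	# comes from the test suite's testutils.tcl.  mydb.db is a
	# hypothetical btree database.
	set db [berkdb_open -create -btree -pagesize 512 mydb.db]

	# ... populate the database, then delete most entries ...

	set size1 [file size mydb.db]
	set count1 [stat_field $db stat "Page count"]
	set free1 [stat_field $db stat "Pages on freelist"]

	# Compact, returning empty pages at the end of the file
	# to the file system.
	$db compact -freespace
	$db sync

	set size2 [file size mydb.db]
	set count2 [stat_field $db stat "Page count"]
	set free2 [stat_field $db stat "Pages on freelist"]

	# The file and the page count may only shrink or stay the same.
	if { $size2 > $size1 } { error "file grew during compaction" }
	if { $count2 > $count1 } { error "page count grew" }
	# If nothing could be truncated, the freed pages must still
	# be on the free list.
	if { $size2 == $size1 && $free2 < $free1 } {
		error "free list shrank without truncation"
	}
	$db close

For hash databases the statistic is named "Free pages" rather than "Pages on freelist", which is why the test branches on is_hash; for a partitioned database the sizes of the __dbp.* partition files have to be added to the main file's size, as the new $npart loops in the diff do.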