# See the file LICENSE for redistribution information.
#
# Copyright (c) 1999-2002
#	Sleepycat Software.  All rights reserved.
#
# $Id: test067.tcl,v 11.19 2002/06/11 15:19:16 sue Exp $
#
# TEST	test067
# TEST	Test of DB_CURRENT partial puts onto almost-empty duplicate
# TEST	pages, with and without DB_DUP_SORT.
# TEST
# TEST	Test of DB_CURRENT partial puts on almost-empty duplicate pages.
# TEST	This test was written to address the following issue, #2 in the
# TEST	list of issues relating to bug #0820:
# TEST
# TEST	2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
# TEST	In Btree, the DB_CURRENT overwrite of off-page duplicate records
# TEST	first deletes the record and then puts the new one -- this could
# TEST	be a problem if the removal of the record causes a reverse split.
# TEST	Suggested solution is to acquire a cursor to lock down the current
# TEST	record, put a new record after that record, and then delete using
# TEST	the held cursor.
# TEST
# TEST	It also tests the following, #5 in the same list of issues:
# TEST	5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
# TEST	set, duplicate comparison routine specified.
# TEST	The partial change does not alter how the data item sorts, but at
# TEST	the time of that check the record to be put has not been built yet;
# TEST	only the supplied (partial) record is checked for ordering
# TEST	compatibility.
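#
# For reference, the cursor call sequence exercised in the loop below is,
# roughly (illustrative sketch only, not executed here):
#	set dbc [$db cursor]
#	$dbc get -last
#	$dbc put -current -partial {offset length} $newbytes
#	$dbc del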
proc test067 { method {ndups 1000} {tnum 67} args } {
	source ./include.tcl
	global alphabet
	global errorCode

	set args [convert_args $method $args]
	set omethod [convert_method $method]

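	# This test needs duplicate support; record-based methods (recno,
	# queue) don't support duplicates, and neither does btree with
	# -recnum, so skip those methods.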
	if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
		puts "\tTest0$tnum: skipping for method $method."
		return
	}
	set txn ""
	set txnenv 0
	set eindex [lsearch -exact $args "-env"]

	# If we are using an env, then testfile should just be the db name.
	# Otherwise it is the test directory and the name.
	if { $eindex == -1 } {
		set testfile $testdir/test0$tnum.db
		set env NULL
	} else {
		set testfile test0$tnum.db
		incr eindex
		set env [lindex $args $eindex]
		set txnenv [is_txnenv $env]
		if { $txnenv == 1 } {
			append args " -auto_commit "
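			# Scale back the workload in a txn env (presumably to
			# keep per-transaction lock and log usage manageable).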
			if { $ndups == 1000 } {
				set ndups 100
			}
		}
		set testdir [get_home $env]
	}

	puts "Test0$tnum:\
	    $method ($args) Partial puts on near-empty duplicate pages."

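	# Run the test twice: once with unsorted duplicates and once with
	# sorted duplicates (-dupsort), since issue #5 only comes into play
	# when a duplicate comparison routine is in effect.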
	foreach dupopt { "-dup" "-dup -dupsort" } {
		#
		# Testdir might get reset from the env's home dir back
		# to the default if this calls something that sources
		# include.tcl, since testdir is a global.  Set it correctly
		# here each time through the loop.
		#
		if { $env != "NULL" } {
			set testdir [get_home $env]
		}
		cleanup $testdir $env
		set db [eval {berkdb_open -create -mode 0644 \
		    $omethod} $args $dupopt {$testfile}]
		error_check_good db_open [is_valid_db $db] TRUE

		puts "\tTest0$tnum.a ($dupopt): Put $ndups duplicates."

		set key "key_test$tnum"

		for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
			set data $alphabet$ndx

			if { $txnenv == 1 } {
				set t [$env txn]
				error_check_good txn [is_valid_txn $t $env] TRUE
				set txn "-txn $t"
			}
			# No need for pad_data since we're skipping recno.
			set ret [eval {$db put} $txn {$key $data}]
			error_check_good put($key,$data) $ret 0
			if { $txnenv == 1 } {
				error_check_good txn [$t commit] 0
			}
		}

		# Sync so we can inspect database if the next section bombs.
		error_check_good db_sync [$db sync] 0
		puts "\tTest0$tnum.b ($dupopt):\
		    Deleting dups (last first), overwriting each."

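		# Each pass partially overwrites the current (last) duplicate
		# in place and then deletes it, so the later passes exercise
		# DB_CURRENT puts onto an almost-empty duplicate page -- the
		# issue #2 case described in the header.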
		if { $txnenv == 1 } {
			set t [$env txn]
			error_check_good txn [is_valid_txn $t $env] TRUE
			set txn "-txn $t"
		}
		set dbc [eval {$db cursor} $txn]
		error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE

		set count 0
		while { $count < $ndups - 1 } {
			# set cursor to last item in db
			set ret [$dbc get -last]
			error_check_good \
			    verify_key [lindex [lindex $ret 0] 0] $key

			# for error reporting
			set currdatum [lindex [lindex $ret 0] 1]

			# Partially overwrite it: replace the 4 bytes at
			# offset 1 with "bcde", which is what is already
			# there.  The item's value, and thus its sort order
			# under -dupsort, is unchanged (the issue #5 case).

			# Even though we expect success, we catch this
			# since it might return EINVAL, and we want that
			# to FAIL.
			set errorCode NONE
			set ret [catch {eval $dbc put -current \
				{-partial [list 1 4]} "bcde"} \
				res]
			error_check_good \
				partial_put_valid($currdatum) $errorCode NONE
			error_check_good partial_put($currdatum) $res 0

			# delete it
			error_check_good dbc_del [$dbc del] 0

			#puts $currdatum

			incr count
		}

		error_check_good dbc_close [$dbc close] 0
		if { $txnenv == 1 } {
			error_check_good txn [$t commit] 0
		}
		error_check_good db_close [$db close] 0
	}
}