summaryrefslogtreecommitdiff
path: root/test/suite/test_colgap.py
diff options
context:
space:
mode:
Diffstat (limited to 'test/suite/test_colgap.py')
-rw-r--r--test/suite/test_colgap.py88
1 files changed, 87 insertions, 1 deletions
diff --git a/test/suite/test_colgap.py b/test/suite/test_colgap.py
index 4192f14c5e6..46682c23167 100644
--- a/test/suite/test_colgap.py
+++ b/test/suite/test_colgap.py
@@ -1,6 +1,6 @@
#!usr/bin/env python
#
-# Public Domain 2014-2015 MongoDB, Inc.
+# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
@@ -28,6 +28,7 @@
import wiredtiger, wttest
from helper import simple_populate, key_populate, value_populate
+from wtscenario import check_scenarios, multiply_scenarios, number_scenarios
# test_colgap.py
# Test variable-length column-store gap performance.
@@ -119,5 +120,90 @@ class test_column_store_gap(wttest.WiredTigerTestCase):
self.backward(cursor, list(reversed(v)))
+# Basic testing of variable-length column-store with big records.
+class test_colmax(wttest.WiredTigerTestCase):
+ name = 'test_colmax'
+
+ types = [
+ ('file', dict(type='file:')),
+ ('table', dict(type='table:'))
+ ]
+ valfmt = [
+ ('integer', dict(valfmt='i')),
+ ('string', dict(valfmt='S')),
+ ]
+ record_number = [
+ ('big', dict(recno=18446744073709551606)),
+ ('max', dict(recno=18446744073709551615)),
+ ]
+ bulk = [
+ ('bulk', dict(bulk=1)),
+ ('not-bulk', dict(bulk=0)),
+ ]
+ reopen = [
+ ('reopen', dict(reopen=1)),
+ ('not-reopen', dict(reopen=0)),
+ ]
+ single = [
+ ('single', dict(single=1)),
+ ('not-single', dict(single=0)),
+ ]
+
+ scenarios = number_scenarios(multiply_scenarios(\
+ '.', types, valfmt, record_number, bulk, reopen, single))
+
+ # Test that variable-length column-store correctly/efficiently handles big
+ # records (if it's not efficient, we'll just hang).
+ def test_colmax_op(self):
+ recno = self.recno
+
+ uri = self.type + self.name
+ self.session.create(uri, 'key_format=r' +',value_format=' + self.valfmt)
+
+ # Insert a big record with/without a bulk cursor.
+ bulk_config = ""
+ if self.bulk:
+ bulk_config = "bulk"
+ cursor = self.session.open_cursor(uri, None, bulk_config)
+
+        # Optionally make the big record the only record in the table.
+ if not self.single:
+ for i in range(1, 723):
+ cursor[key_populate(cursor, i)] = value_populate(cursor, i)
+
+ # Confirm searching past the end of the table works.
+ if not self.bulk:
+ cursor.set_key(recno)
+ self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)
+
+ # Insert the big record.
+ cursor[key_populate(cursor, recno)] = value_populate(cursor, recno)
+
+ # Optionally flush to disk; re-open the cursor as necessary.
+ if self.bulk or self.reopen:
+ cursor.close()
+ if self.reopen == 1:
+ self.reopen_conn()
+ if self.bulk or self.reopen:
+ cursor = self.session.open_cursor(uri, None, None)
+
+ # Search for the large record.
+ cursor.set_key(recno)
+ self.assertEqual(cursor.search(), 0)
+ self.assertEqual(cursor.get_value(), value_populate(cursor, recno))
+
+ # Update it.
+ cursor[key_populate(cursor, recno)] = value_populate(cursor, 37)
+ cursor.set_key(recno)
+ self.assertEqual(cursor.search(), 0)
+ self.assertEqual(cursor.get_value(), value_populate(cursor, 37))
+
+ # Remove it.
+ cursor.set_key(recno)
+ self.assertEqual(cursor.remove(), 0)
+ cursor.set_key(key_populate(cursor, recno))
+ self.assertEqual(cursor.search(), wiredtiger.WT_NOTFOUND)
+
+
if __name__ == '__main__':
wttest.run()