author     Geert Bosch <geert.bosch@mongodb.com>    2014-06-30 16:12:03 -0400
committer  Geert Bosch <geert.bosch@mongodb.com>    2014-07-01 10:27:12 -0400
commit     ef3e85d4f5b67c039668cef805459e29711aa636 (patch)
tree       bced88a603d39d4d6cb3dc174c2ad19f8ad7dfaf /src/mongo/dbtests
parent     f64357e485c8d46bd6fd393f7416df6117672de1 (diff)
SERVER-14085: Start using WriteUnitOfWork
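
Every file below gets the same treatment: the test wraps its writes in a WriteUnitOfWork built from the operation's RecoveryUnit (or uses the commit() helper on Client::WriteContext) and calls commit() once the writes are done, the intent being that uncommitted work is rolled back when the unit of work goes out of scope. A minimal sketch of that pattern, using the API names as they appear in the hunks; the namespace string and the write itself are placeholders, and the surrounding mongo test headers are assumed:

    OperationContextImpl txn;                                 // per-operation state used throughout dbtests
    Lock::DBWrite lk(txn.lockState(), "unittests.example");   // take the database write lock first
    WriteUnitOfWork wunit(txn.recoveryUnit());                 // begin the unit of work on the recovery unit
    Client::Context ctx(&txn, "unittests.example");

    // ... inserts, index builds, or drops against ctx.db() go here ...

    wunit.commit();  // publish the writes; if commit() is never called, the
                     // unit of work is meant to roll back when wunit is destroyed
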
Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--  src/mongo/dbtests/basictests.cpp                   |  2
-rw-r--r--  src/mongo/dbtests/clienttests.cpp                  |  1
-rw-r--r--  src/mongo/dbtests/counttests.cpp                   |  4
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp               |  2
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp            |  3
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp             |  3
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp              |  3
-rw-r--r--  src/mongo/dbtests/pdfiletests.cpp                  |  5
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp                 |  3
-rw-r--r--  src/mongo/dbtests/query_multi_plan_runner.cpp      |  1
-rw-r--r--  src/mongo/dbtests/query_single_solution_runner.cpp |  7
-rw-r--r--  src/mongo/dbtests/query_stage_and.cpp              | 16
-rw-r--r--  src/mongo/dbtests/query_stage_collscan.cpp         |  6
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp            | 14
-rw-r--r--  src/mongo/dbtests/query_stage_distinct.cpp         |  3
-rw-r--r--  src/mongo/dbtests/query_stage_fetch.cpp            |  2
-rw-r--r--  src/mongo/dbtests/query_stage_keep.cpp             |  1
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp       |  8
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp             |  5
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp            |  4
-rw-r--r--  src/mongo/dbtests/querytests.cpp                   | 26
-rw-r--r--  src/mongo/dbtests/replsettests.cpp                 | 18
-rw-r--r--  src/mongo/dbtests/repltests.cpp                    | 17
-rw-r--r--  src/mongo/dbtests/runner_registry.cpp              | 10
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp                |  1
25 files changed, 153 insertions(+), 12 deletions(-)
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index 1fbd27763c4..fc8ec367167 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -379,7 +379,9 @@ namespace BasicTests {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Database db( &txn, "dbtests_basictests_ownsns", NULL );
+ wunit.commit();
ASSERT( db.ownsNS( "dbtests_basictests_ownsns.x" ) );
ASSERT( db.ownsNS( "dbtests_basictests_ownsns.x.y" ) );
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 1ba539f0266..4f4e7a96f15 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -160,6 +160,7 @@ namespace ClientTests {
ASSERT_EQUALS(3U, db.count("test.system.namespaces"));
db.ensureIndex(ns(), BSON("x" << 1), true);
+ ctx.commit();
ASSERT_EQUALS(2, indexCatalog->numIndexesReady());
ASSERT_EQUALS(2U, db.count("test.system.indexes"));
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index f8119b632a1..879d4353a0e 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -42,7 +42,7 @@ namespace CountTests {
class Base {
public:
- Base() : lk(_txn.lockState(), ns()), _context(&_txn, ns()) {
+ Base() : lk(_txn.lockState(), ns()), _wunit(_txn.recoveryUnit()), _context(&_txn, ns()) {
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
@@ -55,6 +55,7 @@ namespace CountTests {
~Base() {
try {
uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
+ _wunit.commit();
}
catch ( ... ) {
FAIL( "Exception while cleaning up collection" );
@@ -97,6 +98,7 @@ namespace CountTests {
private:
Lock::DBWrite lk;
+ WriteUnitOfWork _wunit;
Client::Context _context;
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 88c04aece66..33d190ed34c 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -61,6 +61,7 @@ namespace mongo {
{
// Remove _id range [_min, _max).
Lock::DBWrite lk(txn.lockState(), ns);
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Client::Context ctx(&txn, ns );
KeyRange range( ns,
@@ -68,6 +69,7 @@ namespace mongo {
BSON( "_id" << _max ),
BSON( "_id" << 1 ) );
Helpers::removeRange( &txn, range );
+ wunit.commit();
}
// Check that the expected documents remain.
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index bf9b61cf5d3..21fb94ca24b 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -37,6 +37,7 @@ namespace IndexCatalogTests {
_db = ctx.ctx().db();
_coll = _db->createCollection(&txn, _ns);
_catalog = _coll->getIndexCatalog();
+ ctx.commit();
}
~IndexIteratorTests() {
@@ -44,6 +45,7 @@ namespace IndexCatalogTests {
Client::WriteContext ctx(&txn, _ns);
_db->dropCollection(&txn, _ns);
+ ctx.commit();
}
void run() {
@@ -83,6 +85,7 @@ namespace IndexCatalogTests {
}
}
+ ctx.commit();
ASSERT_TRUE(indexesIterated == _catalog->numIndexesReady());
ASSERT_TRUE(foundIndex);
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 3458181887a..f5c24e3ce59 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -59,6 +59,7 @@ namespace IndexUpdateTests {
}
~IndexBuildBase() {
_client.dropCollection( _ns );
+ _ctx.commit(); // just for testing purposes
getGlobalEnvironment()->unsetKillAllOperations();
}
Collection* collection() {
@@ -469,6 +470,7 @@ namespace IndexUpdateTests {
public:
void run() {
OperationContextImpl txn;
+ WriteUnitOfWork wunit (txn.recoveryUnit());
// Insert some documents.
int32_t nDocs = 1000;
for( int32_t i = 0; i < nDocs; ++i ) {
@@ -481,6 +483,7 @@ namespace IndexUpdateTests {
// The call is not interrupted.
Helpers::ensureIndex( &txn, collection(), BSON( "a" << 1 ), false, "a_1" );
// only want to interrupt the index build
+ wunit.commit();
getGlobalEnvironment()->unsetKillAllOperations();
// The new index is listed in system.indexes because the index build completed.
ASSERT_EQUALS( 1U,
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 45015afe883..4f4f88c1ccc 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -38,6 +38,7 @@ namespace OplogStartTests {
class Base {
public:
Base() : _lk(_txn.lockState()),
+ _wunit(_txn.recoveryUnit()),
_context(&_txn, ns()),
_client(&_txn) {
@@ -50,6 +51,7 @@ namespace OplogStartTests {
~Base() {
client()->dropCollection(ns());
+ _wunit.commit();
}
protected:
@@ -94,6 +96,7 @@ namespace OplogStartTests {
// The order of these is important in order to ensure order of destruction
OperationContextImpl _txn;
Lock::GlobalWrite _lk;
+ WriteUnitOfWork _wunit;
Client::Context _context;
DBDirectClient _client;
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index d954c801fdd..aa6026d3d40 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -48,14 +48,15 @@ namespace PdfileTests {
class Base {
public:
Base() : _lk(_txn.lockState()),
+ _wunit(_txn.recoveryUnit()),
_context(&_txn, ns()) {
-
}
virtual ~Base() {
if ( !collection() )
return;
_context.db()->dropCollection( &_txn, ns() );
+ _wunit.commit();
}
protected:
@@ -68,7 +69,7 @@ namespace PdfileTests {
OperationContextImpl _txn;
Lock::GlobalWrite _lk;
-
+ WriteUnitOfWork _wunit;
Client::Context _context;
};
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 79419bb60fa..7b8f01add03 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -66,6 +66,7 @@ namespace PlanRankingTests {
Client::WriteContext ctx(&_txn, ns);
_client.dropCollection(ns);
+ ctx.commit();
}
virtual ~PlanRankingTestBase() {
@@ -76,11 +77,13 @@ namespace PlanRankingTests {
void insert(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns);
_client.insert(ns, obj);
+ ctx.commit();
}
void addIndex(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns);
_client.ensureIndex(ns, obj);
+ ctx.commit();
}
/**
diff --git a/src/mongo/dbtests/query_multi_plan_runner.cpp b/src/mongo/dbtests/query_multi_plan_runner.cpp
index f47d08ecfe8..81982df6768 100644
--- a/src/mongo/dbtests/query_multi_plan_runner.cpp
+++ b/src/mongo/dbtests/query_multi_plan_runner.cpp
@@ -164,6 +164,7 @@ namespace QueryMultiPlanRunner {
ASSERT_EQUALS(obj["foo"].numberInt(), 7);
++results;
}
+ ctx.commit();
ASSERT_EQUALS(results, N / 10);
}
diff --git a/src/mongo/dbtests/query_single_solution_runner.cpp b/src/mongo/dbtests/query_single_solution_runner.cpp
index d5290853151..91596c5cf49 100644
--- a/src/mongo/dbtests/query_single_solution_runner.cpp
+++ b/src/mongo/dbtests/query_single_solution_runner.cpp
@@ -205,6 +205,7 @@ namespace QuerySingleSolutionRunner {
ASSERT_EQUALS(Runner::RUNNER_DEAD, ssr->getNext(&objOut, NULL));
deregisterRunner(ssr.get());
+ ctx.commit();
}
};
@@ -235,6 +236,7 @@ namespace QuerySingleSolutionRunner {
ASSERT_EQUALS(Runner::RUNNER_DEAD, ssr->getNext(&objOut, NULL));
deregisterRunner(ssr.get());
+ ctx.commit();
}
};
@@ -300,6 +302,7 @@ namespace QuerySingleSolutionRunner {
int ids[] = {3, 4, 2};
checkIds(ids, ssr.get());
+ ctx.commit();
}
};
@@ -329,6 +332,7 @@ namespace QuerySingleSolutionRunner {
// we should not see the moved document again.
int ids[] = {3, 4};
checkIds(ids, ssr.get());
+ ctx.commit();
}
};
@@ -357,6 +361,7 @@ namespace QuerySingleSolutionRunner {
ASSERT_EQUALS(1U, numCursors());
ctx.ctx().db()->getCollection( &_txn, ns() )->cursorCache()->invalidateAll(false);
ASSERT_EQUALS(0U, numCursors());
+ ctx.commit();
}
};
@@ -394,6 +399,7 @@ namespace QuerySingleSolutionRunner {
// number of cursors to return to 0.
ccPin.deleteUnderlying();
ASSERT_EQUALS(0U, numCursors());
+ ctx.commit();
}
};
@@ -407,6 +413,7 @@ namespace QuerySingleSolutionRunner {
{
Client::WriteContext ctx(&_txn, ns());
insert(BSON("a" << 1 << "b" << 1));
+ ctx.commit();
}
{
diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp
index 59a5ae3819f..75b144c7e45 100644
--- a/src/mongo/dbtests/query_stage_and.cpp
+++ b/src/mongo/dbtests/query_stage_and.cpp
@@ -222,6 +222,7 @@ namespace QueryStageAnd {
ASSERT_GREATER_THAN_OR_EQUALS(elt.numberInt(), 10);
}
+ ctx.commit();
ASSERT_EQUALS(10, count);
}
};
@@ -308,6 +309,7 @@ namespace QueryStageAnd {
++count;
}
+ ctx.commit();
ASSERT_EQUALS(count, 20);
}
};
@@ -351,6 +353,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20
@@ -405,6 +408,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -457,6 +461,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, so our values are:
// foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20.
@@ -512,6 +517,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
// foo == 10, 11, 12, 13, 14, 15.
@@ -578,6 +584,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// Stage execution should fail.
ASSERT_EQUALS(-1, countResults(ah.get()));
@@ -633,6 +640,7 @@ namespace QueryStageAnd {
if (PlanStage::ADVANCED != status) { continue; }
++count;
}
+ ctx.commit();
ASSERT_EQUALS(0, count);
@@ -686,6 +694,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = false;
params.direction = -1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -734,6 +743,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// Bar == 97
ASSERT_EQUALS(1, countResults(ah.get()));
@@ -856,6 +866,7 @@ namespace QueryStageAnd {
ASSERT_TRUE(member->getFieldDotted("bar", &elt));
ASSERT_EQUALS(1, elt.numberInt());
}
+ ctx.commit();
ASSERT_EQUALS(count, 48);
@@ -912,6 +923,7 @@ namespace QueryStageAnd {
// baz == 1
params.descriptor = getIndex(BSON("baz" << 1), coll);
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
ASSERT_EQUALS(50, countResults(ah.get()));
}
@@ -957,6 +969,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1005,6 +1018,7 @@ namespace QueryStageAnd {
params.bounds.endKeyInclusive = true;
params.direction = 1;
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
ASSERT_EQUALS(0, countResults(ah.get()));
}
@@ -1049,6 +1063,7 @@ namespace QueryStageAnd {
// bar == 1
params.descriptor = getIndex(BSON("bar" << 1), coll);
ah->addChild(new IndexScan(params, &ws, NULL));
+ ctx.commit();
// Filter drops everything.
ASSERT_EQUALS(0, countResults(ah.get()));
@@ -1109,6 +1124,7 @@ namespace QueryStageAnd {
}
lastId = id;
}
+ ctx.commit();
ASSERT_EQUALS(count, 43);
}
diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp
index 647b721f878..06088bf425a 100644
--- a/src/mongo/dbtests/query_stage_collscan.cpp
+++ b/src/mongo/dbtests/query_stage_collscan.cpp
@@ -59,6 +59,7 @@ namespace QueryStageCollectionScan {
virtual ~QueryStageCollectionScanCappedBase() {
_context.db()->dropCollection( &_txn, ns() );
+ wunit.commit();
}
void run() {
@@ -175,6 +176,7 @@ namespace QueryStageCollectionScan {
Lock::GlobalWrite lk_;
Client::Context _context;
OperationContextImpl _txn;
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
};
class QueryStageCollscanEmpty : public QueryStageCollectionScanCappedBase {
@@ -321,11 +323,13 @@ namespace QueryStageCollectionScan {
bob.append("foo", i);
_client.insert(ns(), bob.obj());
}
+ ctx.commit();
}
virtual ~QueryStageCollectionScanBase() {
Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
+ ctx.commit();
}
void remove(const BSONObj& obj) {
@@ -552,6 +556,7 @@ namespace QueryStageCollectionScan {
++count;
}
}
+ ctx.commit();
ASSERT_EQUALS(numObj(), count);
}
@@ -613,6 +618,7 @@ namespace QueryStageCollectionScan {
++count;
}
}
+ ctx.commit();
ASSERT_EQUALS(numObj(), count);
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 04a45d98044..b4a86309568 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -55,6 +55,7 @@ namespace QueryStageCount {
virtual ~CountBase() {
Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
+ ctx.commit();
}
void addIndex(const BSONObj& obj) {
@@ -116,6 +117,7 @@ namespace QueryStageCount {
// Add an index on a:1
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up the count stage
CountParams params;
@@ -149,6 +151,7 @@ namespace QueryStageCount {
// Add an index
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up the count stage
CountParams params;
@@ -181,6 +184,7 @@ namespace QueryStageCount {
// Add an index
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up the count stage
CountParams params;
@@ -209,6 +213,7 @@ namespace QueryStageCount {
// Insert doc, add index
insert(BSON("a" << 2));
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count, and run
CountParams params;
@@ -238,6 +243,7 @@ namespace QueryStageCount {
insert(BSON("a" << 2));
insert(BSON("a" << 3));
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count, and run
CountParams params;
@@ -268,6 +274,7 @@ namespace QueryStageCount {
insert(BSON("a" << 2));
insert(BSON("a" << 4));
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count, and run
CountParams params;
@@ -299,6 +306,7 @@ namespace QueryStageCount {
insert(BSON("a" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count stage
CountParams params;
@@ -350,6 +358,7 @@ namespace QueryStageCount {
insert(BSON("a" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count stage
CountParams params;
@@ -404,6 +413,7 @@ namespace QueryStageCount {
insert(BSON("a" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count stage
CountParams params;
@@ -461,6 +471,7 @@ namespace QueryStageCount {
insert(BSON("a" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count stage
CountParams params;
@@ -514,6 +525,7 @@ namespace QueryStageCount {
insert(BSON("a" << 1 << "b" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Mark several keys as 'unused'
remove(BSON("a" << 1 << "b" << 0));
@@ -552,6 +564,7 @@ namespace QueryStageCount {
// Mark key at end position as 'unused' by deleting
remove(BSON("a" << 1 << "b" << 9));
+ ctx.commit();
// Run count and check
CountParams params;
@@ -582,6 +595,7 @@ namespace QueryStageCount {
insert(BSON("a" << 1 << "b" << i));
}
addIndex(BSON("a" << 1));
+ ctx.commit();
// Set up count stage
CountParams params;
diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp
index 942c87b264a..ac61fc55fce 100644
--- a/src/mongo/dbtests/query_stage_distinct.cpp
+++ b/src/mongo/dbtests/query_stage_distinct.cpp
@@ -53,16 +53,19 @@ namespace QueryStageDistinct {
virtual ~DistinctBase() {
Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
+ ctx.commit();
}
void addIndex(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns());
_client.ensureIndex(ns(), obj);
+ ctx.commit();
}
void insert(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns());
_client.insert(ns(), obj);
+ ctx.commit();
}
IndexDescriptor* getIndex(const BSONObj& obj) {
diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp
index 4bea8b9e004..bc95554164a 100644
--- a/src/mongo/dbtests/query_stage_fetch.cpp
+++ b/src/mongo/dbtests/query_stage_fetch.cpp
@@ -103,6 +103,7 @@ namespace QueryStageFetch {
set<DiskLoc> locs;
getLocs(&locs, coll);
ASSERT_EQUALS(size_t(1), locs.size());
+ ctx.commit();
// Create a mock stage that returns the WSM.
auto_ptr<MockStage> mockStage(new MockStage(&ws));
@@ -199,6 +200,7 @@ namespace QueryStageFetch {
// No more data to fetch, so, EOF.
state = fetchStage->work(&id);
ASSERT_EQUALS(PlanStage::IS_EOF, state);
+ ctx.commit();
}
};
diff --git a/src/mongo/dbtests/query_stage_keep.cpp b/src/mongo/dbtests/query_stage_keep.cpp
index 3283b66ab8c..eca675d6966 100644
--- a/src/mongo/dbtests/query_stage_keep.cpp
+++ b/src/mongo/dbtests/query_stage_keep.cpp
@@ -127,6 +127,7 @@ namespace QueryStageKeep {
member->obj = BSON("x" << 2);
ws.flagForReview(id);
}
+ ctx.commit();
// Create a collscan to provide the 10 objects in the collection.
CollectionScanParams params;
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 11161a5c6be..2aa83912515 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -54,6 +54,7 @@ namespace QueryStageMergeSortTests {
virtual ~QueryStageMergeSortTestBase() {
Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
+ ctx.commit();
}
void addIndex(const BSONObj& obj) {
@@ -149,6 +150,7 @@ namespace QueryStageMergeSortTests {
// b:1
params.descriptor = getIndex(secondIndex, coll);
ms->addChild(new IndexScan(params, ws, NULL));
+ ctx.commit();
// Must fetch if we want to easily pull out an obj.
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -212,6 +214,7 @@ namespace QueryStageMergeSortTests {
// b:1
params.descriptor = getIndex(secondIndex, coll);
ms->addChild(new IndexScan(params, ws, NULL));
+ ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -274,6 +277,7 @@ namespace QueryStageMergeSortTests {
// b:1
params.descriptor = getIndex(secondIndex, coll);
ms->addChild(new IndexScan(params, ws, NULL));
+ ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -339,6 +343,7 @@ namespace QueryStageMergeSortTests {
// b:1
params.descriptor = getIndex(secondIndex, coll);
ms->addChild(new IndexScan(params, ws, NULL));
+ ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -403,6 +408,7 @@ namespace QueryStageMergeSortTests {
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
ms->addChild(new IndexScan(params, ws, NULL));
+ ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -455,6 +461,7 @@ namespace QueryStageMergeSortTests {
params.descriptor = getIndex(indexSpec, coll);
ms->addChild(new IndexScan(params, ws, NULL));
}
+ ctx.commit();
PlanExecutor runner(ws, new FetchStage(ws, ms, NULL, coll), coll);
@@ -514,6 +521,7 @@ namespace QueryStageMergeSortTests {
getLocs(&locs, coll);
set<DiskLoc>::iterator it = locs.begin();
+ ctx.commit();
// Get 10 results. Should be getting results in order of 'locs'.
int count = 0;
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 6ae66a0b7bb..60838f5df77 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -193,6 +193,7 @@ namespace QueryStageSortTests {
fillData();
sortAndCheck(1, coll);
+ ctx.commit();
}
};
@@ -212,6 +213,7 @@ namespace QueryStageSortTests {
fillData();
sortAndCheck(-1, coll);
+ ctx.commit();
}
};
@@ -240,6 +242,7 @@ namespace QueryStageSortTests {
fillData();
sortAndCheck(-1, coll);
+ ctx.commit();
}
};
@@ -315,6 +318,7 @@ namespace QueryStageSortTests {
ASSERT(!member->hasLoc());
++count;
}
+ ctx.commit();
// Returns all docs.
ASSERT_EQUALS(limit() ? limit() : numObj(), count);
@@ -371,6 +375,7 @@ namespace QueryStageSortTests {
ws, new FetchStage(ws, new SortStage(params, ws, ms), NULL, coll), coll);
Runner::RunnerState runnerState = runner.getNext(NULL, NULL);
ASSERT_EQUALS(Runner::RUNNER_ERROR, runnerState);
+ ctx.commit();
}
};
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index fc5ee43ea50..91640817f74 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -59,16 +59,19 @@ namespace QueryStageTests {
addIndex(BSON("foo" << 1));
addIndex(BSON("foo" << 1 << "baz" << 1));
+ ctx.commit();
}
virtual ~IndexScanBase() {
Client::WriteContext ctx(&_txn, ns());
_client.dropCollection(ns());
+ ctx.commit();
}
void addIndex(const BSONObj& obj) {
Client::WriteContext ctx(&_txn, ns());
_client.ensureIndex(ns(), obj);
+ ctx.commit();
}
int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) {
@@ -99,6 +102,7 @@ namespace QueryStageTests {
double lng = double(rand()) / RAND_MAX;
_client.insert(ns(), BSON("geo" << BSON_ARRAY(lng << lat)));
}
+ ctx.commit();
}
IndexDescriptor* getIndex(const BSONObj& obj) {
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index ef3d859a260..f18e9121713 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -57,7 +57,7 @@ namespace QueryTests {
protected:
OperationContextImpl _txn;
Lock::GlobalWrite _lk;
-
+ WriteUnitOfWork _wunit;
Client::Context _context;
Database* _database;
@@ -65,8 +65,8 @@ namespace QueryTests {
public:
Base() : _lk(_txn.lockState()),
+ _wunit(_txn.recoveryUnit()),
_context(&_txn, ns()) {
-
_database = _context.db();
_collection = _database->getCollection( &_txn, ns() );
if ( _collection ) {
@@ -78,6 +78,7 @@ namespace QueryTests {
~Base() {
try {
uassertStatusOK( _database->dropCollection( &_txn, ns() ) );
+ _wunit.commit();
}
catch ( ... ) {
FAIL( "Exception while cleaning up collection" );
@@ -245,12 +246,14 @@ namespace QueryTests {
{
// Check internal server handoff to getmore.
Lock::DBWrite lk(_txn.lockState(), ns);
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
Client::Context ctx(&_txn, ns );
ClientCursorPin clientCursor( ctx.db()->getCollection(&_txn, ns), cursorId );
// pq doesn't exist if it's a runner inside of the clientcursor.
// ASSERT( clientCursor.c()->pq );
// ASSERT_EQUALS( 2, clientCursor.c()->pq->getNumToReturn() );
ASSERT_EQUALS( 2, clientCursor.c()->pos() );
+ wunit.commit();
}
cursor = _client.getMore( ns, cursorId );
@@ -593,8 +596,9 @@ namespace QueryTests {
void run() {
const char *ns = "unittests.querytests.OplogReplaySlaveReadTill";
Lock::DBWrite lk(_txn.lockState(), ns);
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
Client::Context ctx(&_txn, ns );
-
+
BSONObj info;
_client.runCommand( "unittests",
BSON( "create" << "querytests.OplogReplaySlaveReadTill" <<
@@ -616,6 +620,7 @@ namespace QueryTests {
ClientCursorPin clientCursor( ctx.db()->getCollection( &_txn, ns ), cursorId );
ASSERT_EQUALS( three.millis, clientCursor.c()->getSlaveReadTill().asDate() );
+ wunit.commit();
}
};
@@ -1057,7 +1062,9 @@ namespace QueryTests {
void run() {
Lock::GlobalWrite lk(_txn.lockState());
Client::Context ctx(&_txn, "unittests.DirectLocking");
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
_client.remove( "a.b", BSONObj() );
+ wunit.commit();
ASSERT_EQUALS( "unittests", ctx.db()->name() );
}
const char *ns;
@@ -1208,6 +1215,7 @@ namespace QueryTests {
for ( int i=0; i<90; i++ ) {
insertNext();
}
+ ctx.commit();
while ( c->more() ) { c->next(); }
ASSERT( c->isDead() );
@@ -1235,6 +1243,7 @@ namespace QueryTests {
for ( int i=0; i<50; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
}
+ ctx.commit();
ASSERT_EQUALS( 50 , count() );
@@ -1289,6 +1298,7 @@ namespace QueryTests {
for ( int i=0; i<1000; i+=2 ) {
_client.remove( ns() , BSON( "_id" << i ) );
}
+ ctx.commit();
BSONObj res;
for ( int i=0; i<1000; i++ ) {
@@ -1309,7 +1319,7 @@ namespace QueryTests {
for ( int i=0; i<1000; i++ ) {
insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
}
-
+ ctx.commit();
}
};
@@ -1421,11 +1431,16 @@ namespace QueryTests {
CollectionInternalBase( const char *nsLeaf ) :
CollectionBase( nsLeaf ),
_lk(_txn.lockState(), ns() ),
+ _wunit( _txn.recoveryUnit() ),
_ctx(&_txn, ns()) {
-
}
+ ~CollectionInternalBase() {
+ _wunit.commit();
+ }
+
private:
Lock::DBWrite _lk;
+ WriteUnitOfWork _wunit;
Client::Context _ctx;
};
@@ -1511,6 +1526,7 @@ namespace QueryTests {
string expectedAssertion =
str::stream() << "Cannot kill active cursor " << cursorId;
ASSERT_EQUALS( expectedAssertion, _client.getLastError() );
+ ctx.commit();
}
// Verify that the remaining document is read from the cursor.
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
index 0cd3597f34c..3e6b7ef7ad7 100644
--- a/src/mongo/dbtests/replsettests.cpp
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -161,6 +161,7 @@ namespace ReplSetTests {
}
db->dropCollection(&_txn, ns());
+ c.commit();
}
static void setup() {
@@ -315,15 +316,19 @@ namespace ReplSetTests {
void create() {
Client::Context c(&_txn, _cappedNs);
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
ASSERT( userCreateNS( &_txn, c.db(), _cappedNs, fromjson( spec() ), false ).isOK() );
+ wunit.commit();
}
void dropCapped() {
Client::Context c(&_txn, _cappedNs);
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
Database* db = c.db();
if ( db->getCollection( &_txn, _cappedNs ) ) {
db->dropCollection( &_txn, _cappedNs );
}
+ wunit.commit();
}
BSONObj updateFail() {
@@ -358,17 +363,24 @@ namespace ReplSetTests {
// returns true on success, false on failure
bool apply(const BSONObj& op) {
Client::Context ctx(&_txn, _cappedNs );
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
// in an annoying twist of api, returns true on failure
- return !applyOperation_inlock(&_txn, ctx.db(), op, true);
+ if (applyOperation_inlock(&_txn, ctx.db(), op, true)) {
+ return false;
+ }
+ wunit.commit();
+ return true;
}
void run() {
Lock::DBWrite lk(_txn.lockState(), _cappedNs);
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
BSONObj op = updateFail();
Sync s("");
verify(!s.shouldRetry(&_txn, op));
+ wunit.commit();
}
};
@@ -388,6 +400,7 @@ namespace ReplSetTests {
void insert(OperationContext* txn) {
Client::Context ctx(txn, cappedNs());
+ WriteUnitOfWork wunit(txn->recoveryUnit());
Database* db = ctx.db();
Collection* coll = db->getCollection(txn, cappedNs());
if (!coll) {
@@ -397,6 +410,7 @@ namespace ReplSetTests {
BSONObj o = BSON(GENOID << "x" << 456);
DiskLoc loc = coll->insertDocument(txn, o, true).getValue();
verify(!loc.isNull());
+ wunit.commit();
}
public:
virtual ~CappedUpdate() {}
@@ -432,6 +446,7 @@ namespace ReplSetTests {
public:
virtual ~CappedInsert() {}
void run() {
+ WriteUnitOfWork wunit(_txn.recoveryUnit());
// This will succeed, but not insert anything because they are changed to upserts
for (int i=0; i<150; i++) {
insertSucceed();
@@ -442,6 +457,7 @@ namespace ReplSetTests {
Client::Context ctx(&_txn, cappedNs());
Collection* collection = ctx.db()->getCollection( &_txn, cappedNs() );
verify(collection->getIndexCatalog()->findIdIndex());
+ wunit.commit();
}
};
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 11338a61225..4694c942f73 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -58,12 +58,14 @@ namespace ReplTests {
protected:
mutable OperationContextImpl _txn;
Lock::GlobalWrite _lk;
+ WriteUnitOfWork _wunit;
mutable DBDirectClient _client;
Client::Context _context;
public:
Base() : _lk(_txn.lockState()),
+ _wunit( _txn.recoveryUnit()),
_client(&_txn),
_context(&_txn, ns()) {
@@ -73,6 +75,7 @@ namespace ReplTests {
replSettings.master = true;
createOplog();
+
Collection* c = _context.db()->getCollection( &_txn, ns() );
if ( ! c ) {
c = _context.db()->createCollection( &_txn, ns() );
@@ -84,6 +87,7 @@ namespace ReplTests {
replSettings.master = false;
deleteAll( ns() );
deleteAll( cllNS() );
+ _wunit.commit();
}
catch ( ... ) {
FAIL( "Exception while cleaning up test" );
@@ -140,6 +144,7 @@ namespace ReplTests {
static int opCount() {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Client::Context ctx(&txn, cllNS() );
Database* db = ctx.db();
@@ -155,12 +160,13 @@ namespace ReplTests {
++count;
}
delete it;
+ wunit.commit();
return count;
}
static void applyAllOperations() {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
-
+ WriteUnitOfWork wunit(txn.recoveryUnit());
vector< BSONObj > ops;
{
Client::Context ctx(&txn, cllNS() );
@@ -188,10 +194,12 @@ namespace ReplTests {
a.applyOperation( &txn, ctx.db(), *i );
}
}
+ wunit.commit();
}
static void printAll( const char *ns ) {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Client::Context ctx(&txn, ns );
Database* db = ctx.db();
@@ -208,13 +216,14 @@ namespace ReplTests {
::mongo::log() << coll->docFor(currLoc).toString() << endl;
}
delete it;
+ wunit.commit();
}
// These deletes don't get logged.
static void deleteAll( const char *ns ) {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
Client::Context ctx(&txn, ns );
-
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Database* db = ctx.db();
Collection* coll = db->getCollection( &txn, ns );
if ( !coll ) {
@@ -231,12 +240,13 @@ namespace ReplTests {
for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
coll->deleteDocument( &txn, *i, true );
}
+ wunit.commit();
}
static void insert( const BSONObj &o ) {
OperationContextImpl txn;
Lock::GlobalWrite lk(txn.lockState());
Client::Context ctx(&txn, ns() );
-
+ WriteUnitOfWork wunit(txn.recoveryUnit());
Database* db = ctx.db();
Collection* coll = db->getCollection( &txn, ns() );
if ( !coll ) {
@@ -254,6 +264,7 @@ namespace ReplTests {
b.appendOID( "_id", &id );
b.appendElements( o );
coll->insertDocument( &txn, b.obj(), true );
+ wunit.commit();
}
static BSONObj wid( const char *json ) {
class BSONObjBuilder b;
diff --git a/src/mongo/dbtests/runner_registry.cpp b/src/mongo/dbtests/runner_registry.cpp
index b7c5981a27f..a6c2b4dfac6 100644
--- a/src/mongo/dbtests/runner_registry.cpp
+++ b/src/mongo/dbtests/runner_registry.cpp
@@ -60,6 +60,12 @@ namespace RunnerRegistry {
}
}
+ ~RunnerRegistryBase() {
+ if (_ctx.get()) {
+ _ctx->commit();
+ }
+ }
+
/**
* Return a runner that is going over the collection in ns().
*/
@@ -270,6 +276,7 @@ namespace RunnerRegistry {
// Drop a DB that's not ours. We can't have a lock at all to do this as dropping a DB
// requires a "global write lock."
+ _ctx->commit();
_ctx.reset();
_client.dropDatabase("somesillydb");
_ctx.reset(new Client::WriteContext(&_opCtx, ns()));
@@ -286,6 +293,7 @@ namespace RunnerRegistry {
registerRunner(run.get());
// Drop our DB. Once again, must give up the lock.
+ _ctx->commit();
_ctx.reset();
_client.dropDatabase("unittests");
_ctx.reset(new Client::WriteContext(&_opCtx, ns()));
@@ -293,6 +301,8 @@ namespace RunnerRegistry {
// Unregister and restore state.
deregisterRunner(run.get());
run->restoreState(&_opCtx);
+ _ctx->commit();
+ _ctx.reset();
// Runner was killed.
ASSERT_EQUALS(Runner::RUNNER_DEAD, run->getNext(&obj, NULL));
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 0e36df8f43f..8c6f5d7c2fa 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -220,6 +220,7 @@ namespace ThreadedTests {
}
{
Lock::DBWrite x(&lockState, "local");
+ // No actual writing here, so no WriteUnitOfWork
if( sometimes ) {
Lock::TempRelease t(&lockState);
}