Diffstat (limited to 'src/mongo/dbtests')
23 files changed, 291 insertions, 271 deletions
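The diff below uniformly replaces the integer literal 0 and the NULL macro with C++11 nullptr wherever a null pointer is meant (function arguments, default arguments, and comparisons). As a minimal standalone sketch of why this matters (not code from the MongoDB tree; the overloaded invoke() below is hypothetical, chosen only to mirror calls such as s->invoke("x=5;", nullptr, nullptr)): an integer-literal null can silently bind to an integer overload, while nullptr can only bind to a pointer parameter.

    // Hypothetical example, assuming nothing about the real Scope::invoke API.
    #include <iostream>

    void invoke(const char* code, int timeoutMs) {
        std::cout << "int overload, timeout=" << timeoutMs << "\n";
    }

    void invoke(const char* code, const char* options) {
        std::cout << "pointer overload, options=" << (options ? options : "none") << "\n";
    }

    int main() {
        invoke("x=5;", 0);        // the literal 0 silently selects the int overload
        invoke("x=5;", nullptr);  // nullptr unambiguously selects the pointer overload
        return 0;
    }

The same reasoning applies to plain C calls such as time(0) becoming time(nullptr): the behavior is unchanged, but the intent (a null pointer, not the integer zero) is explicit.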
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index f3d0b6f2ebb..c4947ebba57 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -66,7 +66,7 @@ namespace dbtests {
 int runDbTests(int argc, char** argv) {
     frameworkGlobalParams.perfHist = 1;
-    frameworkGlobalParams.seed = time(0);
+    frameworkGlobalParams.seed = time(nullptr);
     frameworkGlobalParams.runsPerTest = 1;
     registerShutdownTask([] {
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 65b4b142ac1..e7398a5116d 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -1072,9 +1072,9 @@ class append {
 public:
     void run() {
         BSONObjBuilder b;
-        b.appendOID("a", 0);
-        b.appendOID("b", 0, false);
-        b.appendOID("c", 0, true);
+        b.appendOID("a", nullptr);
+        b.appendOID("b", nullptr, false);
+        b.appendOID("c", nullptr, true);
         BSONObj o = b.obj();
         ASSERT(o["a"].__oid().toString() == "000000000000000000000000");
         ASSERT(o["b"].__oid().toString() == "000000000000000000000000");
@@ -1086,7 +1086,7 @@ class increasing {
 public:
     BSONObj g() {
         BSONObjBuilder b;
-        b.appendOID("_id", 0, true);
+        b.appendOID("_id", nullptr, true);
         return b.obj();
     }
     void run() {
@@ -1869,7 +1869,7 @@ public:
             state = 1;
         } catch (std::exception& e) {
             state = 2;
-            ASSERT(strstr(e.what(), "_id: 5") != NULL);
+            ASSERT(strstr(e.what(), "_id: 5") != nullptr);
         }
         free(crap);
         ASSERT_EQUALS(2, state);
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 7f06eea59f1..051bc478dbc 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -141,25 +141,25 @@ public:
     void run() {
         unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
-        s->invoke("x=5;", 0, 0);
+        s->invoke("x=5;", nullptr, nullptr);
         ASSERT(5 == s->getNumber("x"));
-        s->invoke("return 17;", 0, 0);
+        s->invoke("return 17;", nullptr, nullptr);
         ASSERT(17 == s->getNumber("__returnValue"));
-        s->invoke("function(){ return 18; }", 0, 0);
+        s->invoke("function(){ return 18; }", nullptr, nullptr);
         ASSERT(18 == s->getNumber("__returnValue"));
         s->setNumber("x", 1.76);
-        s->invoke("return x == 1.76; ", 0, 0);
+        s->invoke("return x == 1.76; ", nullptr, nullptr);
         ASSERT(s->getBoolean("__returnValue"));
         s->setNumber("x", 1.76);
-        s->invoke("return x == 1.79; ", 0, 0);
+        s->invoke("return x == 1.79; ", nullptr, nullptr);
         ASSERT(!s->getBoolean("__returnValue"));
         BSONObj obj = BSON("" << 11.0);
-        s->invoke("function( z ){ return 5 + z; }", &obj, 0);
+        s->invoke("function( z ){ return 5 + z; }", &obj, nullptr);
         ASSERT_EQUALS(16, s->getNumber("__returnValue"));
     }
 };
@@ -240,12 +240,12 @@ public:
         unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)());
         // No error is logged for a valid statement.
-        ASSERT_EQUALS(0, scope->invoke("validStatement = true", 0, 0));
+        ASSERT_EQUALS(0, scope->invoke("validStatement = true", nullptr, nullptr));
         ASSERT(!_logger.logged());
         // An error is logged for an invalid statement.
try { - scope->invoke("notAFunction()", 0, 0); + scope->invoke("notAFunction()", nullptr, nullptr); } catch (const DBException&) { // ignore the exception; just test that we logged something } @@ -276,48 +276,48 @@ public: << "sara"); s->setObject("blah", o); - s->invoke("return blah.x;", 0, 0); + s->invoke("return blah.x;", nullptr, nullptr); ASSERT_EQUALS(17, s->getNumber("__returnValue")); - s->invoke("return blah.y;", 0, 0); + s->invoke("return blah.y;", nullptr, nullptr); ASSERT_EQUALS("eliot", s->getString("__returnValue")); - s->invoke("return this.z;", 0, &o); + s->invoke("return this.z;", nullptr, &o); ASSERT_EQUALS("sara", s->getString("__returnValue")); - s->invoke("return this.z == 'sara';", 0, &o); + s->invoke("return this.z == 'sara';", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("this.z == 'sara';", 0, &o); + s->invoke("this.z == 'sara';", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("this.z == 'asara';", 0, &o); + s->invoke("this.z == 'asara';", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("return this.x == 17;", 0, &o); + s->invoke("return this.x == 17;", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("return this.x == 18;", 0, &o); + s->invoke("return this.x == 18;", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function(){ return this.x == 17; }", 0, &o); + s->invoke("function(){ return this.x == 17; }", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("function(){ return this.x == 18; }", 0, &o); + s->invoke("function(){ return this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function (){ return this.x == 17; }", 0, &o); + s->invoke("function (){ return this.x == 17; }", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("function z(){ return this.x == 18; }", 0, &o); + s->invoke("function z(){ return this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function (){ this.x == 17; }", 0, &o); + s->invoke("function (){ this.x == 17; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function z(){ this.x == 18; }", 0, &o); + s->invoke("function z(){ this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("x = 5; for( ; x <10; x++){ a = 1; }", 0, &o); + s->invoke("x = 5; for( ; x <10; x++){ a = 1; }", nullptr, &o); ASSERT_EQUALS(10, s->getNumber("x")); } }; @@ -328,12 +328,12 @@ public: void run() { unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); - s->invoke("z = { num : 1 };", 0, 0); + s->invoke("z = { num : 1 };", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT_EQUALS(1, out["num"].number()); ASSERT_EQUALS(1, out.nFields()); - s->invoke("z = { x : 'eliot' };", 0, 0); + s->invoke("z = { x : 'eliot' };", nullptr, nullptr); out = s->getObject("z"); ASSERT_EQUALS((string) "eliot", out["x"].valuestr()); ASSERT_EQUALS(1, out.nFields()); @@ -417,14 +417,15 @@ public: BSONObj out; - ASSERT_THROWS(s->invoke("blah.y = 'e'", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.a = 19;", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.zz.a = 19;", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.zz = { a : 19 };", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("delete blah['x']", 0, 0), 
mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.y = 'e'", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.a = 19;", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.zz.a = 19;", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.zz = { a : 19 };", nullptr, nullptr), + mongo::AssertionException); + ASSERT_THROWS(s->invoke("delete blah['x']", nullptr, nullptr), mongo::AssertionException); // read-only object itself can be overwritten - s->invoke("blah = {}", 0, 0); + s->invoke("blah = {}", nullptr, nullptr); out = s->getObject("blah"); ASSERT(out.isEmpty()); @@ -456,13 +457,13 @@ public: } s->setObject("x", o); - s->invoke("return x.d.getTime() != 12;", 0, 0); + s->invoke("return x.d.getTime() != 12;", nullptr, nullptr); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("z = x.d.getTime();", 0, 0); + s->invoke("z = x.d.getTime();", nullptr, nullptr); ASSERT_EQUALS(123456789, s->getNumber("z")); - s->invoke("z = { z : x.d }", 0, 0); + s->invoke("z = { z : x.d }", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT(out["z"].type() == Date); } @@ -477,16 +478,16 @@ public: } s->setObject("x", o); - s->invoke("z = x.r.test( 'b' );", 0, 0); + s->invoke("z = x.r.test( 'b' );", nullptr, nullptr); ASSERT_EQUALS(false, s->getBoolean("z")); - s->invoke("z = x.r.test( 'a' );", 0, 0); + s->invoke("z = x.r.test( 'a' );", nullptr, nullptr); ASSERT_EQUALS(true, s->getBoolean("z")); - s->invoke("z = x.r.test( 'ba' );", 0, 0); + s->invoke("z = x.r.test( 'ba' );", nullptr, nullptr); ASSERT_EQUALS(false, s->getBoolean("z")); - s->invoke("z = { a : x.r };", 0, 0); + s->invoke("z = { a : x.r };", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT_EQUALS((string) "^a", out["a"].regex()); @@ -505,7 +506,7 @@ public: " }" " assert(threw);" "}"; - ASSERT_EQUALS(s->invoke(code, &invalidRegex, NULL), 0); + ASSERT_EQUALS(s->invoke(code, &invalidRegex, nullptr), 0); } // array @@ -559,7 +560,7 @@ public: s->setObject("z", b.obj()); - ASSERT(s->invoke("y = { a : z.a , b : z.b , c : z.c , d: z.d }", 0, 0) == 0); + ASSERT(s->invoke("y = { a : z.a , b : z.b , c : z.c , d: z.d }", nullptr, nullptr) == 0); BSONObj out = s->getObject("y"); ASSERT_EQUALS(bsonTimestamp, out["a"].type()); @@ -592,7 +593,7 @@ public: ASSERT_EQUALS(NumberDouble, o["b"].type()); s->setObject("z", o); - s->invoke("return z", 0, 0); + s->invoke("return z", nullptr, nullptr); BSONObj out = s->getObject("__returnValue"); ASSERT_EQUALS(5, out["a"].number()); ASSERT_EQUALS(5.6, out["b"].number()); @@ -610,7 +611,7 @@ public: } s->setObject("z", o, false); - s->invoke("return z", 0, 0); + s->invoke("return z", nullptr, nullptr); out = s->getObject("__returnValue"); ASSERT_EQUALS(5, out["a"].number()); ASSERT_EQUALS(5.6, out["b"].number()); @@ -643,7 +644,7 @@ public: ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type()); ASSERT_EQUALS(NumberInt, out["a"].embeddedObjectUserCheck()["1"].type()); - s->invokeSafe("z.z = 5;", 0, 0); + s->invokeSafe("z.z = 5;", nullptr, nullptr); out = s->getObject("z"); ASSERT_EQUALS(5, out["z"].number()); ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type()); @@ -913,10 +914,10 @@ public: for (int i = 5; i < 100; i += 10) { s->setObject("a", build(i), false); - s->invokeSafe("tojson( a )", 0, 0); + s->invokeSafe("tojson( a )", nullptr, nullptr); s->setObject("a", build(5), true); - s->invokeSafe("tojson( a )", 0, 0); + s->invokeSafe("tojson( a )", 
nullptr, nullptr); } } }; @@ -970,8 +971,8 @@ public: "function() { " " while (true) { } " "} ", - 0, - 0, + nullptr, + nullptr, 1); } catch (const DBException&) { caught = true; @@ -1040,8 +1041,8 @@ public: "function() { " " for (var i=0; i<1; i++) { ; } " "} ", - 0, - 0, + nullptr, + nullptr, 5 * 60 * 1000); } }; @@ -1111,8 +1112,8 @@ public: s->setObject("x", in); } - s->invokeSafe("myb = x.b; print( myb ); printjson( myb );", 0, 0); - s->invokeSafe("y = { c : myb };", 0, 0); + s->invokeSafe("myb = x.b; print( myb ); printjson( myb );", nullptr, nullptr); + s->invokeSafe("y = { c : myb };", nullptr, nullptr); BSONObj out = s->getObject("y"); ASSERT_EQUALS(BinData, out["c"].type()); @@ -1121,7 +1122,7 @@ public: ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false)); // check that BinData js class is utilized - s->invokeSafe("q = x.b.toString();", 0, 0); + s->invokeSafe("q = x.b.toString();", nullptr, nullptr); stringstream expected; expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")"; ASSERT_EQUALS(expected.str(), s->getString("q")); @@ -1130,12 +1131,12 @@ public: scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };"; string script = scriptBuilder.str(); - s->invokeSafe(script.c_str(), 0, 0); + s->invokeSafe(script.c_str(), nullptr, nullptr); out = s->getObject("z"); // pp( "out" , out["c"] ); ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false)); - s->invokeSafe("a = { f: new BinData( 128, \"\" ) };", 0, 0); + s->invokeSafe("a = { f: new BinData( 128, \"\" ) };", nullptr, nullptr); out = s->getObject("a"); int len = -1; out["f"].binData(len); @@ -1187,14 +1188,14 @@ public: unique_ptr<Scope> s; s.reset((getGlobalScriptEngine()->*scopeFactory)()); - s->invokeSafe("x = 5;", 0, 0); + s->invokeSafe("x = 5;", nullptr, nullptr); { BSONObjBuilder b; s->append(b, "z", "x"); ASSERT_BSONOBJ_EQ(BSON("z" << 5), b.obj()); } - s->invokeSafe("x = function(){ return 17; }", 0, 0); + s->invokeSafe("x = function(){ return 17; }", nullptr, nullptr); BSONObj temp; { BSONObjBuilder b; @@ -1202,7 +1203,7 @@ public: temp = b.obj(); } - s->invokeSafe("foo = this.z();", 0, &temp); + s->invokeSafe("foo = this.z();", nullptr, &temp); ASSERT_EQUALS(17, s->getNumber("foo")); } }; @@ -1244,7 +1245,7 @@ public: s->setObject("val", BSONObj(reinterpret_cast<char*>(bits)).getOwned()); - s->invoke("val[\"a\"];", 0, 0); + s->invoke("val[\"a\"];", nullptr, nullptr); ASSERT_TRUE(std::isnan(s->getNumber("__returnValue"))); } }; @@ -1255,43 +1256,43 @@ public: void run() { unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); - s->invoke("x=5;", 0, 0); + s->invoke("x=5;", nullptr, nullptr); ASSERT_EQUALS(5, s->getNumber("__returnValue")); - s->invoke("x='test'", 0, 0); + s->invoke("x='test'", nullptr, nullptr); ASSERT_EQUALS("test", s->getString("__returnValue")); - s->invoke("x='return'", 0, 0); + s->invoke("x='return'", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("return 'return'", 0, 0); + s->invoke("return 'return'", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("x = ' return '", 0, 0); + s->invoke("x = ' return '", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("x = \" return \"", 0, 0); + s->invoke("x = \" return \"", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("x = \"' return '\"", 0, 0); + s->invoke("x = \"' return '\"", nullptr, nullptr); ASSERT_EQUALS("' return '", 
s->getString("__returnValue")); - s->invoke("x = '\" return \"'", 0, 0); + s->invoke("x = '\" return \"'", nullptr, nullptr); ASSERT_EQUALS("\" return \"", s->getString("__returnValue")); - s->invoke(";return 5", 0, 0); + s->invoke(";return 5", nullptr, nullptr); ASSERT_EQUALS(5, s->getNumber("__returnValue")); - s->invoke("String('return')", 0, 0); + s->invoke("String('return')", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("String(' return ')", 0, 0); + s->invoke("String(' return ')", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("String(\"'return\")", 0, 0); + s->invoke("String(\"'return\")", nullptr, nullptr); ASSERT_EQUALS("'return", s->getString("__returnValue")); - s->invoke("String('\"return')", 0, 0); + s->invoke("String('\"return')", nullptr, nullptr); ASSERT_EQUALS("\"return", s->getString("__returnValue")); } }; @@ -1302,7 +1303,7 @@ public: static BSONObj callback(const BSONObj& args, void* data) { auto scope = static_cast<Scope*>(data); - scope->invoke("x = 10;", 0, 0); + scope->invoke("x = 10;", nullptr, nullptr); return BSONObj(); } @@ -1311,7 +1312,7 @@ public: unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); s->injectNative("foo", callback, s.get()); - s->invoke("var x = 1; foo();", 0, 0); + s->invoke("var x = 1; foo();", nullptr, nullptr); ASSERT_EQUALS(s->getNumberInt("x"), 10); } }; @@ -1325,7 +1326,7 @@ public: { bool threwException = false; try { - s->invoke("\"use strict\"; x = 10;", 0, 0); + s->invoke("\"use strict\"; x = 10;", nullptr, nullptr); } catch (...) { threwException = true; @@ -1340,7 +1341,7 @@ public: { bool threwException = false; try { - s->invoke("UUID(1,2,3,4,5);", 0, 0); + s->invoke("UUID(1,2,3,4,5);", nullptr, nullptr); } catch (...) 
{ threwException = true; @@ -1368,7 +1369,9 @@ public: s->injectNative("foo", sidecarThrowingFunc); ASSERT_THROWS_WITH_CHECK( - s->invoke("try { foo(); } catch (e) { throw e; } throw new Error(\"bar\");", 0, 0), + s->invoke("try { foo(); } catch (e) { throw e; } throw new Error(\"bar\");", + nullptr, + nullptr), ExceptionFor<ErrorCodes::ForTestingErrorExtraInfo>, [](const auto& ex) { ASSERT_EQ(ex->data, 123); }); } @@ -1423,7 +1426,7 @@ class ConvertShardKeyToHashed { public: void check(shared_ptr<Scope> s, const mongo::BSONObj& o) { s->setObject("o", o, true); - s->invoke("return convertShardKeyToHashed(o);", 0, 0); + s->invoke("return convertShardKeyToHashed(o);", nullptr, nullptr); const auto scopeShardKey = s->getNumber("__returnValue"); // Wrapping to form a proper element @@ -1438,7 +1441,7 @@ public: void checkWithSeed(shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) { s->setObject("o", o, true); s->setNumber("seed", seed); - s->invoke("return convertShardKeyToHashed(o, seed);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed);", nullptr, nullptr); const auto scopeShardKey = s->getNumber("__returnValue"); // Wrapping to form a proper element @@ -1450,19 +1453,19 @@ public: } void checkNoArgs(shared_ptr<Scope> s) { - s->invoke("return convertShardKeyToHashed();", 0, 0); + s->invoke("return convertShardKeyToHashed();", nullptr, nullptr); } void checkWithExtraArg(shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) { s->setObject("o", o, true); s->setNumber("seed", seed); - s->invoke("return convertShardKeyToHashed(o, seed, 1);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed, 1);", nullptr, nullptr); } void checkWithBadSeed(shared_ptr<Scope> s, const mongo::BSONObj& o) { s->setObject("o", o, true); s->setString("seed", "sunflower"); - s->invoke("return convertShardKeyToHashed(o, seed);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed);", nullptr, nullptr); } void run() { @@ -1520,12 +1523,12 @@ public: "let f = async function() { return 28; };" "f().then(function(y){ x = y; });" "return x;", - 0, - 0); + nullptr, + nullptr); ASSERT(0 == scope->getNumber("__returnValue")); /* When we return x the second time the value has been updated * by the async function */ - scope->invoke("return x;", 0, 0); + scope->invoke("return x;", nullptr, nullptr); ASSERT(28 == scope->getNumber("__returnValue")); } }; diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h index 8995e92ab19..aaa4968d58e 100644 --- a/src/mongo/dbtests/mock/mock_dbclient_connection.h +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h @@ -78,7 +78,7 @@ public: mongo::Query query = mongo::Query(), int nToReturn = 0, int nToSkip = 0, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; @@ -107,7 +107,7 @@ public: unsigned long long query(std::function<void(mongo::DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, mongo::Query query, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; @@ -120,7 +120,9 @@ public: mongo::Message& response, bool assertOk, std::string* actualServer) override; - void say(mongo::Message& toSend, bool isRetry = false, std::string* actualServer = 0) override; + void say(mongo::Message& toSend, + bool isRetry = false, + std::string* actualServer = nullptr) override; bool lazySupported() const 
override; private: diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h index 1ccddf63942..6c0ccc8170d 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.h +++ b/src/mongo/dbtests/mock/mock_remote_db_server.h @@ -166,7 +166,7 @@ public: mongo::Query query = mongo::Query(), int nToReturn = 0, int nToSkip = 0, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0); diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp index f6158af9abb..fb17ad1a960 100644 --- a/src/mongo/dbtests/mock/mock_replica_set.cpp +++ b/src/mongo/dbtests/mock/mock_replica_set.cpp @@ -166,7 +166,7 @@ vector<string> MockReplicaSet::getSecondaries() const { } MockRemoteDBServer* MockReplicaSet::getNode(const string& hostAndPort) { - return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(NULL)); + return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(nullptr)); } repl::ReplSetConfig MockReplicaSet::getReplConfig() const { @@ -342,7 +342,7 @@ void MockReplicaSet::mockReplSetGetStatusCmd() { // TODO: syncingTo fullStatBuilder.append("set", _setName); - fullStatBuilder.appendTimeT("date", time(0)); + fullStatBuilder.appendTimeT("date", time(nullptr)); fullStatBuilder.append("myState", getState(node->getServerAddress())); fullStatBuilder.append("members", hostsField); fullStatBuilder.append("ok", true); diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp index 44a6c09e8aa..2a44c20ec31 100644 --- a/src/mongo/dbtests/mock_replica_set_test.cpp +++ b/src/mongo/dbtests/mock_replica_set_test.cpp @@ -68,7 +68,7 @@ TEST(MockReplicaSetTest, GetNode) { ASSERT_EQUALS("$n0:27017", replSet.getNode("$n0:27017")->getServerAddress()); ASSERT_EQUALS("$n1:27017", replSet.getNode("$n1:27017")->getServerAddress()); ASSERT_EQUALS("$n2:27017", replSet.getNode("$n2:27017")->getServerAddress()); - ASSERT(replSet.getNode("$n3:27017") == NULL); + ASSERT(replSet.getNode("$n3:27017") == nullptr); } TEST(MockReplicaSetTest, IsMasterNode0) { diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp index 2cc4eccd7fa..48e9a7adfb1 100644 --- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp +++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp @@ -76,7 +76,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; unique_ptr<CollectionScan> scan( - new CollectionScan(&_opCtx, collection(), params, ws.get(), NULL)); + new CollectionScan(&_opCtx, collection(), params, ws.get(), nullptr)); // Create a plan executor to hold it auto qr = std::make_unique<QueryRequest>(nss); @@ -138,7 +138,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYiel // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -153,11 +153,11 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYiel // Make sure that the PlanExecutor moved forward over the deleted data. We don't see foo==10 or // foo==11. 
for (int i = 12; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, nullptr)); } TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIsDropped) { @@ -166,7 +166,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -177,7 +177,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs exec->restoreState(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(10, obj["foo"].numberInt()); exec->saveState(); @@ -195,7 +195,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -205,7 +205,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro // Read the rest of the collection. for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } } @@ -218,7 +218,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -228,7 +228,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp // Read the rest of the collection. for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } } @@ -245,7 +245,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenAllIndexesDropped) { // Start scanning the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -266,7 +266,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDr // Start scanning the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -289,7 +289,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsD // Start scanning the index. 
BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -301,7 +301,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsD // Scan the rest of the index. for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } } @@ -312,7 +312,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -325,7 +325,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); exec->restoreState(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(10, obj["foo"].numberInt()); exec->saveState(); @@ -344,7 +344,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas // Partially scan the collection. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -371,7 +371,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase) // Partially scan the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -400,7 +400,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnRestartCatalog) { // Partially scan the collection. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -421,7 +421,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllInd // Partially scan the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -438,7 +438,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) // Partially scan the collection. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -449,7 +449,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) exec->restoreState(); // Since all documents in the collection have been deleted, the PlanExecutor should issue EOF. 
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, nullptr)); } } // namespace mongo diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp index 9e965b270c3..4709a63c8bc 100644 --- a/src/mongo/dbtests/plan_ranking.cpp +++ b/src/mongo/dbtests/plan_ranking.cpp @@ -147,7 +147,7 @@ public: * Was a backup plan picked during the ranking process? */ bool hasBackupPlan() const { - ASSERT(NULL != _mps.get()); + ASSERT(nullptr != _mps.get()); return _mps->hasBackupPlan(); } @@ -256,7 +256,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Turn on the "force intersect" option. // This will be reverted by PlanRankingTestBase's destructor when the test completes. @@ -297,7 +297,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -331,7 +331,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -370,7 +370,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); // Prefer the fully covered plan. @@ -403,7 +403,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // {a: 100} is super selective so choose that. QuerySolution* soln = pickBestPlan(cq.get()); @@ -439,7 +439,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // {a: 100} is super selective so choose that. 
QuerySolution* soln = pickBestPlan(cq.get()); @@ -498,7 +498,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -532,7 +532,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // No results will be returned during the trial period, // so we expect to choose {d: 1, e: 1}, as it allows us @@ -569,7 +569,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Use index on 'b'. QuerySolution* soln = pickBestPlan(cq.get()); @@ -601,7 +601,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Expect to use index {a: 1, b: 1}. QuerySolution* soln = pickBestPlan(cq.get()); diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index d790c24dca0..4d645862ffc 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -112,7 +112,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - verify(NULL != cq.get()); + verify(nullptr != cq.get()); // Make the stage. unique_ptr<PlanStage> root( @@ -151,14 +151,14 @@ public: const Collection* coll = db->getCollection(&_opCtx, nss); unique_ptr<WorkingSet> ws(new WorkingSet()); - IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), NULL); - unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, NULL, coll)); + IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), nullptr); + unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, nullptr, coll)); auto qr = std::make_unique<QueryRequest>(nss); auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - verify(NULL != cq.get()); + verify(nullptr != cq.get()); // Hand the plan off to the executor. 
auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx, @@ -334,7 +334,7 @@ protected: BSONObj objOut; int idcount = 0; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&objOut, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&objOut, nullptr))) { ASSERT_EQUALS(expectedIds[idcount], objOut["_id"].numberInt()); ++idcount; } @@ -358,7 +358,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotControl) { auto exec = makeCollScanExec(coll, filterObj); BSONObj objOut; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, nullptr)); ASSERT_EQUALS(2, objOut["a"].numberInt()); forceDocumentMove(); @@ -382,7 +382,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotTest) { auto exec = makeIndexScanExec(ctx.db(), indexSpec, 2, 5); BSONObj objOut; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, nullptr)); ASSERT_EQUALS(2, objOut["a"].numberInt()); forceDocumentMove(); diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp index aab96cdff95..1bad15c7280 100644 --- a/src/mongo/dbtests/query_stage_and.cpp +++ b/src/mongo/dbtests/query_stage_and.cpp @@ -205,12 +205,12 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0 // in that order. Read half of them. @@ -292,13 +292,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar <= 19 (descending). params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 19); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // First call to work reads the first result from the children. The first result for the // first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan @@ -372,13 +372,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15. 
16, 17, 18, 19, 20 @@ -421,13 +421,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 20 << "" << big); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Stage execution should fail. ASSERT_EQUALS(-1, countResults(ah.get())); @@ -468,13 +468,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20. @@ -510,18 +510,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are: // foo == 10, 11, 12, 13, 14, 15. @@ -568,18 +568,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Stage execution should fail. 
ASSERT_EQUALS(-1, countResults(ah.get())); @@ -613,13 +613,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar == 5. Index scan should be eof. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 5); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); int count = 0; int works = 0; @@ -669,7 +669,7 @@ public: // Foo >= 100 auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 100); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar <= 100 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); @@ -681,7 +681,7 @@ public: << ""); params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly; params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -717,17 +717,17 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr); // First child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll); ah->addChild(fetch); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Check that the AndHash stage returns docs {foo: 10, bar: 10} // through {foo: 20, bar: 20}. @@ -769,16 +769,16 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr); // Second child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll); ah->addChild(fetch); // Check that the AndHash stage returns docs {foo: 10, bar: 10} @@ -955,13 +955,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Scan over bar == 1. 
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Get the set of RecordIds in our collection to use later. set<RecordId> data; @@ -1072,19 +1072,19 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // baz == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(50, countResults(ah.get())); } @@ -1117,13 +1117,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar == 20, not EOF. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1160,13 +1160,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 20. 
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1199,13 +1199,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Intersect with 7 <= bar < 10000 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 10000); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); WorkingSetID lastId = WorkingSet::INVALID_ID; @@ -1261,18 +1261,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr); // First child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll); as->addChild(fetch); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); for (int i = 0; i < 50; i++) { BSONObj obj = getNext(as.get(), &ws); @@ -1313,17 +1313,17 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr); // Second child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll); as->addChild(fetch); for (int i = 0; i < 50; i++) { diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index 615f30f6869..2a5e808e6b0 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -114,7 +114,7 @@ public: // Use the runner to count the number of objects scanned. 
int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { ++count; } ASSERT_EQUALS(PlanExecutor::IS_EOF, state); @@ -130,7 +130,8 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -227,7 +228,7 @@ public: int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { // Make sure we get the objects in the order we want ASSERT_EQUALS(count, obj["foo"].numberInt()); ++count; @@ -262,7 +263,7 @@ public: int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { ++count; ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt()); } @@ -293,7 +294,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr)); int count = 0; while (count < 10) { @@ -352,7 +353,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr)); int count = 0; while (count < 10) { diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index 80770a69e70..003beb748d9 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -57,7 +57,9 @@ const int kInterjections = kDocuments; class CountStageTest { public: CountStageTest() - : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {} + : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), + _coll(nullptr) {} virtual ~CountStageTest() {} @@ -94,7 +96,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -129,7 +131,7 @@ public: Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc), newDoc, true, - NULL, + nullptr, &args); wunit.commit(); } diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp index f10b7390243..708d68e068a 100644 --- a/src/mongo/dbtests/query_stage_delete.cpp +++ b/src/mongo/dbtests/query_stage_delete.cpp @@ -90,7 +90,8 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = 
WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -150,7 +151,7 @@ public: std::move(deleteStageParams), &ws, coll, - new CollectionScan(&_opCtx, coll, collScanParams, &ws, NULL)); + new CollectionScan(&_opCtx, coll, collScanParams, &ws, nullptr)); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats()); diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp index 2eb09c39057..f8bf178e5c3 100644 --- a/src/mongo/dbtests/query_stage_fetch.cpp +++ b/src/mongo/dbtests/query_stage_fetch.cpp @@ -138,7 +138,7 @@ public: } unique_ptr<FetchStage> fetchStage( - new FetchStage(&_opCtx, &ws, mockStage.release(), NULL, coll)); + new FetchStage(&_opCtx, &ws, mockStage.release(), nullptr, coll)); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state; diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp index d5112419175..d38c26eb009 100644 --- a/src/mongo/dbtests/query_stage_ixscan.cpp +++ b/src/mongo/dbtests/query_stage_ixscan.cpp @@ -47,7 +47,9 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; class IndexScanTest { public: IndexScanTest() - : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {} + : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), + _coll(nullptr) {} virtual ~IndexScanTest() {} @@ -109,7 +111,7 @@ public: params.direction = 1; // This child stage gets owned and freed by the caller. - MatchExpression* filter = NULL; + MatchExpression* filter = nullptr; return new IndexScan(&_opCtx, params, &_ws, filter); } @@ -133,7 +135,7 @@ public: oil.intervals.push_back(Interval(bob.obj(), startInclusive, endInclusive)); params.bounds.fields.push_back(oil); - MatchExpression* filter = NULL; + MatchExpression* filter = nullptr; return new IndexScan(&_opCtx, params, &_ws, filter); } diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp index aa6cb5e174a..5656174f7b3 100644 --- a/src/mongo/dbtests/query_stage_merge_sort.cpp +++ b/src/mongo/dbtests/query_stage_merge_sort.cpp @@ -173,11 +173,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -189,9 +189,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); ASSERT((first.hasField("a") && second.hasField("b")) || @@ -200,7 +200,7 @@ public: // Should be done now. 
BSONObj foo;
- ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL));
+ ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr));
}
};
@@ -238,11 +238,11 @@ public:
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -253,9 +253,9 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr));
first = first.getOwned();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b")) ||
@@ -264,7 +264,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr));
}
};
@@ -302,11 +302,11 @@ public:
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -318,9 +318,9 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
// We inserted N objects but we get 2 * N from the runner because of dups.
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr));
first = first.getOwned();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b")) ||
@@ -329,7 +329,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr));
}
};
@@ -370,13 +370,13 @@ public:
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = objWithMaxKey(1);
params.bounds.endKey = objWithMinKey(1);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -387,9 +387,9 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr));
first = first.getOwned();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(N - i - 1, first["c"].numberInt());
ASSERT((first.hasField("a") && second.hasField("b")) ||
@@ -398,7 +398,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr));
}
};
@@ -436,13 +436,13 @@ public:
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:51 (EOF)
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
params.bounds.startKey = BSON("" << 51 << "" << MinKey);
params.bounds.endKey = BSON("" << 51 << "" << MaxKey);
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -454,14 +454,14 @@ public:
// Only getting results from the a:1 index scan.
for (int i = 0; i < N; ++i) {
BSONObj obj;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr));
ASSERT_EQUALS(i, obj["c"].numberInt());
ASSERT_EQUALS(1, obj["a"].numberInt());
}
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr));
}
};
@@ -493,7 +493,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
}
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -505,7 +505,7 @@ public:
for (int i = 0; i < numIndices; ++i) {
BSONObj obj;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr));
ASSERT_EQUALS(i, obj["foo"].numberInt());
string index(1, 'a' + i);
ASSERT_EQUALS(1, obj[index].numberInt());
@@ -513,7 +513,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL));
+ ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr));
}
};
@@ -547,7 +547,7 @@ public:
BSONObj indexSpec = BSON(index << 1 << "foo" << 1);
addIndex(indexSpec);
auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll));
- ms->addChild(new IndexScan(&_opCtx, params, &ws, NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, &ws, nullptr));
}
set<RecordId> recordIds;
@@ -765,11 +765,11 @@ public:
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -781,9 +781,9 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr));
first = first.getOwned();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
// {a: 1, c: i, d: "abc"} should precede {b: 1, c: i, d: "bca"}.
@@ -792,7 +792,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL));
+ ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr));
}
};
@@ -833,11 +833,11 @@ public:
// a:1
auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
// b:1
params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll));
- ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL));
+ ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr));
unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll);
@@ -849,9 +849,9 @@ public:
for (int i = 0; i < N; ++i) {
BSONObj first, second;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr));
first = first.getOwned();
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr));
ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt());
ASSERT_EQUALS(i, first["c"].numberInt());
// {b: 1, c: i, d: "cba"} should precede {a: 1, c: i, d: "abc"}.
@@ -860,7 +860,7 @@ public:
// Should be done now.
BSONObj foo;
- ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL));
+ ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr));
}
};
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 622d284045b..97c50d74952 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -267,7 +267,7 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) {
int results = 0;
BSONObj obj;
PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) {
ASSERT_EQUALS(obj["foo"].numberInt(), 7);
++results;
}
@@ -378,7 +378,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) {
auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr));
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- ASSERT(NULL != cq.get());
+ ASSERT(nullptr != cq.get());
// Force index intersection.
bool forceIxisectOldValue = internalQueryForceIntersectionPlans.load();
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index a50de5f0d3f..1982273bc79 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -175,7 +175,7 @@ public:
// Look at pairs of objects to make sure that the sort order is pairwise (and therefore
// totally) correct.
BSONObj last;
- ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, NULL));
+ ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, nullptr));
last = last.getOwned();
// Count 'last'.
@@ -183,7 +183,7 @@ public:
BSONObj current;
PlanExecutor::ExecState state;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(&current, NULL))) {
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&current, nullptr))) {
int cmp = sgn(dps::compareObjectsAccordingToSort(current, last, params.pattern));
// The next object should be equal to the previous or oriented according to the sort
// pattern.
@@ -364,7 +364,7 @@ public:
CollectionUpdateArgs args;
{
WriteUnitOfWork wuow(&_opCtx);
- coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, NULL, &args);
+ coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, nullptr, &args);
wuow.commit();
}
exec->restoreState();
@@ -382,7 +382,7 @@ public:
oldDoc = coll->docFor(&_opCtx, *it);
{
WriteUnitOfWork wuow(&_opCtx);
- coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc(oldDoc), false, NULL, &args);
+ coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc(oldDoc), false, nullptr, &args);
wuow.commit();
}
}
@@ -571,7 +571,7 @@ public:
&_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD);
auto exec = std::move(statusWithPlanExecutor.getValue());
- PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL);
+ PlanExecutor::ExecState runnerState = exec->getNext(nullptr, nullptr);
ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState);
}
};
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index ec2a54934e3..ffa55f0040b 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -102,7 +102,7 @@ public:
int count = 0;
PlanExecutor::ExecState state;
- for (RecordId dl; PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &dl));) {
+ for (RecordId dl; PlanExecutor::ADVANCED == (state = exec->getNext(nullptr, &dl));) {
++count;
}
ASSERT_EQUALS(PlanExecutor::IS_EOF, state);
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 99829659d0c..adafcaadccd 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -131,7 +131,8 @@ public:
params.direction = CollectionScanParams::FORWARD;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(
+     new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
@@ -152,7 +153,8 @@ public:
params.direction = direction;
params.tailable = false;
- unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL));
+ unique_ptr<CollectionScan> scan(
+     new CollectionScan(&_opCtx, collection, params, &ws, nullptr));
while (!scan->isEOF()) {
WorkingSetID id = WorkingSet::INVALID_ID;
PlanStage::StageState state = scan->work(&id);
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index c981c80a888..a41e2d9054b 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -220,7 +220,7 @@ public:
WriteUnitOfWork wunit(&_opCtx);
Database* db = ctx.db();
if (db->getCollection(&_opCtx, nss())) {
- _collection = NULL;
+ _collection = nullptr;
db->dropCollection(&_opCtx, nss()).transitional_ignore();
}
_collection = db->createCollection(&_opCtx, nss(), CollectionOptions(), false);
@@ -338,7 +338,8 @@ public:
}
// Create a cursor on the collection, with a batch size of 200.
- unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), "", 0, 0, 0, 0, 200);
+ unique_ptr<DBClientCursor> cursor =
+     _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200);
// Count 500 results, spanning a few batches of documents.
for (int i = 0; i < 500; ++i) {
@@ -383,7 +384,8 @@ public:
}
// Create a cursor on the collection, with a batch size of 200.
- unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), "", 0, 0, 0, 0, 200);
+ unique_ptr<DBClientCursor> cursor =
+     _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200);
CursorId cursorId = cursor->getCursorId();
// Count 500 results, spanning a few batches of documents.
@@ -461,7 +463,7 @@ public:
Query().hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
ASSERT(0 != c->getCursorId());
while (c->more())
@@ -493,7 +495,7 @@ public:
Query().hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
ASSERT_EQUALS(0, c->getCursorId());
ASSERT(c->isDead());
@@ -502,7 +504,7 @@ public:
QUERY("a" << 1).hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
ASSERT(0 != c->getCursorId());
ASSERT(!c->isDead());
@@ -528,7 +530,7 @@ public:
Query().hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
c->next();
c->next();
@@ -560,7 +562,7 @@ public:
Query().hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
c->next();
c->next();
@@ -594,7 +596,7 @@ public:
Query().hint(BSON("$natural" << 1)),
2,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
c->next();
c->next();
@@ -616,7 +618,8 @@ public:
const char* ns = "unittests.querytests.TailCappedOnly";
_client.insert(ns, BSONObj());
ASSERT_THROWS(
- _client.query(NamespaceString(ns), BSONObj(), 0, 0, 0, QueryOption_CursorTailable),
+ _client.query(
+     NamespaceString(ns), BSONObj(), 0, 0, nullptr, QueryOption_CursorTailable),
AssertionException);
}
};
@@ -629,8 +632,8 @@ public:
void insertA(const char* ns, int a) {
BSONObjBuilder b;
- b.appendOID("_id", 0, true);
- b.appendOID("value", 0, true);
+ b.appendOID("_id", nullptr, true);
+ b.appendOID("value", nullptr, true);
b.append("a", a);
insert(ns, b.obj());
}
@@ -656,11 +659,15 @@ public:
insertA(ns, 0);
insertA(ns, 1);
unique_ptr<DBClientCursor> c1 = _client.query(
- NamespaceString(ns), QUERY("a" << GT << -1), 0, 0, 0, QueryOption_CursorTailable);
+ NamespaceString(ns), QUERY("a" << GT << -1), 0, 0, nullptr, QueryOption_CursorTailable);
OID id;
id.init("000000000000000000000000");
- unique_ptr<DBClientCursor> c2 = _client.query(
- NamespaceString(ns), QUERY("value" << GT << id), 0, 0, 0, QueryOption_CursorTailable);
+ unique_ptr<DBClientCursor> c2 = _client.query(NamespaceString(ns),
+     QUERY("value" << GT << id),
+     0,
+     0,
+     nullptr,
+     QueryOption_CursorTailable);
c1->next();
c1->next();
ASSERT(!c1->more());
@@ -703,7 +710,7 @@ public:
QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
ASSERT_EQUALS(2u, c->next()["ts"].timestamp().getInc());
@@ -714,7 +721,7 @@ public:
QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
ASSERT_EQUALS(2u, c->next()["ts"].timestamp().getInc());
@@ -747,7 +754,7 @@ public:
QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)).explain(),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
@@ -1370,7 +1377,7 @@ public:
QUERY("i" << GT << 0).hint(BSON("$natural" << 1)),
0,
0,
- 0,
+ nullptr,
QueryOption_CursorTailable);
int n = 0;
while (c->more()) {
@@ -1397,7 +1404,7 @@ public:
void insertNext() {
BSONObjBuilder b;
- b.appendOID("_id", 0, true);
+ b.appendOID("_id", nullptr, true);
b.append("i", _n++);
insert(ns(), b.obj());
}
@@ -1539,7 +1546,7 @@ public:
QUERY("ts" << GTE << Timestamp(1000, j)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
BSONObj next = c->next();
@@ -1596,7 +1603,7 @@ public:
QUERY("ts" << GTE << Timestamp(1000, j)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
BSONObj next = c->next();
@@ -1634,7 +1641,7 @@ public:
QUERY("ts" << GTE << Timestamp(1000, 50)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(!c0->more());
@@ -1656,7 +1663,7 @@ public:
QUERY("ts" << GTE << Timestamp(1000, 50)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(!c->more());
@@ -1667,7 +1674,7 @@ public:
QUERY("ts" << GTE << Timestamp(1000, 50)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay);
ASSERT(c->more());
ASSERT_EQUALS(100u, c->next()["ts"].timestamp().getInc());
@@ -1752,7 +1759,7 @@ public:
BSON("ts" << GTE << Timestamp(1000, 0)),
0,
0,
- 0,
+ nullptr,
QueryOption_OplogReplay | QueryOption_CursorTailable | QueryOption_Exhaust,
message);
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 71824e31bb1..d7d1607c1c2 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -117,12 +117,12 @@ void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) {
bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
auto databaseHolder = DatabaseHolder::get(opCtx);
auto coll = databaseHolder->getDb(opCtx, nss.db())->getCollection(opCtx, nss);
- return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL;
+ return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != nullptr;
}
bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) {
auto databaseHolder = DatabaseHolder::get(opCtx);
auto coll = databaseHolder->getDb(opCtx, nss.db())->getCollection(opCtx, nss);
- return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL;
+ return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != nullptr;
}
size_t getNumIndexEntries(OperationContext* opCtx, const NamespaceString& nss,
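Every hunk in this section applies the same mechanical cleanup: pointer arguments and comparisons spelled as 0 or NULL become nullptr. As a standalone illustration (not taken from the patch; the describe() overloads below are hypothetical), the following minimal sketch shows the overload-resolution pitfall that motivates preferring nullptr for null pointer arguments.

// Standalone sketch, not part of the patch above.
#include <cassert>
#include <string>

static std::string describe(const char* p) {
    // Pointer overload: distinguishes a null pointer from a real string.
    return p ? "non-null pointer" : "null pointer";
}

static std::string describe(int) {
    // Integer overload: what a literal 0 actually binds to.
    return "integer";
}

int main() {
    // A literal 0 is an int, so overload resolution silently picks the int
    // overload even when the caller means "no pointer".
    assert(describe(0) == "integer");

    // nullptr has type std::nullptr_t and converts only to pointer types,
    // so it unambiguously selects the pointer overload.
    assert(describe(nullptr) == "null pointer");
    return 0;
}

With 0/NULL the wrong overload can win silently; with nullptr the call either picks the pointer overload or fails to compile, which is the safety the conversions above buy.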