diff options
author | A. Jesse Jiryu Davis <jesse@mongodb.com> | 2019-06-14 16:42:10 -0400 |
---|---|---|
committer | A. Jesse Jiryu Davis <jesse@mongodb.com> | 2019-06-14 19:23:18 -0400 |
commit | 47b380f03e8898f4706ff01fa2be64dfb72e0dba (patch) | |
tree | fb3508758c9abd0e297afee43ac847bf5aebcbbb | |
parent | b3c26131f6ab3f919beca658341e737de5d45683 (diff) | |
download | mongo-47b380f03e8898f4706ff01fa2be64dfb72e0dba.tar.gz |
SERVER-41071 Replace NULL and 0 with nullptr
288 files changed, 1600 insertions, 1553 deletions
diff --git a/src/mongo/base/clonable_ptr.h b/src/mongo/base/clonable_ptr.h index 135fd272fa0..42b0c63c1db 100644 --- a/src/mongo/base/clonable_ptr.h +++ b/src/mongo/base/clonable_ptr.h @@ -63,7 +63,7 @@ struct detect_clone_factory_type_member_impl { template <typename U> static Yes& test(U*); - static constexpr bool value = sizeof(test<Derived>(0)) == sizeof(Yes); + static constexpr bool value = sizeof(test<Derived>(nullptr)) == sizeof(Yes); using type = typename std::integral_constant<bool, value>::type; }; diff --git a/src/mongo/base/owned_pointer_vector.h b/src/mongo/base/owned_pointer_vector.h index 199e172e4c3..929606ec520 100644 --- a/src/mongo/base/owned_pointer_vector.h +++ b/src/mongo/base/owned_pointer_vector.h @@ -157,7 +157,7 @@ public: */ T* releaseAt(size_t i) { T* out = _vector[i]; - _vector[i] = NULL; + _vector[i] = nullptr; return out; } diff --git a/src/mongo/base/owned_pointer_vector_test.cpp b/src/mongo/base/owned_pointer_vector_test.cpp index 5838785725e..c11bda6fb6f 100644 --- a/src/mongo/base/owned_pointer_vector_test.cpp +++ b/src/mongo/base/owned_pointer_vector_test.cpp @@ -290,7 +290,7 @@ TEST(OwnedPointerVectorTest, ReleaseAt) { holder.reset(owned.releaseAt(1)); ASSERT_EQUALS(0U, DestructionLogger::destroyedNames().size()); ASSERT_EQUALS(3U, owned.size()); - ASSERT_EQUALS(static_cast<DestructionLogger*>(NULL), owned[1]); + ASSERT_EQUALS(static_cast<DestructionLogger*>(nullptr), owned[1]); // owned destroyed } diff --git a/src/mongo/base/status.h b/src/mongo/base/status.h index 52a46d0adb0..d3faa8fd0ec 100644 --- a/src/mongo/base/status.h +++ b/src/mongo/base/status.h @@ -312,7 +312,7 @@ inline unsigned Status::refCount() const { return _error ? 
_error->refs.load() : 0; } -inline Status::Status() : _error(NULL) {} +inline Status::Status() : _error(nullptr) {} inline void Status::ref(ErrorInfo* error) { if (error) diff --git a/src/mongo/base/string_data.h b/src/mongo/base/string_data.h index 4192ccd60f6..3a943348680 100644 --- a/src/mongo/base/string_data.h +++ b/src/mongo/base/string_data.h @@ -279,7 +279,7 @@ inline size_t StringData::find(char c, size_t fromPos) const { return std::string::npos; const void* x = memchr(_data + fromPos, c, _size - fromPos); - if (x == 0) + if (x == nullptr) return std::string::npos; return static_cast<size_t>(static_cast<const char*>(x) - _data); } diff --git a/src/mongo/base/string_data_test.cpp b/src/mongo/base/string_data_test.cpp index a1d9d7d7773..b0cfac8467a 100644 --- a/src/mongo/base/string_data_test.cpp +++ b/src/mongo/base/string_data_test.cpp @@ -42,7 +42,7 @@ using std::string; TEST(Construction, Empty) { StringData strData; ASSERT_EQUALS(strData.size(), 0U); - ASSERT_TRUE(strData.rawData() == NULL); + ASSERT_TRUE(strData.rawData() == nullptr); } TEST(Construction, FromStdString) { @@ -60,10 +60,10 @@ TEST(Construction, FromCString) { } TEST(Construction, FromNullCString) { - char* c = NULL; + char* c = nullptr; StringData strData(c); ASSERT_EQUALS(strData.size(), 0U); - ASSERT_TRUE(strData.rawData() == NULL); + ASSERT_TRUE(strData.rawData() == nullptr); } TEST(Construction, FromUserDefinedLiteral) { diff --git a/src/mongo/bson/bson_validate.cpp b/src/mongo/bson/bson_validate.cpp index da7f1de0c1c..40edd405c9b 100644 --- a/src/mongo/bson/bson_validate.cpp +++ b/src/mongo/bson/bson_validate.cpp @@ -309,7 +309,7 @@ Status validateElementInfo(Buffer* buffer, Status validateBSONIterative(Buffer* buffer) { std::vector<ValidationObjectFrame> frames; frames.reserve(16); - ValidationObjectFrame* curr = NULL; + ValidationObjectFrame* curr = nullptr; ValidationState::State state = ValidationState::BeginObj; uint64_t idElemStartPos = 0; // will become idElem once 
validated diff --git a/src/mongo/bson/bson_validate_test.cpp b/src/mongo/bson/bson_validate_test.cpp index 2c3a9b0d07b..94fb4ec269b 100644 --- a/src/mongo/bson/bson_validate_test.cpp +++ b/src/mongo/bson/bson_validate_test.cpp @@ -146,7 +146,7 @@ TEST(BSONValidate, MuckingData1) { } TEST(BSONValidate, Fuzz) { - int64_t seed = time(0); + int64_t seed = time(nullptr); log() << "BSONValidate Fuzz random seed: " << seed << endl; PseudoRandom randomSource(seed); diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp index 2ad980eb957..d659f84ee13 100644 --- a/src/mongo/bson/bsonelement.cpp +++ b/src/mongo/bson/bsonelement.cpp @@ -160,7 +160,7 @@ void BSONElement::jsonStringStream(JsonStringFormat format, s << " "; } - if (strtol(e.fieldName(), 0, 10) > count) { + if (strtol(e.fieldName(), nullptr, 10) > count) { s << "undefined"; } else { e.jsonStringStream(format, false, pretty ? pretty + 1 : 0, s); diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h index 6baec67d7a6..cfc56a63ef2 100644 --- a/src/mongo/bson/bsonmisc.h +++ b/src/mongo/bson/bsonmisc.h @@ -212,7 +212,7 @@ public: void endField(StringData nextFieldName = StringData()); bool subobjStarted() const { - return _fieldName != 0; + return _fieldName != nullptr; } // The following methods provide API compatibility with BSONArrayBuilder @@ -236,7 +236,7 @@ private: BSONObjBuilder* _builder; bool haveSubobj() const { - return _subobj.get() != 0; + return _subobj.get() != nullptr; } BSONObjBuilder* subobj(); std::unique_ptr<BSONObjBuilder> _subobj; diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h index 2faa7c9cc09..c79615bbb94 100644 --- a/src/mongo/bson/bsonobj.h +++ b/src/mongo/bson/bsonobj.h @@ -697,7 +697,7 @@ public: explicit BSONObjIterator(const BSONObj& jso) { int sz = jso.objsize(); if (MONGO_unlikely(sz == 0)) { - _pos = _theend = 0; + _pos = _theend = nullptr; return; } _pos = jso.objdata() + 4; diff --git a/src/mongo/bson/bsonobjbuilder.cpp 
b/src/mongo/bson/bsonobjbuilder.cpp index 7c64e35cac5..871dcdfb1c0 100644 --- a/src/mongo/bson/bsonobjbuilder.cpp +++ b/src/mongo/bson/bsonobjbuilder.cpp @@ -90,7 +90,7 @@ BSONObjBuilder& BSONObjBuilder::appendMinForType(StringData fieldName, int t) { appendArray(fieldName, BSONObj()); return *this; case BinData: - appendBinData(fieldName, 0, BinDataGeneral, (const char*)0); + appendBinData(fieldName, 0, BinDataGeneral, (const char*)nullptr); return *this; case RegEx: appendRegex(fieldName, ""); diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h index ef28c9d828a..fe8c0a5b555 100644 --- a/src/mongo/bson/bsonobjbuilder.h +++ b/src/mongo/bson/bsonobjbuilder.h @@ -68,7 +68,7 @@ class BSONObjBuilder { public: /** @param initsize this is just a hint as to the final size of the object */ BSONObjBuilder(int initsize = 512) - : _b(_buf), _buf(initsize), _offset(0), _s(this), _tracker(0), _doneCalled(false) { + : _b(_buf), _buf(initsize), _offset(0), _s(this), _tracker(nullptr), _doneCalled(false) { // Skip over space for the object length. The length is filled in by _done. _b.skip(sizeof(int)); @@ -85,7 +85,7 @@ public: _buf(0), _offset(baseBuilder.len()), _s(this), - _tracker(0), + _tracker(nullptr), _doneCalled(false) { // Skip over space for the object length, which is filled in by _done. We don't need a // holder since we are a sub-builder, and some parent builder has already made the @@ -135,7 +135,7 @@ public: * into this constructor where possible. */ BSONObjBuilder(BSONObj prefix) - : _b(_buf), _buf(0), _offset(0), _s(this), _tracker(0), _doneCalled(false) { + : _b(_buf), _buf(0), _offset(0), _s(this), _tracker(nullptr), _doneCalled(false) { // If prefix wasn't owned or we don't have exclusive access to it, we must copy. if (!prefix.isOwned() || prefix.sharedBuffer().isShared()) { _b.grow(prefix.objsize()); // Make sure we won't need to realloc(). 
@@ -386,7 +386,9 @@ public: @deprecated Generally, it is preferred to use the append append(name, oid) method for this. */ - BSONObjBuilder& appendOID(StringData fieldName, OID* oid = 0, bool generateIfBlank = false) { + BSONObjBuilder& appendOID(StringData fieldName, + OID* oid = nullptr, + bool generateIfBlank = false) { _b.appendNum((char)jstOID); _b.appendStr(fieldName); if (oid) diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp index a2bc9872c79..15661b0a456 100644 --- a/src/mongo/bson/json.cpp +++ b/src/mongo/bson/json.cpp @@ -1108,7 +1108,7 @@ Status JParse::chars(std::string* result, const char* terminalSet, const char* a const char* q = _input; while (q < _input_end && !match(*q, terminalSet)) { MONGO_JSON_DEBUG("q: " << q); - if (allowedSet != NULL) { + if (allowedSet != nullptr) { if (!match(*q, allowedSet)) { _input = q; return Status::OK(); @@ -1226,7 +1226,7 @@ inline bool JParse::readToken(const char* token) { bool JParse::readTokenImpl(const char* token, bool advance) { MONGO_JSON_DEBUG("token: " << token); const char* check = _input; - if (token == NULL) { + if (token == nullptr) { return false; } // 'isspace()' takes an 'int' (signed), so (default signed) 'char's get sign-extended @@ -1264,13 +1264,13 @@ bool JParse::readField(StringData expectedField) { } inline bool JParse::match(char matchChar, const char* matchSet) const { - if (matchSet == NULL) { + if (matchSet == nullptr) { return true; } if (*matchSet == '\0') { return false; } - return (strchr(matchSet, matchChar) != NULL); + return (strchr(matchSet, matchChar) != nullptr); } bool JParse::isHexString(StringData str) const { diff --git a/src/mongo/bson/json.h b/src/mongo/bson/json.h index 38217fb7eef..45b08acd095 100644 --- a/src/mongo/bson/json.h +++ b/src/mongo/bson/json.h @@ -52,7 +52,7 @@ namespace mongo { BSONObj fromjson(const std::string& str); /** @param len will be size of JSON object in text chars. 
*/ -BSONObj fromjson(const char* str, int* len = NULL); +BSONObj fromjson(const char* str, int* len = nullptr); /** * Tests whether the JSON string is an Array. @@ -410,7 +410,7 @@ private: * string, but there is no guarantee that it will not contain other * null characters. */ - Status chars(std::string* result, const char* terminalSet, const char* allowedSet = NULL); + Status chars(std::string* result, const char* terminalSet, const char* allowedSet = nullptr); /** * Converts the two byte Unicode code point to its UTF8 character diff --git a/src/mongo/bson/mutable/const_element.h b/src/mongo/bson/mutable/const_element.h index 4b8d483095e..5346c56c52d 100644 --- a/src/mongo/bson/mutable/const_element.h +++ b/src/mongo/bson/mutable/const_element.h @@ -120,7 +120,7 @@ private: friend class Document; template <typename Builder> - inline void writeElement(Builder* builder, const StringData* fieldName = NULL) const; + inline void writeElement(Builder* builder, const StringData* fieldName = nullptr) const; Element _basis; }; diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp index fb75d2d008c..443c57cebf1 100644 --- a/src/mongo/bson/mutable/document.cpp +++ b/src/mongo/bson/mutable/document.cpp @@ -997,7 +997,7 @@ public: // inform upstream that we are not returning in-place result data. 
if (_inPlaceMode == Document::kInPlaceDisabled) { damages->clear(); - *source = NULL; + *source = nullptr; if (size) *size = 0; return false; @@ -1077,7 +1077,7 @@ public: template <typename Builder> void writeElement(Element::RepIdx repIdx, Builder* builder, - const StringData* fieldName = NULL) const; + const StringData* fieldName = nullptr) const; template <typename Builder> void writeChildren(Element::RepIdx repIdx, Builder* builder) const; @@ -2647,7 +2647,7 @@ Element Document::makeElementSafeNum(StringData fieldName, SafeNum value) { } Element Document::makeElement(ConstElement element) { - return makeElement(element, NULL); + return makeElement(element, nullptr); } Element Document::makeElementWithNewFieldName(StringData fieldName, ConstElement element) { diff --git a/src/mongo/bson/mutable/document.h b/src/mongo/bson/mutable/document.h index 0feb056cc35..ac254b7ac21 100644 --- a/src/mongo/bson/mutable/document.h +++ b/src/mongo/bson/mutable/document.h @@ -476,7 +476,7 @@ public: * The destination offsets in the damage events are implicitly offsets into the * BSONObj used to construct this Document. */ - bool getInPlaceUpdates(DamageVector* damages, const char** source, size_t* size = NULL); + bool getInPlaceUpdates(DamageVector* damages, const char** source, size_t* size = nullptr); /** Drop the queue of in-place update damage events, and do not queue new operations * that would otherwise have been in-place. 
Use this if you know that in-place updates diff --git a/src/mongo/bson/mutable/element.h b/src/mongo/bson/mutable/element.h index 85756d1fea9..7438d97f186 100644 --- a/src/mongo/bson/mutable/element.h +++ b/src/mongo/bson/mutable/element.h @@ -724,7 +724,7 @@ inline bool Element::isValueMaxKey() const { } inline bool Element::ok() const { - dassert(_doc != NULL); + dassert(_doc != nullptr); return _repIdx <= kMaxRepIdx; } @@ -745,7 +745,7 @@ inline Element::RepIdx Element::getIdx() const { } inline Element::Element(Document* doc, RepIdx repIdx) : _doc(doc), _repIdx(repIdx) { - dassert(_doc != NULL); + dassert(_doc != nullptr); } inline StringData Element::getValueStringOrSymbol() const { diff --git a/src/mongo/bson/mutable/mutable_bson_test.cpp b/src/mongo/bson/mutable/mutable_bson_test.cpp index fa13986ef12..58a46d04ce3 100644 --- a/src/mongo/bson/mutable/mutable_bson_test.cpp +++ b/src/mongo/bson/mutable/mutable_bson_test.cpp @@ -1017,11 +1017,11 @@ TEST(Documentation, Example2InPlaceWithDamageVector) { // Extract the damage events mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; size_t size = 0; ASSERT_EQUALS(true, doc.getInPlaceUpdates(&damages, &source, &size)); ASSERT_NOT_EQUALS(0U, damages.size()); - ASSERT_NOT_EQUALS(static_cast<const char*>(NULL), source); + ASSERT_NOT_EQUALS(static_cast<const char*>(nullptr), source); ASSERT_NOT_EQUALS(0U, size); apply(&obj, damages, source); @@ -2772,12 +2772,12 @@ TEST(DocumentInPlace, InPlaceModeWorksWithNoMutations) { mongo::BSONObj obj; mmb::Document doc(obj, mmb::Document::kInPlaceEnabled); ASSERT_TRUE(doc.isInPlaceModeEnabled()); - const char* source = NULL; + const char* source = nullptr; mmb::DamageVector damages; ASSERT_TRUE(damages.empty()); doc.getInPlaceUpdates(&damages, &source); ASSERT_TRUE(damages.empty()); - ASSERT_NOT_EQUALS(static_cast<const char*>(NULL), source); + ASSERT_NOT_EQUALS(static_cast<const char*>(nullptr), source); 
ASSERT_TRUE(doc.isInPlaceModeEnabled()); } @@ -2876,14 +2876,14 @@ TEST(DocumentInPlace, GettingInPlaceUpdatesWhenDisabledClearsArguments) { const char* source = "foo"; ASSERT_FALSE(doc.getInPlaceUpdates(&damages, &source)); ASSERT_TRUE(damages.empty()); - ASSERT_EQUALS(static_cast<const char*>(NULL), source); + ASSERT_EQUALS(static_cast<const char*>(nullptr), source); damages.push_back(event); source = "bar"; size_t size = 1; ASSERT_FALSE(doc.getInPlaceUpdates(&damages, &source, &size)); ASSERT_TRUE(damages.empty()); - ASSERT_EQUALS(static_cast<const char*>(NULL), source); + ASSERT_EQUALS(static_cast<const char*>(nullptr), source); ASSERT_EQUALS(0U, size); } @@ -2929,7 +2929,7 @@ TEST(DocumentInPlace, StringLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueString("bar").transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -2955,7 +2955,7 @@ TEST(DocumentInPlace, BinDataLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueBinary(binData2.length, binData2.type, binData2.data).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -2985,7 +2985,7 @@ TEST(DocumentInPlace, OIDLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueOID(oid2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3005,7 +3005,7 @@ TEST(DocumentInPlace, BooleanLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueBool(false).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3031,7 +3031,7 @@ TEST(DocumentInPlace, DateLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector 
damages; - const char* source = NULL; + const char* source = nullptr; x.setValueDate(mongo::Date_t::fromMillisSinceEpoch(20000)).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3053,7 +3053,7 @@ TEST(DocumentInPlace, NumberIntLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueInt(value2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3079,7 +3079,7 @@ TEST(DocumentInPlace, TimestampLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueTimestamp(mongo::Timestamp(mongo::Date_t::fromMillisSinceEpoch(20000))) .transitional_ignore(); @@ -3103,7 +3103,7 @@ TEST(DocumentInPlace, NumberLongLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueLong(value2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3132,7 +3132,7 @@ TEST(DocumentInPlace, NumberDoubleLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueDouble(value2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3161,7 +3161,7 @@ TEST(DocumentInPlace, NumberDecimalLifecycle) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueDecimal(value2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source)); @@ -3192,7 +3192,7 @@ TEST(DocumentInPlace, DoubleToLongAndBack) { mmb::Element x = doc.root().leftChild(); mmb::DamageVector damages; - const char* source = NULL; + const char* source = nullptr; x.setValueLong(value2).transitional_ignore(); ASSERT_TRUE(doc.getInPlaceUpdates(&damages, 
&source)); diff --git a/src/mongo/bson/oid.cpp b/src/mongo/bson/oid.cpp index a4d2d901e61..0f08d6c56b6 100644 --- a/src/mongo/bson/oid.cpp +++ b/src/mongo/bson/oid.cpp @@ -135,7 +135,7 @@ void OID::justForked() { void OID::init() { // each set* method handles endianness - setTimestamp(time(0)); + setTimestamp(time(nullptr)); setInstanceUnique(_instanceUnique); setIncrement(Increment::next()); } diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h index ad890ceb074..d1cd0d53963 100644 --- a/src/mongo/bson/util/builder.h +++ b/src/mongo/bson/util/builder.h @@ -458,7 +458,8 @@ public: verify(z >= 0); verify(z < maxSize); _buf.l = prev + z; - if (strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0) { + if (strchr(start, '.') == nullptr && strchr(start, 'E') == nullptr && + strchr(start, 'N') == nullptr) { write(".0", 2); } } diff --git a/src/mongo/bson/util/builder_test.cpp b/src/mongo/bson/util/builder_test.cpp index 2dc2662e29d..8ceb8c2c5b8 100644 --- a/src/mongo/bson/util/builder_test.cpp +++ b/src/mongo/bson/util/builder_test.cpp @@ -48,7 +48,7 @@ TEST(Builder, StringBuilderAddress) { const void* longPtr = reinterpret_cast<const void*>(-1); const void* shortPtr = reinterpret_cast<const void*>(static_cast<uintptr_t>(0xDEADBEEF)); - const void* nullPtr = NULL; + const void* nullPtr = nullptr; StringBuilder sb; sb << longPtr; diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp index 643817aa9c6..dfe098e1c9d 100644 --- a/src/mongo/client/connection_pool.cpp +++ b/src/mongo/client/connection_pool.cpp @@ -273,7 +273,7 @@ ConnectionPool::ConnectionPtr& ConnectionPool::ConnectionPtr::operator=(Connecti void ConnectionPool::ConnectionPtr::done(Date_t now) { _pool->releaseConnection(_connInfo, now); - _pool = NULL; + _pool = nullptr; } } // namespace mongo diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp index 
76844368edc..81e3bdaef4f 100644 --- a/src/mongo/client/connection_string_connect.cpp +++ b/src/mongo/client/connection_string_connect.cpp @@ -44,7 +44,7 @@ namespace mongo { stdx::mutex ConnectionString::_connectHookMutex; -ConnectionString::ConnectionHook* ConnectionString::_connectHook = NULL; +ConnectionString::ConnectionHook* ConnectionString::_connectHook = nullptr; std::unique_ptr<DBClientBase> ConnectionString::connect(StringData applicationName, std::string& errmsg, diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp index d4d58addcfc..5901f695014 100644 --- a/src/mongo/client/connpool.cpp +++ b/src/mongo/client/connpool.cpp @@ -572,7 +572,7 @@ bool DBConnectionPool::poolKeyCompare::operator()(const PoolKey& a, const PoolKe } bool DBConnectionPool::isConnectionGood(const string& hostName, DBClientBase* conn) { - if (conn == NULL) { + if (conn == nullptr) { return false; } @@ -642,13 +642,13 @@ void ScopedDbConnection::done() { } globalConnPool.release(_host, _conn); - _conn = NULL; + _conn = nullptr; } void ScopedDbConnection::kill() { globalConnPool.decrementEgress(_host, _conn); delete _conn; - _conn = NULL; + _conn = nullptr; } void ScopedDbConnection::_setSocketTimeout() { diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h index 24e2cd70dd4..9fbf65214db 100644 --- a/src/mongo/client/connpool.h +++ b/src/mongo/client/connpool.h @@ -457,7 +457,7 @@ public: explicit ScopedDbConnection(const ConnectionString& host, double socketTimeout = 0); explicit ScopedDbConnection(const MongoURI& host, double socketTimeout = 0); - ScopedDbConnection() : _host(""), _conn(0), _socketTimeoutSecs(0) {} + ScopedDbConnection() : _host(""), _conn(nullptr), _socketTimeoutSecs(0) {} /* @param conn - bind to an existing connection */ ScopedDbConnection(const std::string& host, DBClientBase* conn, double socketTimeout = 0) @@ -488,7 +488,7 @@ public: } bool ok() const { - return _conn != NULL; + return _conn != nullptr; } std::string 
getHost() const { diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp index dcf30fa1881..3c896123850 100644 --- a/src/mongo/client/dbclient_base.cpp +++ b/src/mongo/client/dbclient_base.cpp @@ -287,7 +287,7 @@ bool DBClientBase::runCommand(const string& dbname, BSONObj cmd, BSONObj& info, */ bool DBClientBase::simpleCommand(const string& dbname, BSONObj* info, const string& command) { BSONObj o; - if (info == 0) + if (info == nullptr) info = &o; BSONObjBuilder b; b.append(command, 1); @@ -532,7 +532,7 @@ bool DBClientBase::isMaster(bool& isMaster, BSONObj* info) { } BSONObj o; - if (info == 0) + if (info == nullptr) info = &o; bool ok = runCommand("admin", bob.obj(), *info); isMaster = info->getField("ismaster").trueValue(); @@ -543,7 +543,7 @@ bool DBClientBase::createCollection( const string& ns, long long size, bool capped, int max, BSONObj* info) { verify(!capped || size); BSONObj o; - if (info == 0) + if (info == nullptr) info = &o; BSONObjBuilder b; string db = nsToDatabase(ns); diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h index 558cc3efa8e..a7cdfc43c3b 100644 --- a/src/mongo/client/dbclient_base.h +++ b/src/mongo/client/dbclient_base.h @@ -77,21 +77,21 @@ class DBClientQueryInterface { Query query, int nToReturn = 0, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) = 0; virtual unsigned long long query(std::function<void(const BSONObj&)> f, const NamespaceStringOrUUID& nsOrUuid, Query query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) = 0; virtual unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, Query query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) = 0; }; @@ -118,7 +118,7 @@ 
public: */ virtual BSONObj findOne(const std::string& ns, const Query& query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0); /** query N objects from the database into an array. makes sense mostly when you want a small @@ -129,7 +129,7 @@ public: Query query, int nToReturn, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0); /** @@ -368,7 +368,7 @@ public: returns true if command invoked successfully. */ - virtual bool isMaster(bool& isMaster, BSONObj* info = 0); + virtual bool isMaster(bool& isMaster, BSONObj* info = nullptr); /** Create a new collection in the database. Normally, collection creation is automatic. You @@ -390,7 +390,7 @@ public: long long size = 0, bool capped = false, int max = 0, - BSONObj* info = 0); + BSONObj* info = nullptr); /** Get error result from the last write operation (insert/update/delete) on this connection. db doesn't change the command's behavior - it is just for auth checks. 
@@ -583,7 +583,7 @@ public: Query query, int nToReturn = 0, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; @@ -605,14 +605,14 @@ public: unsigned long long query(std::function<void(const BSONObj&)> f, const NamespaceStringOrUUID& nsOrUuid, Query query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = QueryOption_Exhaust, int batchSize = 0) final; unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, Query query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = QueryOption_Exhaust, int batchSize = 0) override; diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h index dc2f9ae7f6c..5b2976a134f 100644 --- a/src/mongo/client/dbclient_connection.h +++ b/src/mongo/client/dbclient_connection.h @@ -154,7 +154,7 @@ public: Query query = Query(), int nToReturn = 0, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override { checkConnection(); @@ -227,12 +227,12 @@ public: return _serverAddress; } - void say(Message& toSend, bool isRetry = false, std::string* actualServer = 0) override; + void say(Message& toSend, bool isRetry = false, std::string* actualServer = nullptr) override; bool recv(Message& m, int lastRequestId) override; void checkResponse(const std::vector<BSONObj>& batch, bool networkError, - bool* retry = NULL, - std::string* host = NULL) override; + bool* retry = nullptr, + std::string* host = nullptr) override; bool call(Message& toSend, Message& response, bool assertOk, diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp index 3e049f786e5..16be475d4b0 100644 --- a/src/mongo/client/dbclient_cursor.cpp +++ 
b/src/mongo/client/dbclient_cursor.cpp @@ -490,7 +490,7 @@ void DBClientCursor::attach(AScopedConnection* conn) { } conn->done(); - _client = 0; + _client = nullptr; _lazyHost = ""; } diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h index e5b69b10b50..ad3a2c89583 100644 --- a/src/mongo/client/dbclient_cursor.h +++ b/src/mongo/client/dbclient_cursor.h @@ -94,7 +94,7 @@ public: /** * peek ahead and see if an error occurred, and get the error if so. */ - bool peekError(BSONObj* error = NULL); + bool peekError(BSONObj* error = nullptr); /** iterate the rest of the cursor and return the number if items diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp index 033f39b0bef..b7e2044f93c 100644 --- a/src/mongo/client/dbclient_rs.cpp +++ b/src/mongo/client/dbclient_rs.cpp @@ -298,7 +298,7 @@ DBClientConnection* DBClientReplicaSet::checkMaster() { MongoURI masterUri = _uri.cloneURIForServer(_masterHost); string errmsg; - DBClientConnection* newConn = NULL; + DBClientConnection* newConn = nullptr; boost::optional<double> socketTimeout; if (_so_timeout > 0.0) socketTimeout = _so_timeout; @@ -313,7 +313,7 @@ DBClientConnection* DBClientReplicaSet::checkMaster() { errmsg = ex.toString(); } - if (newConn == NULL || !errmsg.empty()) { + if (newConn == nullptr || !errmsg.empty()) { const std::string message = str::stream() << "can't connect to new replica set master [" << _masterHost.toString() << "]" << (errmsg.empty() ? "" : ", err: ") << errmsg; @@ -398,7 +398,7 @@ DBClientConnection& DBClientReplicaSet::slaveConn() { uassert(16369, str::stream() << "No good nodes available for set: " << _getMonitor()->getName(), - conn != NULL); + conn != nullptr); return *conn; } @@ -493,7 +493,7 @@ void DBClientReplicaSet::logout(const string& dbname, BSONObj& info) { * needed when we actually have something cached and is last known to be * working. 
*/ - if (_lastSlaveOkConn.get() != NULL && !_lastSlaveOkConn->isFailed()) { + if (_lastSlaveOkConn.get() != nullptr && !_lastSlaveOkConn->isFailed()) { try { BSONObj dummy; _lastSlaveOkConn->logout(dbname, dummy); @@ -536,8 +536,8 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID LOG(3) << "dbclient_rs query using secondary or tagged node selection in " << _getMonitor()->getName() << ", read pref is " << readPref->toString() << " (primary : " - << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]") - << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL + << (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]") + << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr ? _lastSlaveOkConn->getServerAddress() : "[not cached]") << ")" << endl; @@ -548,7 +548,7 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID try { DBClientConnection* conn = selectNodeUsingTags(readPref); - if (conn == NULL) { + if (conn == nullptr) { break; } @@ -588,8 +588,8 @@ BSONObj DBClientReplicaSet::findOne(const string& ns, LOG(3) << "dbclient_rs findOne using secondary or tagged node selection in " << _getMonitor()->getName() << ", read pref is " << readPref->toString() << " (primary : " - << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]") - << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL + << (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]") + << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr ? 
_lastSlaveOkConn->getServerAddress() : "[not cached]") << ")" << endl; @@ -600,7 +600,7 @@ BSONObj DBClientReplicaSet::findOne(const string& ns, try { DBClientConnection* conn = selectNodeUsingTags(readPref); - if (conn == NULL) { + if (conn == nullptr) { break; } @@ -647,7 +647,7 @@ void DBClientReplicaSet::isntMaster() { unique_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( unique_ptr<DBClientCursor> result) { - if (result.get() == NULL) + if (result.get() == nullptr) return result; BSONObj error; @@ -733,7 +733,7 @@ DBClientConnection* DBClientReplicaSet::selectNodeUsingTags( // that returning NULL means none of the nodes were good, which is not the case here. uassert(16532, str::stream() << "Failed to connect to " << _lastSlaveOkHost.toString(), - newConn != NULL); + newConn != nullptr); _lastSlaveOkConn = std::shared_ptr<DBClientConnection>(newConn, std::move(dtor)); _lastSlaveOkConn->setParentReplSetName(_setName); @@ -767,8 +767,8 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer LOG(3) << "dbclient_rs say using secondary or tagged node selection in " << _getMonitor()->getName() << ", read pref is " << readPref->toString() << " (primary : " - << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]") - << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL + << (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]") + << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr ? 
_lastSlaveOkConn->getServerAddress() : "[not cached]") << ")" << endl; @@ -780,11 +780,11 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer try { DBClientConnection* conn = selectNodeUsingTags(readPref); - if (conn == NULL) { + if (conn == nullptr) { break; } - if (actualServer != NULL) { + if (actualServer != nullptr) { *actualServer = conn->getServerAddress(); } @@ -985,7 +985,7 @@ bool DBClientReplicaSet::call(Message& toSend, Message& response, bool assertOk, string* actualServer) { - const char* ns = 0; + const char* ns = nullptr; if (toSend.operation() == dbQuery) { // TODO: might be possible to do this faster by changing api @@ -998,8 +998,8 @@ bool DBClientReplicaSet::call(Message& toSend, LOG(3) << "dbclient_rs call using secondary or tagged node selection in " << _getMonitor()->getName() << ", read pref is " << readPref->toString() << " (primary : " - << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]") - << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL + << (_master.get() != nullptr ? _master->getServerAddress() : "[not cached]") + << ", lastTagged : " << (_lastSlaveOkConn.get() != nullptr ? 
_lastSlaveOkConn->getServerAddress() : "[not cached]") << ")" << endl; @@ -1008,11 +1008,11 @@ bool DBClientReplicaSet::call(Message& toSend, try { DBClientConnection* conn = selectNodeUsingTags(readPref); - if (conn == NULL) { + if (conn == nullptr) { return false; } - if (actualServer != NULL) { + if (actualServer != nullptr) { *actualServer = conn->getServerAddress(); } @@ -1068,7 +1068,7 @@ void DBClientReplicaSet::_invalidateLastSlaveOkCache(const Status& status) { void DBClientReplicaSet::reset() { resetSlaveOkConn(); - _lazyState._lastClient = NULL; + _lazyState._lastClient = nullptr; _lastReadPref.reset(); } @@ -1089,7 +1089,7 @@ void DBClientReplicaSet::resetMaster() { void DBClientReplicaSet::resetSlaveOkConn() { if (_lastSlaveOkConn.get() == _master.get()) { _lastSlaveOkConn.reset(); - } else if (_lastSlaveOkConn.get() != NULL) { + } else if (_lastSlaveOkConn.get() != nullptr) { if (_authPooledSecondaryConn) { logoutAll(_lastSlaveOkConn.get()); } else { diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h index 506a3cf6f16..92e2a93acb0 100644 --- a/src/mongo/client/dbclient_rs.h +++ b/src/mongo/client/dbclient_rs.h @@ -93,14 +93,14 @@ public: Query query, int nToReturn = 0, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; /** throws userassertion "no master found" */ BSONObj findOne(const std::string& ns, const Query& query, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0) override; void insert(const std::string& ns, BSONObj obj, int flags = 0) override; @@ -136,12 +136,12 @@ public: // ---- callback pieces ------- - void say(Message& toSend, bool isRetry = false, std::string* actualServer = 0) override; + void say(Message& toSend, bool isRetry = false, std::string* actualServer = nullptr) override; bool recv(Message& toRecv, int lastRequestId) override; void checkResponse(const 
std::vector<BSONObj>& batch, bool networkError, - bool* retry = NULL, - std::string* targetHost = NULL) override; + bool* retry = nullptr, + std::string* targetHost = nullptr) override; /* this is the callback from our underlying connections to notify us that we got a "not master" * error. @@ -345,7 +345,7 @@ protected: */ class LazyState { public: - LazyState() : _lastClient(NULL), _lastOp(-1), _secondaryQueryOk(false), _retries(0) {} + LazyState() : _lastClient(nullptr), _lastOp(-1), _secondaryQueryOk(false), _retries(0) {} DBClientConnection* _lastClient; int _lastOp; bool _secondaryQueryOk; diff --git a/src/mongo/client/index_spec.cpp b/src/mongo/client/index_spec.cpp index c5d3061d781..4562a8fe17f 100644 --- a/src/mongo/client/index_spec.cpp +++ b/src/mongo/client/index_spec.cpp @@ -46,8 +46,8 @@ namespace { const int kIndexTypeNumbers[] = {IndexSpec::kIndexValAscending, IndexSpec::kIndexValDescending}; -const char* const kIndexTypeStrings[] = {NULL, - NULL, +const char* const kIndexTypeStrings[] = {nullptr, + nullptr, IndexSpec::kIndexValText, IndexSpec::kIndexValGeo2D, IndexSpec::kIndexValGeoHaystack, diff --git a/src/mongo/client/query.h b/src/mongo/client/query.h index c8adf396cfe..24444e853e8 100644 --- a/src/mongo/client/query.h +++ b/src/mongo/client/query.h @@ -131,8 +131,8 @@ public: /** * @return true if this query has an orderby, hint, or some other field */ - bool isComplex(bool* hasDollar = 0) const; - static bool isComplex(const BSONObj& obj, bool* hasDollar = 0); + bool isComplex(bool* hasDollar = nullptr) const; + static bool isComplex(const BSONObj& obj, bool* hasDollar = nullptr); BSONObj getFilter() const; BSONObj getSort() const; diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp index cca254b4007..d134f935070 100644 --- a/src/mongo/client/replica_set_monitor.cpp +++ b/src/mongo/client/replica_set_monitor.cpp @@ -1200,7 +1200,7 @@ std::vector<HostAndPort> SetState::getMatchingHosts(const 
ReadPreferenceSetting& Node* SetState::findNode(const HostAndPort& host) { const Nodes::iterator it = std::lower_bound(nodes.begin(), nodes.end(), host, compareHosts); if (it == nodes.end() || it->host != host) - return NULL; + return nullptr; return &(*it); } diff --git a/src/mongo/client/replica_set_monitor_read_preference_test.cpp b/src/mongo/client/replica_set_monitor_read_preference_test.cpp index 3f738d6cd41..374a3f5f9c5 100644 --- a/src/mongo/client/replica_set_monitor_read_preference_test.cpp +++ b/src/mongo/client/replica_set_monitor_read_preference_test.cpp @@ -589,7 +589,7 @@ public: virtual ~MultiTagsTest() = default; const TagSet& getMatchesFirstTagSet() { - if (matchFirstTags.get() != NULL) { + if (matchFirstTags.get() != nullptr) { return *matchFirstTags; } @@ -604,7 +604,7 @@ public: } const TagSet& getMatchesSecondTagSet() { - if (matchSecondTags.get() != NULL) { + if (matchSecondTags.get() != nullptr) { return *matchSecondTags; } @@ -621,7 +621,7 @@ public: } const TagSet& getMatchesLastTagSet() { - if (matchLastTags.get() != NULL) { + if (matchLastTags.get() != nullptr) { return *matchLastTags; } @@ -642,7 +642,7 @@ public: } const TagSet& getMatchesPriTagSet() { - if (matchPriTags.get() != NULL) { + if (matchPriTags.get() != nullptr) { return *matchPriTags; } diff --git a/src/mongo/crypto/sha_block_openssl.cpp b/src/mongo/crypto/sha_block_openssl.cpp index c776985c3b7..be2e9a67db9 100644 --- a/src/mongo/crypto/sha_block_openssl.cpp +++ b/src/mongo/crypto/sha_block_openssl.cpp @@ -98,13 +98,13 @@ HashType computeHashImpl(const EVP_MD* md, std::initializer_list<ConstDataRange> EVP_MD_CTX_free); fassert(40379, - EVP_DigestInit_ex(digestCtx.get(), md, NULL) == 1 && + EVP_DigestInit_ex(digestCtx.get(), md, nullptr) == 1 && std::all_of(begin(input), end(input), [&](const auto& i) { return EVP_DigestUpdate(digestCtx.get(), i.data(), i.length()) == 1; }) && - EVP_DigestFinal_ex(digestCtx.get(), output.data(), NULL) == 1); + 
EVP_DigestFinal_ex(digestCtx.get(), output.data(), nullptr) == 1); return output; } @@ -117,7 +117,7 @@ void computeHmacImpl(const EVP_MD* md, std::unique_ptr<HMAC_CTX, decltype(&HMAC_CTX_free)> digestCtx(HMAC_CTX_new(), HMAC_CTX_free); fassert(40380, - HMAC_Init_ex(digestCtx.get(), key, keyLen, md, NULL) == 1 && + HMAC_Init_ex(digestCtx.get(), key, keyLen, md, nullptr) == 1 && std::all_of(begin(input), end(input), [&](const auto& i) { @@ -125,7 +125,7 @@ void computeHmacImpl(const EVP_MD* md, reinterpret_cast<const unsigned char*>(i.data()), i.length()) == 1; }) && - HMAC_Final(digestCtx.get(), output->data(), NULL) == 1); + HMAC_Final(digestCtx.get(), output->data(), nullptr) == 1); } } // namespace diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp index e3184bef814..7fa1f2ff5f0 100644 --- a/src/mongo/db/auth/authz_manager_external_state_local.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp @@ -574,7 +574,7 @@ private: void _refreshRoleGraph() { stdx::lock_guard<stdx::mutex> lk(_externalState->_roleGraphMutex); Status status = _externalState->_roleGraph.handleLogOp( - _opCtx, _op.c_str(), _nss, _o, _o2 ? &*_o2 : NULL); + _opCtx, _op.c_str(), _nss, _o, _o2 ? 
&*_o2 : nullptr); if (status == ErrorCodes::OplogOperationUnsupported) { _externalState->_roleGraph = RoleGraph(); diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp index 42ad4dd5b53..16cdd668604 100644 --- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp @@ -90,7 +90,7 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges } } // namespace -AuthzManagerExternalStateMock::AuthzManagerExternalStateMock() : _authzManager(NULL) {} +AuthzManagerExternalStateMock::AuthzManagerExternalStateMock() : _authzManager(nullptr) {} AuthzManagerExternalStateMock::~AuthzManagerExternalStateMock() {} void AuthzManagerExternalStateMock::setAuthorizationManager(AuthorizationManager* authzManager) { @@ -164,7 +164,7 @@ Status AuthzManagerExternalStateMock::insert(OperationContext* opCtx, _documents[collectionName].push_back(toInsert); if (_authzManager) { - _authzManager->logOp(opCtx, "i", collectionName, toInsert, NULL); + _authzManager->logOp(opCtx, "i", collectionName, toInsert, nullptr); } return Status::OK(); @@ -262,7 +262,7 @@ Status AuthzManagerExternalStateMock::remove(OperationContext* opCtx, ++n; if (_authzManager) { - _authzManager->logOp(opCtx, "d", collectionName, idQuery, NULL); + _authzManager->logOp(opCtx, "d", collectionName, idQuery, nullptr); } } *numRemoved = n; diff --git a/src/mongo/db/auth/privilege_parser.cpp b/src/mongo/db/auth/privilege_parser.cpp index 2cae5290649..7fb7ea38008 100644 --- a/src/mongo/db/auth/privilege_parser.cpp +++ b/src/mongo/db/auth/privilege_parser.cpp @@ -56,7 +56,7 @@ ParsedResource::~ParsedResource() {} bool ParsedResource::isValid(std::string* errMsg) const { std::string dummy; - if (errMsg == NULL) { + if (errMsg == nullptr) { errMsg = &dummy; } @@ -273,7 +273,7 @@ ParsedPrivilege::~ParsedPrivilege() {} bool ParsedPrivilege::isValid(std::string* errMsg) const 
{ std::string dummy; - if (errMsg == NULL) { + if (errMsg == nullptr) { errMsg = &dummy; } diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp index 8093864dfe3..51f91a10ba8 100644 --- a/src/mongo/db/auth/role_graph.cpp +++ b/src/mongo/db/auth/role_graph.cpp @@ -132,19 +132,19 @@ Status RoleGraph::deleteRole(const RoleName& role) { RoleNameIterator RoleGraph::getDirectSubordinates(const RoleName& role) { if (!roleExists(role)) - return RoleNameIterator(NULL); + return RoleNameIterator(nullptr); return makeRoleNameIteratorForContainer(_roleToSubordinates[role]); } RoleNameIterator RoleGraph::getIndirectSubordinates(const RoleName& role) { if (!roleExists(role)) - return RoleNameIterator(NULL); + return RoleNameIterator(nullptr); return makeRoleNameIteratorForContainer(_roleToIndirectSubordinates[role]); } RoleNameIterator RoleGraph::getDirectMembers(const RoleName& role) { if (!roleExists(role)) - return RoleNameIterator(NULL); + return RoleNameIterator(nullptr); return makeRoleNameIteratorForContainer(_roleToMembers[role]); } diff --git a/src/mongo/db/auth/role_name.h b/src/mongo/db/auth/role_name.h index 75aa6202920..ccabd86b4a2 100644 --- a/src/mongo/db/auth/role_name.h +++ b/src/mongo/db/auth/role_name.h @@ -134,7 +134,7 @@ public: Impl(){}; virtual ~Impl(){}; static Impl* clone(Impl* orig) { - return orig ? orig->doClone() : NULL; + return orig ? 
orig->doClone() : nullptr; } virtual bool more() const = 0; virtual const RoleName& get() const = 0; diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h index 8657a48589f..25c85a660fb 100644 --- a/src/mongo/db/catalog/collection_impl.h +++ b/src/mongo/db/catalog/collection_impl.h @@ -330,7 +330,7 @@ public: } uint64_t getIndexSize(OperationContext* opCtx, - BSONObjBuilder* details = NULL, + BSONObjBuilder* details = nullptr, int scale = 1) const final; /** diff --git a/src/mongo/db/catalog/collection_info_cache_impl.cpp b/src/mongo/db/catalog/collection_info_cache_impl.cpp index f08c5e3bbb1..bf4866b750a 100644 --- a/src/mongo/db/catalog/collection_info_cache_impl.cpp +++ b/src/mongo/db/catalog/collection_info_cache_impl.cpp @@ -167,7 +167,7 @@ void CollectionInfoCacheImpl::notifyOfQuery(OperationContext* opCtx, for (auto it = indexesUsed.begin(); it != indexesUsed.end(); ++it) { // This index should still exist, since the PlanExecutor would have been killed if the // index was dropped (and we would not get here). 
- dassert(NULL != _collection->getIndexCatalog()->findIndexByName(opCtx, *it)); + dassert(nullptr != _collection->getIndexCatalog()->findIndexByName(opCtx, *it)); _indexUsageTracker.recordIndexAccess(*it); } @@ -175,7 +175,7 @@ void CollectionInfoCacheImpl::notifyOfQuery(OperationContext* opCtx, void CollectionInfoCacheImpl::clearQueryCache() { LOG(1) << _collection->ns() << ": clearing plan cache - collection info cache reset"; - if (NULL != _planCache.get()) { + if (nullptr != _planCache.get()) { _planCache->clear(); } } diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp index 7d38bbc3c82..d499edbfc67 100644 --- a/src/mongo/db/catalog/database_holder_impl.cpp +++ b/src/mongo/db/catalog/database_holder_impl.cpp @@ -80,7 +80,7 @@ Database* DatabaseHolderImpl::getDb(OperationContext* opCtx, StringData ns) cons return it->second; } - return NULL; + return nullptr; } std::set<std::string> DatabaseHolderImpl::_getNamesWithConflictingCasing_inlock(StringData name) { diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 8e7738e4e5f..ed556c88af4 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -583,7 +583,7 @@ void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx, << " bytes)", !nss.isNormal() || nss.size() <= NamespaceString::MaxNsCollectionLen); - uassert(17316, "cannot create a blank collection", nss.coll() > 0); + uassert(17316, "cannot create a blank collection", nss.coll() > nullptr); uassert(28838, "cannot create a non-capped oplog collection", options.capped || !nss.isOplog()); uassert(ErrorCodes::DatabaseDropPending, str::stream() << "Cannot create collection " << nss diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index 2c03dce3e03..65c6ed1d142 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ 
b/src/mongo/db/catalog/multi_index_block.cpp @@ -575,7 +575,7 @@ Status MultiIndexBlock::dumpInsertsFromBulk(OperationContext* opCtx, invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); for (size_t i = 0; i < _indexes.size(); i++) { - if (_indexes[i].bulk == NULL) + if (_indexes[i].bulk == nullptr) continue; // If 'dupRecords' is provided, it will be used to store all records that would result in diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h index af53fa21e04..0d12e4630e6 100644 --- a/src/mongo/db/catalog/multi_index_block.h +++ b/src/mongo/db/catalog/multi_index_block.h @@ -297,7 +297,7 @@ private: struct IndexToBuild { std::unique_ptr<IndexCatalog::IndexBuildBlockInterface> block; - IndexAccessMethod* real = NULL; // owned elsewhere + IndexAccessMethod* real = nullptr; // owned elsewhere const MatchExpression* filterExpression; // might be NULL, owned elsewhere std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk; diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index b177bc24e07..8c1eb3c3bfb 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -117,13 +117,13 @@ ServiceContext::UniqueOperationContext Client::makeOperationContext() { void Client::setOperationContext(OperationContext* opCtx) { // We can only set the OperationContext once before resetting it. 
- invariant(opCtx != NULL && _opCtx == NULL); + invariant(opCtx != nullptr && _opCtx == nullptr); _opCtx = opCtx; } void Client::resetOperationContext() { - invariant(_opCtx != NULL); - _opCtx = NULL; + invariant(_opCtx != nullptr); + _opCtx = nullptr; } std::string Client::clientAddress(bool includePort) const { diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 3e03837118f..730f3086e38 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -142,7 +142,7 @@ struct Cloner::Fun { auto db = databaseHolder->openDb(opCtx, _dbName); bool createdCollection = false; - Collection* collection = NULL; + Collection* collection = nullptr; collection = db->getCollection(opCtx, to_collection); if (!collection) { @@ -177,7 +177,7 @@ struct Cloner::Fun { while (i.moreInCurrentBatch()) { if (numSeen % 128 == 127) { - time_t now = time(0); + time_t now = time(nullptr); if (now - lastLog >= 60) { // report progress if (lastLog) @@ -205,13 +205,13 @@ struct Cloner::Fun { db = databaseHolder->getDb(opCtx, _dbName); uassert(28593, str::stream() << "Database " << _dbName << " dropped while cloning", - db != NULL); + db != nullptr); collection = db->getCollection(opCtx, to_collection); uassert(28594, str::stream() << "Collection " << to_collection.ns() << " dropped while cloning", - collection != NULL); + collection != nullptr); } BSONObj tmp = i.nextSafe(); @@ -271,9 +271,9 @@ struct Cloner::Fun { }); static Rarely sampler; - if (sampler.tick() && (time(0) - saveLast > 60)) { + if (sampler.tick() && (time(nullptr) - saveLast > 60)) { log() << numSeen << " objects cloned so far from collection " << from_collection; - saveLast = time(0); + saveLast = time(nullptr); } } } @@ -310,7 +310,7 @@ void Cloner::copy(OperationContext* opCtx, f.from_options = from_opts; f.from_id_index = from_id_index; f.to_collection = to_collection; - f.saveLast = time(0); + f.saveLast = time(nullptr); f._opts = opts; int options = QueryOption_NoCursorTimeout | (opts.slaveOk ? 
QueryOption_SlaveOk : 0) | @@ -320,7 +320,7 @@ void Cloner::copy(OperationContext* opCtx, _conn->query(std::function<void(DBClientCursorBatchIterator&)>(f), from_collection, query, - 0, + nullptr, options); } diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index 2f960430e2a..71343fa05e4 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -484,7 +484,7 @@ public: keyPattern, true); // requireSingleKey - if (idx == NULL) { + if (idx == nullptr) { errmsg = "couldn't find valid index containing key pattern"; return false; } diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 2681ed180b2..5b83d1e9877 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -265,7 +265,7 @@ public: BSONObj obj; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { BSONElement ne = obj["n"]; verify(ne.isNumber()); int myn = ne.numberInt(); diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp index 7dc15854506..b24fcc3fd45 100644 --- a/src/mongo/db/commands/dbhash.cpp +++ b/src/mongo/db/commands/dbhash.cpp @@ -374,8 +374,8 @@ private: long long n = 0; PlanExecutor::ExecState state; BSONObj c; - verify(NULL != exec.get()); - while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, NULL))) { + verify(nullptr != exec.get()); + while (PlanExecutor::ADVANCED == (state = exec->getNext(&c, nullptr))) { md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize()); n++; } diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp index e31a0843f2c..7d00315fd98 100644 --- a/src/mongo/db/commands/distinct.cpp +++ b/src/mongo/db/commands/distinct.cpp @@ -239,7 +239,7 @@ public: BSONObj obj; PlanExecutor::ExecState state; - while 
(PlanExecutor::ADVANCED == (state = executor.getValue()->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = executor.getValue()->getNext(&obj, nullptr))) { // Distinct expands arrays. // // If our query is covered, each value of the key should be in the index key and diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp index 83d2c3c5dcc..1549ca4f12b 100644 --- a/src/mongo/db/commands/getmore_cmd.cpp +++ b/src/mongo/db/commands/getmore_cmd.cpp @@ -277,7 +277,7 @@ public: BSONObj obj; try { while (!FindCommon::enoughForGetMore(request.batchSize.value_or(0), *numResults) && - PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) { + PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, nullptr))) { // If adding this object will cause us to exceed the message size limit, then we // stash it for later. if (!FindCommon::haveSpaceForNext(obj, *numResults, nextBatch->bytesUsed())) { diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp index 6df4e890028..b5c65b1e2be 100644 --- a/src/mongo/db/commands/index_filter_commands.cpp +++ b/src/mongo/db/commands/index_filter_commands.cpp @@ -67,9 +67,9 @@ static Status getQuerySettingsAndPlanCache(OperationContext* opCtx, const string& ns, QuerySettings** querySettingsOut, PlanCache** planCacheOut) { - *querySettingsOut = NULL; - *planCacheOut = NULL; - if (NULL == collection) { + *querySettingsOut = nullptr; + *planCacheOut = nullptr; + if (nullptr == collection) { return Status(ErrorCodes::BadValue, "no such collection"); } diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp index 5c116d3a500..29cd4b02a29 100644 --- a/src/mongo/db/commands/list_collections.cpp +++ b/src/mongo/db/commands/list_collections.cpp @@ -366,7 +366,7 @@ public: for (long long objCount = 0; objCount < batchSize; objCount++) { BSONObj next; - PlanExecutor::ExecState state = 
exec->getNext(&next, NULL); + PlanExecutor::ExecState state = exec->getNext(&next, nullptr); if (state == PlanExecutor::IS_EOF) { break; } diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp index bb0b394b273..59721e91ce4 100644 --- a/src/mongo/db/commands/list_indexes.cpp +++ b/src/mongo/db/commands/list_indexes.cpp @@ -199,7 +199,7 @@ public: for (long long objCount = 0; objCount < batchSize; objCount++) { BSONObj next; - PlanExecutor::ExecState state = exec->getNext(&next, NULL); + PlanExecutor::ExecState state = exec->getNext(&next, nullptr); if (state == PlanExecutor::IS_EOF) { break; } diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index f936129a526..d4a8b7da1b3 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -266,7 +266,7 @@ void JSMapper::map(const BSONObj& o) { BSONObj JSFinalizer::finalize(const BSONObj& o) { Scope* s = _func.scope(); - s->invokeSafe(_func.func(), &o, 0); + s->invokeSafe(_func.func(), &o, nullptr); // We don't want to use o.objsize() to size b since there are many cases where the point of // finalize is converting many fields to 1 @@ -371,7 +371,7 @@ void JSReducer::_reduce(const BSONList& tuples, BSONObj& key, int& endSizeEstima Scope* s = _func.scope(); - s->invokeSafe(_func.func(), &args, 0); + s->invokeSafe(_func.func(), &args, nullptr); ++numReduces; if (s->type("__returnValue") == Array) { @@ -669,7 +669,7 @@ void State::appendResults(BSONObjBuilder& final) { " result.push({_id: key, value: map[key]});" "}" "return result;"); - _scope->invoke(getResult, 0, 0, 0, false); + _scope->invoke(getResult, nullptr, nullptr, 0, false); BSONObj obj = _scope->getObject("__returnValue"); final.append("results", BSONArray(obj)); return; @@ -832,7 +832,7 @@ void State::insert(const NamespaceString& nss, const BSONObj& o) { WriteUnitOfWork wuow(_opCtx); BSONObjBuilder b; if (!o.hasField("_id")) { - b.appendOID("_id", NULL, true); + b.appendOID("_id", 
nullptr, true); } b.appendElements(o); BSONObj bo = b.obj(); @@ -934,7 +934,7 @@ State::~State() { try { ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;"); - _scope->invoke(cleanup, 0, 0, 0, true); + _scope->invoke(cleanup, nullptr, nullptr, 0, true); } catch (const DBException&) { // not important because properties will be reset if scope is reused LOG(1) << "MapReduce terminated during state destruction"; @@ -962,7 +962,7 @@ void State::init() { _config.reducer->init(this); if (_config.finalizer) _config.finalizer->init(this); - _scope->setBoolean("_doFinal", _config.finalizer.get() != 0); + _scope->setBoolean("_doFinal", _config.finalizer.get() != nullptr); switchMode(_config.jsMode); // set up js-mode based on Config @@ -979,7 +979,7 @@ void State::init() { "if (typeof(_mrMap) === 'undefined') {" " _mrMap = {};" "}"); - _scope->invoke(init, 0, 0, 0, true); + _scope->invoke(init, nullptr, nullptr, 0, true); // js function to run reduce on all keys // redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); @@ -1085,7 +1085,7 @@ void State::bailFromJS() { // reduce and reemit into c++ switchMode(false); - _scope->invoke(_reduceAndEmit, 0, 0, 0, true); + _scope->invoke(_reduceAndEmit, nullptr, nullptr, 0, true); // need to get the real number emitted so far _numEmits = _scope->getNumberInt("_emitCt"); _config.reducer->numReduces = _scope->getNumberInt("_redCt"); @@ -1128,10 +1128,10 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp) { // apply the reduce within JS if (_onDisk) { _scope->injectNative("_nativeToTemp", _nativeToTemp, this); - _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true); + _scope->invoke(_reduceAndFinalizeAndInsert, nullptr, nullptr, 0, true); return; } else { - _scope->invoke(_reduceAndFinalize, 0, 0, 0, true); + _scope->invoke(_reduceAndFinalize, nullptr, nullptr, 0, true); return; } } @@ -1220,7 +1220,7 @@ void 
State::finalReduce(OperationContext* opCtx, CurOp* curOp) { // iterate over all sorted objects BSONObj o; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&o, nullptr))) { o = o.getOwned(); // we will be accessing outside of the lock pm.hit(); @@ -1357,7 +1357,7 @@ void State::reduceAndSpillInMemoryStateIfNeeded() { } else if (dupCt > (keyCt * _config.reduceTriggerRatio)) { // reduce now to lower mem usage Timer t; - _scope->invoke(_reduceAll, 0, 0, 0, true); + _scope->invoke(_reduceAll, nullptr, nullptr, 0, true); LOG(3) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms"; @@ -1549,7 +1549,7 @@ public: BSONObj o; PlanExecutor::ExecState execState; - while (PlanExecutor::ADVANCED == (execState = exec->getNext(&o, NULL))) { + while (PlanExecutor::ADVANCED == (execState = exec->getNext(&o, nullptr))) { o = o.getOwned(); // The object will be accessed outside of collection lock // Check to see if this is a new object we don't own yet because of a chunk diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp index 6a89ca96bdf..c9a5566b11f 100644 --- a/src/mongo/db/commands/plan_cache_commands.cpp +++ b/src/mongo/db/commands/plan_cache_commands.cpp @@ -64,9 +64,9 @@ static Status getPlanCache(OperationContext* opCtx, Collection* collection, const string& ns, PlanCache** planCacheOut) { - *planCacheOut = NULL; + *planCacheOut = nullptr; - if (NULL == collection) { + if (nullptr == collection) { return Status(ErrorCodes::BadValue, "no such collection"); } diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp index 75b5f7fe9e6..84b2eae0707 100644 --- a/src/mongo/db/commands/server_status.cpp +++ b/src/mongo/db/commands/server_status.cpp @@ -96,7 +96,7 @@ public: 
result.append("version", VersionInfoInterface::instance().version()); result.append("process", serverGlobalParams.binaryName); result.append("pid", ProcessId::getCurrent().asLongLong()); - result.append("uptime", (double)(time(0) - serverGlobalParams.started)); + result.append("uptime", (double)(time(nullptr) - serverGlobalParams.started)); auto uptime = clock->now() - _started; result.append("uptimeMillis", durationCount<Milliseconds>(uptime)); result.append("uptimeEstimate", durationCount<Seconds>(uptime)); @@ -132,7 +132,7 @@ public: } // --- counters - bool includeMetricTree = MetricTree::theMetricTree != NULL; + bool includeMetricTree = MetricTree::theMetricTree != nullptr; if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue()) includeMetricTree = false; @@ -144,7 +144,8 @@ public: { RamLog::LineIterator rl(RamLog::get("warnings")); - if (rl.lastWrite() >= time(0) - (10 * 60)) { // only show warnings from last 10 minutes + if (rl.lastWrite() >= + time(nullptr) - (10 * 60)) { // only show warnings from last 10 minutes BSONArrayBuilder arr(result.subarrayStart("warnings")); while (rl.more()) { arr.append(rl.next()); diff --git a/src/mongo/db/commands/server_status_internal.cpp b/src/mongo/db/commands/server_status_internal.cpp index 738b22e8945..7556eee5e01 100644 --- a/src/mongo/db/commands/server_status_internal.cpp +++ b/src/mongo/db/commands/server_status_internal.cpp @@ -41,7 +41,7 @@ using std::endl; using std::map; using std::string; -MetricTree* MetricTree::theMetricTree = NULL; +MetricTree* MetricTree::theMetricTree = nullptr; void MetricTree::add(ServerStatusMetric* metric) { string name = metric->getMetricName(); diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp index d56a2970ec6..8e8b749b729 100644 --- a/src/mongo/db/commands/server_status_metric.cpp +++ b/src/mongo/db/commands/server_status_metric.cpp @@ -37,7 +37,7 @@ using std::string; ServerStatusMetric::ServerStatusMetric(const 
string& nameIn) : _name(nameIn), _leafName(_parseLeafName(nameIn)) { - if (MetricTree::theMetricTree == 0) + if (MetricTree::theMetricTree == nullptr) MetricTree::theMetricTree = new MetricTree(); MetricTree::theMetricTree->add(this); } diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index c47ca33f94c..645820be5ff 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -882,7 +882,7 @@ public: audit::logCreateUser(Client::getCurrent(), args.userName, args.hasPassword, - args.hasCustomData ? &args.customData : NULL, + args.hasCustomData ? &args.customData : nullptr, args.roles, args.authenticationRestrictions); status = insertPrivilegeDocument(opCtx, userObj); @@ -1003,8 +1003,8 @@ public: audit::logUpdateUser(Client::getCurrent(), args.userName, args.hasPassword, - args.hasCustomData ? &args.customData : NULL, - args.hasRoles ? &args.roles : NULL, + args.hasCustomData ? &args.customData : nullptr, + args.hasRoles ? &args.roles : nullptr, args.authenticationRestrictions); status = updatePrivilegeDocument( @@ -2437,14 +2437,14 @@ public: audit::logCreateUser(Client::getCurrent(), userName, hasPwd, - userObj.hasField("customData") ? &customData : NULL, + userObj.hasField("customData") ? &customData : nullptr, roles, authenticationRestrictions); } else { audit::logUpdateUser(Client::getCurrent(), userName, hasPwd, - userObj.hasField("customData") ? &customData : NULL, + userObj.hasField("customData") ? &customData : nullptr, &roles, authenticationRestrictions); } diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp index 586c87792d7..d30c820e747 100644 --- a/src/mongo/db/commands/validate.cpp +++ b/src/mongo/db/commands/validate.cpp @@ -125,7 +125,7 @@ public: AutoGetDb ctx(opCtx, nss.db(), MODE_IX); Lock::CollectionLock collLk(opCtx, nss, MODE_X); - Collection* collection = ctx.getDb() ? 
ctx.getDb()->getCollection(opCtx, nss) : NULL; + Collection* collection = ctx.getDb() ? ctx.getDb()->getCollection(opCtx, nss) : nullptr; if (!collection) { if (ctx.getDb() && ViewCatalog::get(ctx.getDb())->lookup(opCtx, nss.ns())) { uasserted(ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"); diff --git a/src/mongo/db/concurrency/lock_request_list.h b/src/mongo/db/concurrency/lock_request_list.h index c9009cdec4f..5423a11792f 100644 --- a/src/mongo/db/concurrency/lock_request_list.h +++ b/src/mongo/db/concurrency/lock_request_list.h @@ -48,10 +48,10 @@ class LockRequestList { public: void push_front(LockRequest* request) { // Sanity check that we do not reuse entries without cleaning them up - invariant(request->next == NULL); - invariant(request->prev == NULL); + invariant(request->next == nullptr); + invariant(request->prev == nullptr); - if (_front == NULL) { + if (_front == nullptr) { _front = _back = request; } else { request->next = _front; @@ -63,10 +63,10 @@ public: void push_back(LockRequest* request) { // Sanity check that we do not reuse entries without cleaning them up - invariant(request->next == NULL); - invariant(request->prev == NULL); + invariant(request->next == nullptr); + invariant(request->prev == nullptr); - if (_front == NULL) { + if (_front == nullptr) { _front = _back = request; } else { request->prev = _back; @@ -77,28 +77,28 @@ public: } void remove(LockRequest* request) { - if (request->prev != NULL) { + if (request->prev != nullptr) { request->prev->next = request->next; } else { _front = request->next; } - if (request->next != NULL) { + if (request->next != nullptr) { request->next->prev = request->prev; } else { _back = request->prev; } - request->prev = NULL; - request->next = NULL; + request->prev = nullptr; + request->next = nullptr; } void reset() { - _front = _back = NULL; + _front = _back = nullptr; } bool empty() const { - return _front == NULL; + return _front == nullptr; } // Pointers to the beginning and the 
end of the list diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp index df1f2908406..1d5dd2b582c 100644 --- a/src/mongo/db/db.cpp +++ b/src/mongo/db/db.cpp @@ -212,7 +212,7 @@ void logStartup(OperationContext* opCtx) { toLog.append("_id", id.str()); toLog.append("hostname", getHostNameCached()); - toLog.appendTimeT("startTime", time(0)); + toLog.appendTimeT("startTime", time(nullptr)); toLog.append("startTimeLocal", dateToCtimeString(Date_t::now())); toLog.append("cmdLine", serverGlobalParams.parsedOpts); diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h index ae348ab93bd..c700baf3d8f 100644 --- a/src/mongo/db/dbdirectclient.h +++ b/src/mongo/db/dbdirectclient.h @@ -62,7 +62,7 @@ public: Query query, int nToReturn = 0, int nToSkip = 0, - const BSONObj* fieldsToReturn = 0, + const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0); @@ -77,9 +77,9 @@ public: virtual bool call(Message& toSend, Message& response, bool assertOk = true, - std::string* actualServer = 0); + std::string* actualServer = nullptr); - virtual void say(Message& toSend, bool isRetry = false, std::string* actualServer = 0); + virtual void say(Message& toSend, bool isRetry = false, std::string* actualServer = nullptr); virtual unsigned long long count(const std::string& ns, const BSONObj& query = BSONObj(), diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index 1447ad18389..e9bfdf9dff1 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -176,7 +176,7 @@ bool Helpers::getSingleton(OperationContext* opCtx, const char* ns, BSONObj& res AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns)); auto exec = InternalPlanner::collectionScan(opCtx, ns, ctx.getCollection(), PlanExecutor::NO_YIELD); - PlanExecutor::ExecState state = exec->getNext(&result, NULL); + PlanExecutor::ExecState state = exec->getNext(&result, nullptr); CurOp::get(opCtx)->done(); @@ -195,7 +195,7 @@ bool 
Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) AutoGetCollectionForReadCommand autoColl(opCtx, NamespaceString(ns)); auto exec = InternalPlanner::collectionScan( opCtx, ns, autoColl.getCollection(), PlanExecutor::NO_YIELD, InternalPlanner::BACKWARD); - PlanExecutor::ExecState state = exec->getNext(&result, NULL); + PlanExecutor::ExecState state = exec->getNext(&result, nullptr); // Non-yielding collection scans from InternalPlanner will never error. invariant(PlanExecutor::ADVANCED == state || PlanExecutor::IS_EOF == state); diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h index 47af0b8ffc2..246130853ff 100644 --- a/src/mongo/db/dbhelpers.h +++ b/src/mongo/db/dbhelpers.h @@ -83,8 +83,8 @@ struct Helpers { StringData ns, BSONObj query, BSONObj& result, - bool* nsFound = 0, - bool* indexFound = 0); + bool* nsFound = nullptr, + bool* indexFound = nullptr); /* TODO: should this move into Collection? * uasserts if no _id index. diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp index f893b53eee9..75f2ecb8fbc 100644 --- a/src/mongo/db/dbmessage.cpp +++ b/src/mongo/db/dbmessage.cpp @@ -36,7 +36,7 @@ namespace mongo { -DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(NULL), _mark(NULL), _nsLen(0) { +DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(nullptr), _mark(nullptr), _nsLen(0) { // for received messages, Message has only one buffer _theEnd = _msg.singleData().data() + _msg.singleData().dataLen(); _nextjsobj = _msg.singleData().data(); @@ -89,7 +89,7 @@ const char* DbMessage::getArray(size_t count) const { BSONObj DbMessage::nextJsObj() { uassert(ErrorCodes::InvalidBSON, "Client Error: Remaining data too small for BSON object", - _nextjsobj != NULL && _theEnd - _nextjsobj >= 5); + _nextjsobj != nullptr && _theEnd - _nextjsobj >= 5); if (serverGlobalParams.objcheck) { Status status = validateBSON( @@ -105,12 +105,12 @@ BSONObj DbMessage::nextJsObj() { _nextjsobj += js.objsize(); 
if (_nextjsobj >= _theEnd) - _nextjsobj = NULL; + _nextjsobj = nullptr; return js; } -void DbMessage::markReset(const char* toMark = NULL) { - if (toMark == NULL) { +void DbMessage::markReset(const char* toMark = nullptr) { + if (toMark == nullptr) { toMark = _mark; } diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h index 7828e2938f1..f6281c3614c 100644 --- a/src/mongo/db/dbmessage.h +++ b/src/mongo/db/dbmessage.h @@ -241,7 +241,7 @@ public: /* for insert and update msgs */ bool moreJSObjs() const { - return _nextjsobj != 0 && _nextjsobj != _theEnd; + return _nextjsobj != nullptr && _nextjsobj != _theEnd; } BSONObj nextJsObj(); diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp index f416f8f3f9d..6a1e64dd30f 100644 --- a/src/mongo/db/exec/collection_scan.cpp +++ b/src/mongo/db/exec/collection_scan.cpp @@ -240,7 +240,7 @@ void CollectionScan::doReattachToOperationContext() { unique_ptr<PlanStageStats> CollectionScan::getStats() { // Add a BSON representation of the filter to the stats tree, if there is one. - if (NULL != _filter) { + if (nullptr != _filter) { BSONObjBuilder bob; _filter->serialize(&bob); _commonStats.filter = bob.obj(); diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp index c8abe5fa26b..2e30f53e302 100644 --- a/src/mongo/db/exec/fetch.cpp +++ b/src/mongo/db/exec/fetch.cpp @@ -186,7 +186,7 @@ unique_ptr<PlanStageStats> FetchStage::getStats() { _commonStats.isEOF = isEOF(); // Add a BSON representation of the filter to the stats tree, if there is one. - if (NULL != _filter) { + if (nullptr != _filter) { BSONObjBuilder bob; _filter->serialize(&bob); _commonStats.filter = bob.obj(); diff --git a/src/mongo/db/exec/filter.h b/src/mongo/db/exec/filter.h index dea812de1a8..dcc079c9136 100644 --- a/src/mongo/db/exec/filter.h +++ b/src/mongo/db/exec/filter.h @@ -151,21 +151,21 @@ public: * Returns false if 'wsm' does not satisfy the filter. 
*/ static bool passes(WorkingSetMember* wsm, const MatchExpression* filter) { - if (NULL == filter) { + if (nullptr == filter) { return true; } WorkingSetMatchableDocument doc(wsm); - return filter->matches(&doc, NULL); + return filter->matches(&doc, nullptr); } static bool passes(const BSONObj& keyData, const BSONObj& keyPattern, const MatchExpression* filter) { - if (NULL == filter) { + if (nullptr == filter) { return true; } IndexKeyMatchableDocument doc(keyData, keyPattern); - return filter->matches(&doc, NULL); + return filter->matches(&doc, nullptr); } }; diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp index 27b04a5be73..5fa04b917db 100644 --- a/src/mongo/db/exec/geo_near.cpp +++ b/src/mongo/db/exec/geo_near.cpp @@ -80,14 +80,14 @@ namespace { struct StoredGeometry { static StoredGeometry* parseFrom(const BSONElement& element) { if (!element.isABSONObj()) - return NULL; + return nullptr; unique_ptr<StoredGeometry> stored(new StoredGeometry); // GeoNear stage can only be run with an existing index // Therefore, it is always safe to skip geometry validation if (!stored->geometry.parseFromStorage(element, true).isOK()) - return NULL; + return nullptr; stored->element = element; return stored.release(); } @@ -343,7 +343,7 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(OperationContext* opCtx, IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]); invariant(!_indexScan); - _indexScan = new IndexScan(opCtx, scanParams, workingSet, NULL); + _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr); _children->emplace_back(_indexScan); } @@ -461,7 +461,7 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx, invariant(_boundsIncrement > 0.0); // Clean up - _densityEstimator.reset(NULL); + _densityEstimator.reset(nullptr); } return state; @@ -530,7 +530,7 @@ public: unique_ptr<MatchExpression> shallowClone() const final { MONGO_UNREACHABLE; - return NULL; + return 
nullptr; } private: @@ -897,7 +897,7 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(OperationContext* op // Index scan invariant(!_indexScan); - _indexScan = new IndexScan(opCtx, scanParams, workingSet, NULL); + _indexScan = new IndexScan(opCtx, scanParams, workingSet, nullptr); _children->emplace_back(_indexScan); } @@ -1003,7 +1003,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx, invariant(_boundsIncrement > 0.0); // Clean up - _densityEstimator.reset(NULL); + _densityEstimator.reset(nullptr); } return state; diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h index 2fd2700db86..33295de7b39 100644 --- a/src/mongo/db/exec/geo_near.h +++ b/src/mongo/db/exec/geo_near.h @@ -47,7 +47,8 @@ namespace mongo { * Generic parameters for a GeoNear search */ struct GeoNearParams { - GeoNearParams() : filter(NULL), nearQuery(NULL), addPointMeta(false), addDistMeta(false) {} + GeoNearParams() + : filter(nullptr), nearQuery(nullptr), addPointMeta(false), addDistMeta(false) {} // MatchExpression to apply to the index keys and fetched documents // Not owned here, owned by solution nodes diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp index b54a9d06a52..8cd352fa046 100644 --- a/src/mongo/db/exec/idhack.cpp +++ b/src/mongo/db/exec/idhack.cpp @@ -58,7 +58,7 @@ IDHackStage::IDHackStage(OperationContext* opCtx, _workingSet(ws), _key(query->getQueryObj()["_id"].wrap()) { _specificStats.indexName = descriptor->indexName(); - if (NULL != query->getProj()) { + if (nullptr != query->getProj()) { _addKeyMetadata = query->getProj()->wantIndexKey(); } } diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp index 22c0cfe5898..f74743629bb 100644 --- a/src/mongo/db/exec/index_scan.cpp +++ b/src/mongo/db/exec/index_scan.cpp @@ -261,7 +261,7 @@ std::unique_ptr<PlanStageStats> IndexScan::getStats() { // catalog information here. 
// Add a BSON representation of the filter to the stats tree, if there is one. - if (NULL != _filter) { + if (nullptr != _filter) { BSONObjBuilder bob; _filter->serialize(&bob); _commonStats.filter = bob.obj(); diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h index 6992214064b..5a25a3243cd 100644 --- a/src/mongo/db/exec/merge_sort.h +++ b/src/mongo/db/exec/merge_sort.h @@ -76,7 +76,7 @@ public: private: struct StageWithValue { - StageWithValue() : id(WorkingSet::INVALID_ID), stage(NULL) {} + StageWithValue() : id(WorkingSet::INVALID_ID), stage(nullptr) {} WorkingSetID id; PlanStage* stage; }; @@ -135,7 +135,7 @@ private: // Parameters that must be provided to a MergeSortStage class MergeSortStageParams { public: - MergeSortStageParams() : collator(NULL), dedup(true) {} + MergeSortStageParams() : collator(nullptr), dedup(true) {} // How we're sorting. BSONObj pattern; diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h index 93eea472d0c..88dd5586ff7 100644 --- a/src/mongo/db/exec/multi_iterator.h +++ b/src/mongo/db/exec/multi_iterator.h @@ -63,7 +63,7 @@ public: // Not used. SpecificStats* getSpecificStats() const final { - return NULL; + return nullptr; } // Not used. diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp index af79d91af30..fa5a3ded422 100644 --- a/src/mongo/db/exec/multi_plan.cpp +++ b/src/mongo/db/exec/multi_plan.cpp @@ -166,7 +166,7 @@ size_t MultiPlanStage::getTrialPeriodWorks(OperationContext* opCtx, const Collec // Run each plan some number of times. This number is at least as great as // 'internalQueryPlanEvaluationWorks', but may be larger for big collections. size_t numWorks = internalQueryPlanEvaluationWorks.load(); - if (NULL != collection) { + if (nullptr != collection) { // For large collections, the number of works is set to be this // fraction of the collection size. 
double fraction = internalQueryPlanEvaluationCollFraction; @@ -307,7 +307,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { // XXX: One known example is 2D queries bool validSolutions = true; for (size_t ix = 0; ix < solutions.size(); ++ix) { - if (NULL == solutions[ix]->cacheData.get()) { + if (nullptr == solutions[ix]->cacheData.get()) { LOG(5) << "Not caching query because this solution has no cache data: " << redact(solutions[ix]->toString()); validSolutions = false; @@ -413,7 +413,7 @@ int MultiPlanStage::bestPlanIdx() const { QuerySolution* MultiPlanStage::bestSolution() { if (_bestPlanIdx == kNoSuchPlan) - return NULL; + return nullptr; return _candidates[_bestPlanIdx].solution.get(); } diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp index 965c972669d..8269b599f43 100644 --- a/src/mongo/db/exec/or.cpp +++ b/src/mongo/db/exec/or.cpp @@ -125,7 +125,7 @@ unique_ptr<PlanStageStats> OrStage::getStats() { _commonStats.isEOF = isEOF(); // Add a BSON representation of the filter to the stats tree, if there is one. 
- if (NULL != _filter) { + if (nullptr != _filter) { BSONObjBuilder bob; _filter->serialize(&bob); _commonStats.filter = bob.obj(); diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp index bc2a4a42d65..505027adc61 100644 --- a/src/mongo/db/exec/projection_exec.cpp +++ b/src/mongo/db/exec/projection_exec.cpp @@ -166,7 +166,7 @@ void ProjectionExec::add(const string& field, bool include) { ProjectionExec*& fm = _fields[subfield.c_str()]; - if (NULL == fm) { + if (nullptr == fm) { fm = new ProjectionExec(); } @@ -187,7 +187,7 @@ void ProjectionExec::add(const string& field, int skip, int limit) { ProjectionExec*& fm = _fields[subfield.c_str()]; - if (NULL == fm) { + if (nullptr == fm) { fm = new ProjectionExec(); } diff --git a/src/mongo/db/exec/projection_exec.h b/src/mongo/db/exec/projection_exec.h index 805be89ef44..ab4dc5f48dc 100644 --- a/src/mongo/db/exec/projection_exec.h +++ b/src/mongo/db/exec/projection_exec.h @@ -211,7 +211,7 @@ private: */ Status append(BSONObjBuilder* bob, const BSONElement& elt, - const MatchDetails* details = NULL, + const MatchDetails* details = nullptr, const ArrayOpType arrayOpType = ARRAY_OP_NORMAL) const; /** diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index a01bc911754..39ba8540c0f 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -184,7 +184,7 @@ public: unique_ptr<WorkingSet> ws(new WorkingSet()); PlanStage* userRoot = parseQuery(opCtx, collection, planObj, ws.get(), &exprs); - uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), NULL != userRoot); + uassert(16911, "Couldn't parse plan from " + cmdObj.toString(), nullptr != userRoot); // Add a fetch at the top for the user so we can get obj back for sure. // TODO: Do we want to do this for the user? I think so. 
@@ -200,7 +200,7 @@ public: BSONObj obj; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { resultBuilder.append(obj); } @@ -224,11 +224,11 @@ public: std::vector<std::unique_ptr<MatchExpression>>* exprs) { BSONElement firstElt = obj.firstElement(); if (!firstElt.isABSONObj()) { - return NULL; + return nullptr; } BSONObj paramObj = firstElt.Obj(); - MatchExpression* matcher = NULL; + MatchExpression* matcher = nullptr; BSONObj nodeArgs; // Every node has these two fields. @@ -239,7 +239,7 @@ public: while (it.more()) { BSONElement e = it.next(); if (!e.isABSONObj()) { - return NULL; + return nullptr; } BSONObj argObj = e.Obj(); if (filterTag == e.fieldName()) { @@ -252,12 +252,12 @@ public: ExtensionsCallbackReal(opCtx, &collection->ns()), MatchExpressionParser::kAllowAllSpecialFeatures); if (!statusWithMatcher.isOK()) { - return NULL; + return nullptr; } std::unique_ptr<MatchExpression> me = std::move(statusWithMatcher.getValue()); // exprs is what will wind up deleting this. 
matcher = me.get(); - verify(NULL != matcher); + verify(nullptr != matcher); exprs->push_back(std::move(me)); } else if (argsTag == e.fieldName()) { nodeArgs = argObj; @@ -265,7 +265,7 @@ public: uasserted(16910, "Unknown fieldname " + string(e.fieldName()) + " in query node " + obj.toString()); - return NULL; + return nullptr; } } @@ -323,8 +323,9 @@ public: uassert(16922, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj()); PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); - uassert( - 16923, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode); + uassert(16923, + "Can't parse sub-node of AND: " + e.Obj().toString(), + nullptr != subNode); // takes ownership andStage->addChild(subNode); ++nodesAdded; @@ -346,8 +347,9 @@ public: uassert(16925, "node of AND isn't an obj?: " + e.toString(), e.isABSONObj()); PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); - uassert( - 16926, "Can't parse sub-node of AND: " + e.Obj().toString(), NULL != subNode); + uassert(16926, + "Can't parse sub-node of AND: " + e.Obj().toString(), + nullptr != subNode); // takes ownership andStage->addChild(subNode); ++nodesAdded; @@ -366,11 +368,11 @@ public: while (it.more()) { BSONElement e = it.next(); if (!e.isABSONObj()) { - return NULL; + return nullptr; } PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); uassert( - 16936, "Can't parse sub-node of OR: " + e.Obj().toString(), NULL != subNode); + 16936, "Can't parse sub-node of OR: " + e.Obj().toString(), nullptr != subNode); // takes ownership orStage->addChild(subNode); } @@ -383,11 +385,12 @@ public: parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); uassert(28731, "Can't parse sub-node of FETCH: " + nodeArgs["node"].Obj().toString(), - NULL != subNode); + nullptr != subNode); return new FetchStage(opCtx, workingSet, subNode, matcher, collection); } else if ("limit" == nodeName) { - uassert( - 
16937, "Limit stage doesn't have a filter (put it on the child)", NULL == matcher); + uassert(16937, + "Limit stage doesn't have a filter (put it on the child)", + nullptr == matcher); uassert( 16930, "Node argument must be provided to limit", nodeArgs["node"].isABSONObj()); uassert(16931, "Num argument must be provided to limit", nodeArgs["num"].isNumber()); @@ -395,18 +398,19 @@ public: parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); uassert(28732, "Can't parse sub-node of LIMIT: " + nodeArgs["node"].Obj().toString(), - NULL != subNode); + nullptr != subNode); return new LimitStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode); } else if ("skip" == nodeName) { - uassert( - 16938, "Skip stage doesn't have a filter (put it on the child)", NULL == matcher); + uassert(16938, + "Skip stage doesn't have a filter (put it on the child)", + nullptr == matcher); uassert(16932, "Node argument must be provided to skip", nodeArgs["node"].isABSONObj()); uassert(16933, "Num argument must be provided to skip", nodeArgs["num"].isNumber()); PlanStage* subNode = parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); uassert(28733, "Can't parse sub-node of SKIP: " + nodeArgs["node"].Obj().toString(), - NULL != subNode); + nullptr != subNode); return new SkipStage(opCtx, nodeArgs["num"].numberInt(), workingSet, subNode); } else if ("cscan" == nodeName) { CollectionScanParams params; @@ -443,7 +447,7 @@ public: PlanStage* subNode = parseQuery(opCtx, collection, e.Obj(), workingSet, exprs); uassert(16974, "Can't parse sub-node of mergeSort: " + e.Obj().toString(), - NULL != subNode); + nullptr != subNode); // takes ownership mergeStage->addChild(subNode); } @@ -466,7 +470,7 @@ public: // that can only be checked for equality. We ignore this now. 
Status s = fam->getSpec().getIndexPrefix(BSONObj(), ¶ms.indexPrefix); if (!s.isOK()) { - return NULL; + return nullptr; } params.spec = fam->getSpec(); @@ -476,13 +480,14 @@ public: params.query.setCaseSensitive(TextMatchExpressionBase::kCaseSensitiveDefault); params.query.setDiacriticSensitive(TextMatchExpressionBase::kDiacriticSensitiveDefault); if (!params.query.parse(fam->getSpec().getTextIndexVersion()).isOK()) { - return NULL; + return nullptr; } return new TextStage(opCtx, params, workingSet, matcher); } else if ("delete" == nodeName) { - uassert( - 18636, "Delete stage doesn't have a filter (put it on the child)", NULL == matcher); + uassert(18636, + "Delete stage doesn't have a filter (put it on the child)", + nullptr == matcher); uassert( 18637, "node argument must be provided to delete", nodeArgs["node"].isABSONObj()); uassert(18638, @@ -492,12 +497,12 @@ public: parseQuery(opCtx, collection, nodeArgs["node"].Obj(), workingSet, exprs); uassert(28734, "Can't parse sub-node of DELETE: " + nodeArgs["node"].Obj().toString(), - NULL != subNode); + nullptr != subNode); auto params = std::make_unique<DeleteStageParams>(); params->isMulti = nodeArgs["isMulti"].Bool(); return new DeleteStage(opCtx, std::move(params), workingSet, collection, subNode); } else { - return NULL; + return nullptr; } } }; diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp index 2506db606b1..889ea82428f 100644 --- a/src/mongo/db/exec/subplan.cpp +++ b/src/mongo/db/exec/subplan.cpp @@ -190,7 +190,7 @@ Status tagOrChildAccordingToCache(PlanCacheIndexTree* compositeCacheData, invariant(compositeCacheData); // We want a well-formed *indexed* solution. - if (NULL == branchCacheData) { + if (nullptr == branchCacheData) { // For example, we don't cache things for 2d indices. 
str::stream ss; ss << "No cache data for subchild " << orChild->debugString(); @@ -298,7 +298,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) { // Check that we have good cache data. For example, we don't cache things // for 2d indices. - if (NULL == bestSoln->cacheData.get()) { + if (nullptr == bestSoln->cacheData.get()) { str::stream ss; ss << "No cache data for subchild " << orChild->debugString(); return Status(ErrorCodes::BadValue, ss); @@ -343,7 +343,7 @@ Status SubplanStage::choosePlanForSubqueries(PlanYieldPolicy* yieldPolicy) { _compositeSolution = QueryPlannerAnalysis::analyzeDataAccess(*_query, _plannerParams, std::move(solnRoot)); - if (NULL == _compositeSolution.get()) { + if (nullptr == _compositeSolution.get()) { str::stream ss; ss << "Failed to analyze subplanned query"; return Status(ErrorCodes::BadValue, ss); @@ -487,11 +487,11 @@ unique_ptr<PlanStageStats> SubplanStage::getStats() { } bool SubplanStage::branchPlannedFromCache(size_t i) const { - return NULL != _branchResults[i]->cachedSolution.get(); + return nullptr != _branchResults[i]->cachedSolution.get(); } const SpecificStats* SubplanStage::getSpecificStats() const { - return NULL; + return nullptr; } } // namespace mongo diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp index 34953bb62ff..7f079f27d9f 100644 --- a/src/mongo/db/exec/update_stage.cpp +++ b/src/mongo/db/exec/update_stage.cpp @@ -168,7 +168,7 @@ UpdateStage::UpdateStage(OperationContext* opCtx, _ws(ws), _idRetrying(WorkingSet::INVALID_ID), _idReturning(WorkingSet::INVALID_ID), - _updatedRecordIds(params.request->isMulti() ? new RecordIdSet() : NULL), + _updatedRecordIds(params.request->isMulti() ? 
new RecordIdSet() : nullptr), _doc(params.driver->getDocument()) { _children.emplace_back(child); @@ -282,7 +282,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, Reco } // See if the changes were applied in place - const char* source = NULL; + const char* source = nullptr; const bool inPlace = _doc.getInPlaceUpdates(&_damages, &source); if (inPlace && _damages.empty()) { diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h index 93936166544..24beae7ffaa 100644 --- a/src/mongo/db/exec/update_stage.h +++ b/src/mongo/db/exec/update_stage.h @@ -47,7 +47,7 @@ struct PlanSummaryStats; struct UpdateStageParams { UpdateStageParams(const UpdateRequest* r, UpdateDriver* d, OpDebug* o) - : request(r), driver(d), opDebug(o), canonicalQuery(NULL) {} + : request(r), driver(d), opDebug(o), canonicalQuery(nullptr) {} // Contains update parameters like whether it's a multi update or an upsert. Not owned. // Must outlive the UpdateStage. diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp index e9dd0ac7e9c..79a0fbbcaa6 100644 --- a/src/mongo/db/exec/working_set.cpp +++ b/src/mongo/db/exec/working_set.cpp @@ -39,7 +39,7 @@ using std::string; namespace dps = ::mongo::dotted_path_support; -WorkingSet::MemberHolder::MemberHolder() : member(NULL) {} +WorkingSet::MemberHolder::MemberHolder() : member(nullptr) {} WorkingSet::MemberHolder::~MemberHolder() {} WorkingSet::WorkingSet() : _freeList(INVALID_ID) {} diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp index 9472c90fabe..eb8618552eb 100644 --- a/src/mongo/db/exec/working_set_test.cpp +++ b/src/mongo/db/exec/working_set_test.cpp @@ -52,12 +52,12 @@ protected: id = ws->allocate(); ASSERT(id != WorkingSet::INVALID_ID); member = ws->get(id); - ASSERT(NULL != member); + ASSERT(nullptr != member); } void tearDown() { ws.reset(); - member = NULL; + member = nullptr; } std::unique_ptr<WorkingSet> ws; @@ -123,7 
+123,7 @@ TEST_F(WorkingSetFixture, getFieldFromIndex) { string secondName = "y"; int secondValue = 10; - member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL)); + member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), nullptr)); // Also a minor lie as RecordId is bogus. ws->transitionToRecordIdAndIdx(id); BSONElement elt; @@ -133,7 +133,8 @@ TEST_F(WorkingSetFixture, getFieldFromIndex) { ASSERT_FALSE(member->getFieldDotted("foo", &elt)); // Add another index datum. - member->keyData.push_back(IndexKeyDatum(BSON(secondName << 1), BSON("" << secondValue), NULL)); + member->keyData.push_back( + IndexKeyDatum(BSON(secondName << 1), BSON("" << secondValue), nullptr)); ASSERT_TRUE(member->getFieldDotted(secondName, &elt)); ASSERT_EQUALS(elt.numberInt(), secondValue); ASSERT_TRUE(member->getFieldDotted(firstName, &elt)); @@ -146,7 +147,7 @@ TEST_F(WorkingSetFixture, getDottedFieldFromIndex) { string firstName = "x.y"; int firstValue = 5; - member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), NULL)); + member->keyData.push_back(IndexKeyDatum(BSON(firstName << 1), BSON("" << firstValue), nullptr)); ws->transitionToRecordIdAndIdx(id); BSONElement elt; ASSERT_TRUE(member->getFieldDotted(firstName, &elt)); diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h index 748787affe9..45cbf50506b 100644 --- a/src/mongo/db/field_parser.h +++ b/src/mongo/db/field_parser.h @@ -85,102 +85,102 @@ public: static FieldState extract(BSONObj doc, const BSONField<bool>& field, bool* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<bool>& field, bool* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<BSONArray>& field, BSONArray* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState 
extract(BSONElement elem, const BSONField<BSONArray>& field, BSONArray* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<BSONObj>& field, BSONObj* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<BSONObj>& field, BSONObj* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<Date_t>& field, Date_t* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<Date_t>& field, Date_t* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<Timestamp>& field, Timestamp* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<Timestamp>& field, Timestamp* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<std::string>& field, std::string* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<std::string>& field, std::string* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<OID>& field, OID* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<OID>& field, OID* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<int>& field, int* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<int>& field, int* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState 
extract(BSONObj doc, const BSONField<long long>& field, long long* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<long long>& field, long long* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONElement elem, const BSONField<double>& field, double* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extract(BSONObj doc, const BSONField<double>& field, double* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * The following extractNumber methods do implicit conversion between any numeric type and @@ -190,32 +190,32 @@ public: static FieldState extractNumber(BSONObj doc, const BSONField<int>& field, int* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractNumber(BSONElement elem, const BSONField<int>& field, int* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractNumber(BSONObj doc, const BSONField<long long>& field, long long* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractNumber(BSONElement elem, const BSONField<long long>& field, long long* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractNumber(BSONObj doc, const BSONField<double>& field, double* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractNumber(BSONElement elem, const BSONField<double>& field, double* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * Extracts a document id from a particular field name, which may be of any type but Array. 
@@ -224,12 +224,12 @@ public: static FieldState extractID(BSONObj doc, const BSONField<BSONObj>& field, BSONObj* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); static FieldState extractID(BSONElement elem, const BSONField<BSONObj>& field, BSONObj* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); // TODO: BSONElement extraction of types below @@ -242,7 +242,7 @@ public: static FieldState extract(BSONObj doc, const BSONField<T>& field, T* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * Similar to the mandatory 'extract' but on a optional field. The '*out' value would only be @@ -253,13 +253,13 @@ public: static FieldState extract(BSONObj doc, const BSONField<T*>& field, T** out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); template <typename T> static FieldState extract(BSONObj doc, const BSONField<T>& field, T** out, // alloc variation - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * Extracts a mandatory repetition of 'field', from the object 'doc'. Writes the extracted @@ -272,7 +272,7 @@ public: static FieldState extract(BSONObj doc, const BSONField<std::vector<T*>>& field, std::vector<T*>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * Extracts a mandatory repetition of 'field', from the field 'elem'. Writes the extracted @@ -285,7 +285,7 @@ public: static FieldState extract(BSONElement elem, const BSONField<std::vector<T*>>& field, std::vector<T*>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); /** * Similar to the mandatory repetition' extract but on an optional field. 
The '*out' value would @@ -298,7 +298,7 @@ public: static FieldState extract(BSONObj doc, const BSONField<std::vector<T*>>& field, std::vector<T*>** out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); // // ==================== Below DEPRECATED; use types instead ==================== @@ -317,25 +317,25 @@ public: static FieldState extract(BSONObj doc, const BSONField<std::vector<T>>& field, std::vector<T>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); template <typename T> static FieldState extract(BSONElement elem, const BSONField<std::vector<T>>& field, std::vector<T>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); template <typename K, typename T> static FieldState extract(BSONObj doc, const BSONField<std::map<K, T>>& field, std::map<K, T>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); template <typename K, typename T> static FieldState extract(BSONElement elem, const BSONField<std::map<K, T>>& field, std::map<K, T>* out, - std::string* errMsg = NULL); + std::string* errMsg = nullptr); }; template <typename T> diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp index 9627b914c6b..a99ee1b45e8 100644 --- a/src/mongo/db/field_parser_test.cpp +++ b/src/mongo/db/field_parser_test.cpp @@ -180,31 +180,33 @@ TEST_F(ExtractionFixture, GetLong) { TEST_F(ExtractionFixture, IsFound) { bool bool_val; BSONField<bool> aBoolMissing("aBoolMissing"); - ASSERT_EQUALS(FieldParser::extract(doc, aBool, &bool_val, NULL), FieldParser::FIELD_SET); - ASSERT_EQUALS(FieldParser::extract(doc, aBoolMissing, &bool_val, NULL), + ASSERT_EQUALS(FieldParser::extract(doc, aBool, &bool_val, nullptr), FieldParser::FIELD_SET); + ASSERT_EQUALS(FieldParser::extract(doc, aBoolMissing, &bool_val, nullptr), FieldParser::FIELD_NONE); Date_t Date_t_val; BSONField<Date_t> aDateMissing("aDateMissing"); - ASSERT_EQUALS(FieldParser::extract(doc, aDate, &Date_t_val, NULL), 
FieldParser::FIELD_SET); - ASSERT_EQUALS(FieldParser::extract(doc, aDateMissing, &Date_t_val, NULL), + ASSERT_EQUALS(FieldParser::extract(doc, aDate, &Date_t_val, nullptr), FieldParser::FIELD_SET); + ASSERT_EQUALS(FieldParser::extract(doc, aDateMissing, &Date_t_val, nullptr), FieldParser::FIELD_NONE); string string_val; BSONField<string> aStringMissing("aStringMissing"); - ASSERT_EQUALS(FieldParser::extract(doc, aString, &string_val, NULL), FieldParser::FIELD_SET); - ASSERT_EQUALS(FieldParser::extract(doc, aStringMissing, &string_val, NULL), + ASSERT_EQUALS(FieldParser::extract(doc, aString, &string_val, nullptr), FieldParser::FIELD_SET); + ASSERT_EQUALS(FieldParser::extract(doc, aStringMissing, &string_val, nullptr), FieldParser::FIELD_NONE); OID OID_val; BSONField<OID> anOIDMissing("anOIDMissing"); - ASSERT_EQUALS(FieldParser::extract(doc, anOID, &OID_val, NULL), FieldParser::FIELD_SET); - ASSERT_EQUALS(FieldParser::extract(doc, anOIDMissing, &OID_val, NULL), FieldParser::FIELD_NONE); + ASSERT_EQUALS(FieldParser::extract(doc, anOID, &OID_val, nullptr), FieldParser::FIELD_SET); + ASSERT_EQUALS(FieldParser::extract(doc, anOIDMissing, &OID_val, nullptr), + FieldParser::FIELD_NONE); long long long_long_val; BSONField<long long> aLongMissing("aLongMissing"); - ASSERT_EQUALS(FieldParser::extract(doc, aLong, &long_long_val, NULL), FieldParser::FIELD_SET); - ASSERT_EQUALS(FieldParser::extract(doc, aLongMissing, &long_long_val, NULL), + ASSERT_EQUALS(FieldParser::extract(doc, aLong, &long_long_val, nullptr), + FieldParser::FIELD_SET); + ASSERT_EQUALS(FieldParser::extract(doc, aLongMissing, &long_long_val, nullptr), FieldParser::FIELD_NONE); } diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp index cbfcee236d3..2843f39d655 100644 --- a/src/mongo/db/field_ref_set.cpp +++ b/src/mongo/db/field_ref_set.cpp @@ -139,7 +139,7 @@ bool FieldRefSet::insert(const FieldRef* toInsert, const FieldRef** conflict) { } _fieldSet.insert(it, toInsert); - *conflict 
= NULL; + *conflict = nullptr; return true; } diff --git a/src/mongo/db/field_ref_set_test.cpp b/src/mongo/db/field_ref_set_test.cpp index 9b437f3b9ad..ae610a15cbf 100644 --- a/src/mongo/db/field_ref_set_test.cpp +++ b/src/mongo/db/field_ref_set_test.cpp @@ -44,17 +44,17 @@ TEST(EmptySet, Normal) { FieldRef bSimple("b"); const FieldRef* conflict; ASSERT_TRUE(fieldSet.insert(&bSimple, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "a", OK FieldRef aSimple("a"); ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "c", OK FieldRef cSimple("c"); ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); } TEST(EmptySet, Conflict) { @@ -63,7 +63,7 @@ TEST(EmptySet, Conflict) { FieldRef aDotB("a.b"); const FieldRef* conflict; ASSERT_TRUE(fieldSet.insert(&aDotB, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "a", conflicts with "a.b" FieldRef prefix("a"); @@ -83,7 +83,7 @@ TEST(EmptySet, EmptyField) { FieldRef empty; const FieldRef* conflict; ASSERT_TRUE(fieldSet.insert(&empty, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); ASSERT_FALSE(fieldSet.insert(&empty, &conflict)); ASSERT_EQUALS(empty, *conflict); @@ -96,24 +96,24 @@ TEST(NotEmptySet, Normal) { FieldRef bDotE("b.e"); const FieldRef* conflict; ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); ASSERT_TRUE(fieldSet.insert(&bDotE, 
&conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "a" before, OK FieldRef aSimple("a"); ASSERT_TRUE(fieldSet.insert(&aSimple, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "b.d" in the middle, OK FieldRef bDotD("b.d"); ASSERT_TRUE(fieldSet.insert(&bDotD, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "c" after, OK FieldRef cSimple("c"); ASSERT_TRUE(fieldSet.insert(&cSimple, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); } TEST(NotEmpty, Conflict) { @@ -123,9 +123,9 @@ TEST(NotEmpty, Conflict) { FieldRef bDotE("b.e"); const FieldRef* conflict; ASSERT_TRUE(fieldSet.insert(&bDotC, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); ASSERT_TRUE(fieldSet.insert(&bDotE, &conflict)); - ASSERT_EQUALS(static_cast<const FieldRef*>(NULL), conflict); + ASSERT_EQUALS(static_cast<const FieldRef*>(nullptr), conflict); // insert "b" before, conflicts "b.c" FieldRef bSimple("b"); diff --git a/src/mongo/db/fts/fts_basic_phrase_matcher.cpp b/src/mongo/db/fts/fts_basic_phrase_matcher.cpp index 320127c87dc..8b4c373e048 100644 --- a/src/mongo/db/fts/fts_basic_phrase_matcher.cpp +++ b/src/mongo/db/fts/fts_basic_phrase_matcher.cpp @@ -43,7 +43,7 @@ bool BasicFTSPhraseMatcher::phraseMatches(const string& phrase, return haystack.find(phrase) != string::npos; } - return strcasestr(haystack.c_str(), phrase.c_str()) != NULL; + return strcasestr(haystack.c_str(), phrase.c_str()) != nullptr; } } // namespace fts diff --git a/src/mongo/db/fts/fts_element_iterator.h b/src/mongo/db/fts/fts_element_iterator.h 
index db0426de41a..611cf33c7fb 100644 --- a/src/mongo/db/fts/fts_element_iterator.h +++ b/src/mongo/db/fts/fts_element_iterator.h @@ -48,7 +48,7 @@ struct FTSIteratorValue { FTSIteratorValue(const char* text, const FTSLanguage* language, double weight) : _text(text), _language(language), _weight(weight), _valid(true) {} - FTSIteratorValue() : _text(NULL), _language(), _weight(0.0), _valid(false) {} + FTSIteratorValue() : _text(nullptr), _language(), _weight(0.0), _valid(false) {} bool valid() const { return _valid; diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp index db5e97227da..5b9fcdadc3c 100644 --- a/src/mongo/db/fts/stemmer.cpp +++ b/src/mongo/db/fts/stemmer.cpp @@ -37,7 +37,7 @@ namespace mongo { namespace fts { Stemmer::Stemmer(const FTSLanguage* language) { - _stemmer = NULL; + _stemmer = nullptr; if (language->str() != "none") _stemmer = sb_stemmer_new(language->str().c_str(), "UTF_8"); } @@ -45,7 +45,7 @@ Stemmer::Stemmer(const FTSLanguage* language) { Stemmer::~Stemmer() { if (_stemmer) { sb_stemmer_delete(_stemmer); - _stemmer = NULL; + _stemmer = nullptr; } } @@ -56,7 +56,7 @@ StringData Stemmer::stem(StringData word) const { const sb_symbol* sb_sym = sb_stemmer_stem(_stemmer, (const sb_symbol*)word.rawData(), word.size()); - if (sb_sym == NULL) { + if (sb_sym == nullptr) { // out of memory MONGO_UNREACHABLE; } diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp index 5b4ade3d062..5a4c066ee28 100644 --- a/src/mongo/db/geo/geometry_container.cpp +++ b/src/mongo/db/geo/geometry_container.cpp @@ -37,7 +37,7 @@ namespace mongo { bool GeometryContainer::isSimpleContainer() const { - return NULL != _point || NULL != _line || NULL != _polygon; + return nullptr != _point || nullptr != _line || nullptr != _polygon; } bool GeometryContainer::isPoint() const { @@ -45,37 +45,38 @@ bool GeometryContainer::isPoint() const { } bool GeometryContainer::supportsContains() const { - return NULL != 
_polygon || NULL != _box || NULL != _cap || NULL != _multiPolygon || - (NULL != _geometryCollection && (_geometryCollection->polygons.vector().size() > 0 || - _geometryCollection->multiPolygons.vector().size() > 0)); + return nullptr != _polygon || nullptr != _box || nullptr != _cap || nullptr != _multiPolygon || + (nullptr != _geometryCollection && + (_geometryCollection->polygons.vector().size() > 0 || + _geometryCollection->multiPolygons.vector().size() > 0)); } bool GeometryContainer::hasS2Region() const { - return (NULL != _point && _point->crs == SPHERE) || NULL != _line || - (NULL != _polygon && (_polygon->crs == SPHERE || _polygon->crs == STRICT_SPHERE)) || - (NULL != _cap && _cap->crs == SPHERE) || NULL != _multiPoint || NULL != _multiLine || - NULL != _multiPolygon || NULL != _geometryCollection; + return (nullptr != _point && _point->crs == SPHERE) || nullptr != _line || + (nullptr != _polygon && (_polygon->crs == SPHERE || _polygon->crs == STRICT_SPHERE)) || + (nullptr != _cap && _cap->crs == SPHERE) || nullptr != _multiPoint || + nullptr != _multiLine || nullptr != _multiPolygon || nullptr != _geometryCollection; } const S2Region& GeometryContainer::getS2Region() const { - if (NULL != _point && SPHERE == _point->crs) { + if (nullptr != _point && SPHERE == _point->crs) { return _point->cell; - } else if (NULL != _line) { + } else if (nullptr != _line) { return _line->line; - } else if (NULL != _polygon && NULL != _polygon->s2Polygon) { + } else if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return *_polygon->s2Polygon; - } else if (NULL != _polygon && NULL != _polygon->bigPolygon) { + } else if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return *_polygon->bigPolygon; - } else if (NULL != _cap && SPHERE == _cap->crs) { + } else if (nullptr != _cap && SPHERE == _cap->crs) { return _cap->cap; - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { return *_s2Region; - } else if (NULL != _multiLine) { + } else 
if (nullptr != _multiLine) { return *_s2Region; - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { return *_s2Region; } else { - invariant(NULL != _geometryCollection); + invariant(nullptr != _geometryCollection); return *_s2Region; } } @@ -233,23 +234,23 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const return false; } - if (NULL != _polygon && (FLAT == _polygon->crs)) { - if (NULL == otherContainer._point) { + if (nullptr != _polygon && (FLAT == _polygon->crs)) { + if (nullptr == otherContainer._point) { return false; } return _polygon->oldPolygon.contains(otherContainer._point->oldPoint); } - if (NULL != _box) { + if (nullptr != _box) { verify(FLAT == _box->crs); - if (NULL == otherContainer._point) { + if (nullptr == otherContainer._point) { return false; } return _box->box.inside(otherContainer._point->oldPoint); } - if (NULL != _cap && (FLAT == _cap->crs)) { - if (NULL == otherContainer._point) { + if (nullptr != _cap && (FLAT == _cap->crs)) { + if (nullptr == otherContainer._point) { return false; } // Let's be as consistent epsilon-wise as we can with the '2d' indextype. @@ -260,20 +261,20 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const // Now we deal with all the SPHERE stuff. // Iterate over the other thing and see if we contain it all. 
- if (NULL != otherContainer._point) { + if (nullptr != otherContainer._point) { return contains(otherContainer._point->cell, otherContainer._point->point); } - if (NULL != otherContainer._line) { + if (nullptr != otherContainer._line) { return contains(otherContainer._line->line); } - if (NULL != otherContainer._polygon) { - invariant(NULL != otherContainer._polygon->s2Polygon); + if (nullptr != otherContainer._polygon) { + invariant(nullptr != otherContainer._polygon->s2Polygon); return contains(*otherContainer._polygon->s2Polygon); } - if (NULL != otherContainer._multiPoint) { + if (nullptr != otherContainer._multiPoint) { for (size_t i = 0; i < otherContainer._multiPoint->points.size(); ++i) { if (!contains(otherContainer._multiPoint->cells[i], otherContainer._multiPoint->points[i])) { @@ -283,7 +284,7 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const return true; } - if (NULL != otherContainer._multiLine) { + if (nullptr != otherContainer._multiLine) { const vector<S2Polyline*>& lines = otherContainer._multiLine->lines.vector(); for (size_t i = 0; i < lines.size(); ++i) { if (!contains(*lines[i])) { @@ -293,7 +294,7 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const return true; } - if (NULL != otherContainer._multiPolygon) { + if (nullptr != otherContainer._multiPolygon) { const vector<S2Polygon*>& polys = otherContainer._multiPolygon->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (!contains(*polys[i])) { @@ -303,7 +304,7 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const return true; } - if (NULL != otherContainer._geometryCollection) { + if (nullptr != otherContainer._geometryCollection) { GeometryCollection& c = *otherContainer._geometryCollection; for (size_t i = 0; i < c.points.size(); ++i) { @@ -372,21 +373,21 @@ bool containsPoint(const S2Polygon& poly, const S2Cell& otherCell, const S2Point } bool GeometryContainer::contains(const 
S2Cell& otherCell, const S2Point& otherPoint) const { - if (NULL != _polygon && (NULL != _polygon->s2Polygon)) { + if (nullptr != _polygon && (nullptr != _polygon->s2Polygon)) { return containsPoint(*_polygon->s2Polygon, otherCell, otherPoint); } - if (NULL != _polygon && (NULL != _polygon->bigPolygon)) { + if (nullptr != _polygon && (nullptr != _polygon->bigPolygon)) { if (_polygon->bigPolygon->Contains(otherPoint)) return true; return _polygon->bigPolygon->MayIntersect(otherCell); } - if (NULL != _cap && (_cap->crs == SPHERE)) { + if (nullptr != _cap && (_cap->crs == SPHERE)) { return _cap->cap.MayIntersect(otherCell); } - if (NULL != _multiPolygon) { + if (nullptr != _multiPolygon) { const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsPoint(*polys[i], otherCell, otherPoint)) { @@ -395,7 +396,7 @@ bool GeometryContainer::contains(const S2Cell& otherCell, const S2Point& otherPo } } - if (NULL != _geometryCollection) { + if (nullptr != _geometryCollection) { const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsPoint(*polys[i]->s2Polygon, otherCell, otherPoint)) { @@ -439,15 +440,15 @@ bool containsLine(const S2Polygon& poly, const S2Polyline& otherLine) { } bool GeometryContainer::contains(const S2Polyline& otherLine) const { - if (NULL != _polygon && NULL != _polygon->s2Polygon) { + if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return containsLine(*_polygon->s2Polygon, otherLine); } - if (NULL != _polygon && NULL != _polygon->bigPolygon) { + if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return _polygon->bigPolygon->Contains(otherLine); } - if (NULL != _cap && (_cap->crs == SPHERE)) { + if (nullptr != _cap && (_cap->crs == SPHERE)) { // If the radian distance of a line to the centroid of the complement spherical cap is less // than the arc radian of the complement cap, then the line 
is not within the spherical cap. S2Cap complementSphere = _cap->cap.Complement(); @@ -458,7 +459,7 @@ bool GeometryContainer::contains(const S2Polyline& otherLine) const { return true; } - if (NULL != _multiPolygon) { + if (nullptr != _multiPolygon) { const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsLine(*polys[i], otherLine)) { @@ -467,7 +468,7 @@ bool GeometryContainer::contains(const S2Polyline& otherLine) const { } } - if (NULL != _geometryCollection) { + if (nullptr != _geometryCollection) { const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsLine(*polys[i]->s2Polygon, otherLine)) { @@ -495,15 +496,15 @@ bool containsPolygon(const S2Polygon& poly, const S2Polygon& otherPoly) { } bool GeometryContainer::contains(const S2Polygon& otherPolygon) const { - if (NULL != _polygon && NULL != _polygon->s2Polygon) { + if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return containsPolygon(*_polygon->s2Polygon, otherPolygon); } - if (NULL != _polygon && NULL != _polygon->bigPolygon) { + if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return _polygon->bigPolygon->Contains(otherPolygon); } - if (NULL != _cap && (_cap->crs == SPHERE)) { + if (nullptr != _cap && (_cap->crs == SPHERE)) { // If the radian distance of a polygon to the centroid of the complement spherical cap is // less than the arc radian of the complement cap, then the polygon is not within the // spherical cap. 
@@ -515,7 +516,7 @@ bool GeometryContainer::contains(const S2Polygon& otherPolygon) const { return true; } - if (NULL != _multiPolygon) { + if (nullptr != _multiPolygon) { const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsPolygon(*polys[i], otherPolygon)) { @@ -524,7 +525,7 @@ bool GeometryContainer::contains(const S2Polygon& otherPolygon) const { } } - if (NULL != _geometryCollection) { + if (nullptr != _geometryCollection) { const vector<PolygonWithCRS*>& polys = _geometryCollection->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (containsPolygon(*polys[i]->s2Polygon, otherPolygon)) { @@ -548,22 +549,22 @@ bool GeometryContainer::contains(const S2Polygon& otherPolygon) const { } bool GeometryContainer::intersects(const GeometryContainer& otherContainer) const { - if (NULL != otherContainer._point) { + if (nullptr != otherContainer._point) { return intersects(otherContainer._point->cell); - } else if (NULL != otherContainer._line) { + } else if (nullptr != otherContainer._line) { return intersects(otherContainer._line->line); - } else if (NULL != otherContainer._polygon) { - if (NULL == otherContainer._polygon->s2Polygon) { + } else if (nullptr != otherContainer._polygon) { + if (nullptr == otherContainer._polygon->s2Polygon) { return false; } return intersects(*otherContainer._polygon->s2Polygon); - } else if (NULL != otherContainer._multiPoint) { + } else if (nullptr != otherContainer._multiPoint) { return intersects(*otherContainer._multiPoint); - } else if (NULL != otherContainer._multiLine) { + } else if (nullptr != otherContainer._multiLine) { return intersects(*otherContainer._multiLine); - } else if (NULL != otherContainer._multiPolygon) { + } else if (nullptr != otherContainer._multiPolygon) { return intersects(*otherContainer._multiPolygon); - } else if (NULL != otherContainer._geometryCollection) { + } else if (nullptr != otherContainer._geometryCollection) { 
const GeometryCollection& c = *otherContainer._geometryCollection; for (size_t i = 0; i < c.points.size(); ++i) { @@ -635,36 +636,36 @@ bool GeometryContainer::intersects(const MultiPolygonWithCRS& otherMultiPolygon) // Does this (GeometryContainer) intersect the provided data? bool GeometryContainer::intersects(const S2Cell& otherPoint) const { - if (NULL != _point) { + if (nullptr != _point) { return _point->cell.MayIntersect(otherPoint); - } else if (NULL != _line) { + } else if (nullptr != _line) { return _line->line.MayIntersect(otherPoint); - } else if (NULL != _polygon && NULL != _polygon->s2Polygon) { + } else if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return _polygon->s2Polygon->MayIntersect(otherPoint); - } else if (NULL != _polygon && NULL != _polygon->bigPolygon) { + } else if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return _polygon->bigPolygon->MayIntersect(otherPoint); - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { const vector<S2Cell>& cells = _multiPoint->cells; for (size_t i = 0; i < cells.size(); ++i) { if (cells[i].MayIntersect(otherPoint)) { return true; } } - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { const vector<S2Polyline*>& lines = _multiLine->lines.vector(); for (size_t i = 0; i < lines.size(); ++i) { if (lines[i]->MayIntersect(otherPoint)) { return true; } } - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { const vector<S2Polygon*>& polys = _multiPolygon->polygons.vector(); for (size_t i = 0; i < polys.size(); ++i) { if (polys[i]->MayIntersect(otherPoint)) { return true; } } - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { const GeometryCollection& c = *_geometryCollection; for (size_t i = 0; i < c.points.size(); ++i) { @@ -728,33 +729,33 @@ bool polygonLineIntersection(const S2Polyline& line, const S2Polygon& poly) { } bool GeometryContainer::intersects(const S2Polyline& 
otherLine) const { - if (NULL != _point) { + if (nullptr != _point) { return otherLine.MayIntersect(_point->cell); - } else if (NULL != _line) { + } else if (nullptr != _line) { return otherLine.Intersects(&_line->line); - } else if (NULL != _polygon && NULL != _polygon->s2Polygon) { + } else if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return polygonLineIntersection(otherLine, *_polygon->s2Polygon); - } else if (NULL != _polygon && NULL != _polygon->bigPolygon) { + } else if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return _polygon->bigPolygon->Intersects(otherLine); - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { for (size_t i = 0; i < _multiPoint->cells.size(); ++i) { if (otherLine.MayIntersect(_multiPoint->cells[i])) { return true; } } - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) { if (otherLine.Intersects(_multiLine->lines.vector()[i])) { return true; } } - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) { if (polygonLineIntersection(otherLine, *_multiPolygon->polygons.vector()[i])) { return true; } } - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { const GeometryCollection& c = *_geometryCollection; for (size_t i = 0; i < c.points.size(); ++i) { @@ -808,33 +809,33 @@ bool GeometryContainer::intersects(const S2Polyline& otherLine) const { // Does 'this' intersect with the provided polygon? 
bool GeometryContainer::intersects(const S2Polygon& otherPolygon) const { - if (NULL != _point) { + if (nullptr != _point) { return otherPolygon.MayIntersect(_point->cell); - } else if (NULL != _line) { + } else if (nullptr != _line) { return polygonLineIntersection(_line->line, otherPolygon); - } else if (NULL != _polygon && NULL != _polygon->s2Polygon) { + } else if (nullptr != _polygon && nullptr != _polygon->s2Polygon) { return otherPolygon.Intersects(_polygon->s2Polygon.get()); - } else if (NULL != _polygon && NULL != _polygon->bigPolygon) { + } else if (nullptr != _polygon && nullptr != _polygon->bigPolygon) { return _polygon->bigPolygon->Intersects(otherPolygon); - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { for (size_t i = 0; i < _multiPoint->cells.size(); ++i) { if (otherPolygon.MayIntersect(_multiPoint->cells[i])) { return true; } } - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { for (size_t i = 0; i < _multiLine->lines.vector().size(); ++i) { if (polygonLineIntersection(*_multiLine->lines.vector()[i], otherPolygon)) { return true; } } - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { for (size_t i = 0; i < _multiPolygon->polygons.vector().size(); ++i) { if (otherPolygon.Intersects(_multiPolygon->polygons.vector()[i])) { return true; } } - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { const GeometryCollection& c = *_geometryCollection; for (size_t i = 0; i < c.points.size(); ++i) { @@ -1069,23 +1070,23 @@ Status GeometryContainer::parseFromStorage(const BSONElement& elem, bool skipVal } string GeometryContainer::getDebugType() const { - if (NULL != _point) { + if (nullptr != _point) { return "pt"; - } else if (NULL != _line) { + } else if (nullptr != _line) { return "ln"; - } else if (NULL != _box) { + } else if (nullptr != _box) { return "bx"; - } else if (NULL != _polygon) { + } else if (nullptr != _polygon) { return "pl"; - } 
else if (NULL != _cap) { + } else if (nullptr != _cap) { return "cc"; - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { return "mp"; - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { return "ml"; - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { return "my"; - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { return "gc"; } else { MONGO_UNREACHABLE; @@ -1096,23 +1097,23 @@ string GeometryContainer::getDebugType() const { CRS GeometryContainer::getNativeCRS() const { // TODO: Fix geometry collection reporting when/if we support multiple CRSes - if (NULL != _point) { + if (nullptr != _point) { return _point->crs; - } else if (NULL != _line) { + } else if (nullptr != _line) { return _line->crs; - } else if (NULL != _box) { + } else if (nullptr != _box) { return _box->crs; - } else if (NULL != _polygon) { + } else if (nullptr != _polygon) { return _polygon->crs; - } else if (NULL != _cap) { + } else if (nullptr != _cap) { return _cap->crs; - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { return _multiPoint->crs; - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { return _multiLine->crs; - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { return _multiPolygon->crs; - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { return SPHERE; } else { MONGO_UNREACHABLE; @@ -1123,24 +1124,24 @@ CRS GeometryContainer::getNativeCRS() const { bool GeometryContainer::supportsProject(CRS otherCRS) const { // TODO: Fix geometry collection reporting when/if we support more CRSes - if (NULL != _point) { + if (nullptr != _point) { return ShapeProjection::supportsProject(*_point, otherCRS); - } else if (NULL != _line) { + } else if (nullptr != _line) { return _line->crs == otherCRS; - } else if (NULL != _box) { + } else if (nullptr != _box) { return _box->crs == 
otherCRS; - } else if (NULL != _polygon) { + } else if (nullptr != _polygon) { return ShapeProjection::supportsProject(*_polygon, otherCRS); - } else if (NULL != _cap) { + } else if (nullptr != _cap) { return _cap->crs == otherCRS; - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { return _multiPoint->crs == otherCRS; - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { return _multiLine->crs == otherCRS; - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { return _multiPolygon->crs == otherCRS; } else { - invariant(NULL != _geometryCollection); + invariant(nullptr != _geometryCollection); return SPHERE == otherCRS; } } @@ -1149,12 +1150,12 @@ void GeometryContainer::projectInto(CRS otherCRS) { if (getNativeCRS() == otherCRS) return; - if (NULL != _polygon) { + if (nullptr != _polygon) { ShapeProjection::projectInto(_polygon.get(), otherCRS); return; } - invariant(NULL != _point); + invariant(nullptr != _point); ShapeProjection::projectInto(_point.get(), otherCRS); } @@ -1228,7 +1229,7 @@ static double s2MinDistanceRad(const S2Point& s2Point, ++it) { invariant(SPHERE == (*it)->crs); // We don't support distances for big polygons yet. 
- invariant(NULL != (*it)->s2Polygon); + invariant(nullptr != (*it)->s2Polygon); double nextDistance = S2Distance::minDistanceRad(s2Point, *((*it)->s2Polygon)); if (minDistance < 0 || nextDistance < minDistance) { minDistance = nextDistance; @@ -1272,7 +1273,7 @@ double GeometryContainer::minDistance(const PointWithCRS& otherPoint) const { const CRS crs = getNativeCRS(); if (FLAT == crs) { - invariant(NULL != _point); + invariant(nullptr != _point); if (FLAT == otherPoint.crs) { return distance(_point->oldPoint, otherPoint.oldPoint); @@ -1286,23 +1287,23 @@ double GeometryContainer::minDistance(const PointWithCRS& otherPoint) const { double minDistance = -1; - if (NULL != _point) { + if (nullptr != _point) { minDistance = S2Distance::distanceRad(otherPoint.point, _point->point); - } else if (NULL != _line) { + } else if (nullptr != _line) { minDistance = S2Distance::minDistanceRad(otherPoint.point, _line->line); - } else if (NULL != _polygon) { + } else if (nullptr != _polygon) { // We don't support distances for big polygons yet. 
- invariant(NULL != _polygon->s2Polygon); + invariant(nullptr != _polygon->s2Polygon); minDistance = S2Distance::minDistanceRad(otherPoint.point, *_polygon->s2Polygon); - } else if (NULL != _cap) { + } else if (nullptr != _cap) { minDistance = S2Distance::minDistanceRad(otherPoint.point, _cap->cap); - } else if (NULL != _multiPoint) { + } else if (nullptr != _multiPoint) { minDistance = s2MinDistanceRad(otherPoint.point, *_multiPoint); - } else if (NULL != _multiLine) { + } else if (nullptr != _multiLine) { minDistance = s2MinDistanceRad(otherPoint.point, *_multiLine); - } else if (NULL != _multiPolygon) { + } else if (nullptr != _multiPolygon) { minDistance = s2MinDistanceRad(otherPoint.point, *_multiPolygon); - } else if (NULL != _geometryCollection) { + } else if (nullptr != _geometryCollection) { minDistance = s2MinDistanceRad(otherPoint.point, *_geometryCollection); } diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp index f74a403f77b..761f8c79213 100644 --- a/src/mongo/db/geo/hash.cpp +++ b/src/mongo/db/geo/hash.cpp @@ -738,7 +738,7 @@ GeoHash GeoHashConverter::hash(const Point& p) const { } GeoHash GeoHashConverter::hash(const BSONObj& o) const { - return hash(o, NULL); + return hash(o, nullptr); } // src is printed out as debugging information. Maybe it is actually somehow the 'source' of o? 
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp index 4b170dbc1d3..67ebbf37924 100644 --- a/src/mongo/db/geo/r2_region_coverer.cpp +++ b/src/mongo/db/geo/r2_region_coverer.cpp @@ -61,7 +61,7 @@ R2RegionCoverer::R2RegionCoverer(GeoHashConverter* hashConverter) _minLevel(0u), _maxLevel(GeoHash::kMaxBits), _maxCells(kDefaultMaxCells), - _region(NULL), + _region(nullptr), _candidateQueue(new CandidateQueue), _results(new vector<GeoHash>) {} @@ -126,7 +126,7 @@ void R2RegionCoverer::getCovering(const R2Region& region, vector<GeoHash>* cover LOG(3) << "Queue: " << _candidateQueue->size(); } - _region = NULL; + _region = nullptr; cover->swap(*_results); } @@ -136,7 +136,7 @@ R2RegionCoverer::Candidate* R2RegionCoverer::newCandidate(const GeoHash& cell) { Box box = _hashConverter->unhashToBoxCovering(cell); if (_region->fastDisjoint(box)) { - return NULL; + return nullptr; } Candidate* candidate = new Candidate(); @@ -152,7 +152,7 @@ R2RegionCoverer::Candidate* R2RegionCoverer::newCandidate(const GeoHash& cell) { // Takes ownership of "candidate" void R2RegionCoverer::addCandidate(Candidate* candidate) { - if (candidate == NULL) + if (candidate == nullptr) return; if (candidate->isTerminal) { diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp index edae421b9f6..d90a8dddad3 100644 --- a/src/mongo/db/index/index_descriptor.cpp +++ b/src/mongo/db/index/index_descriptor.cpp @@ -112,7 +112,7 @@ IndexDescriptor::IndexDescriptor(Collection* collection, _sparse(infoObj[IndexDescriptor::kSparseFieldName].trueValue()), _unique(_isIdIndex || infoObj[kUniqueFieldName].trueValue()), _partial(!infoObj[kPartialFilterExprFieldName].eoo()), - _cachedEntry(NULL) { + _cachedEntry(nullptr) { _indexNamespace = NamespaceString(_parentNS).makeIndexNamespace(_indexName).ns(); BSONElement e = _infoObj[IndexDescriptor::kIndexVersionFieldName]; diff --git a/src/mongo/db/initialize_server_global_state.cpp 
b/src/mongo/db/initialize_server_global_state.cpp index 0565045ba9f..5fa77e32020 100644 --- a/src/mongo/db/initialize_server_global_state.cpp +++ b/src/mongo/db/initialize_server_global_state.cpp @@ -177,21 +177,21 @@ static bool forkServer() { // this is run in the final child process (the server) FILE* f = freopen("/dev/null", "w", stdout); - if (f == NULL) { + if (f == nullptr) { cout << "Cant reassign stdout while forking server process: " << strerror(errno) << endl; return false; } f = freopen("/dev/null", "w", stderr); - if (f == NULL) { + if (f == nullptr) { cout << "Cant reassign stderr while forking server process: " << strerror(errno) << endl; return false; } f = freopen("/dev/null", "r", stdin); - if (f == NULL) { + if (f == nullptr) { cout << "Cant reassign stdin while forking server process: " << strerror(errno) << endl; return false; } diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp index af7205ef822..c00d5899eda 100644 --- a/src/mongo/db/matcher/expression_array.cpp +++ b/src/mongo/db/matcher/expression_array.cpp @@ -77,7 +77,7 @@ bool ElemMatchObjectMatchExpression::matchesArray(const BSONObj& anArray, BSONElement inner = i.next(); if (!inner.isABSONObj()) continue; - if (_sub->matchesBSON(inner.Obj(), NULL)) { + if (_sub->matchesBSON(inner.Obj(), nullptr)) { if (details && details->needRecord()) { details->setElemMatchKey(inner.fieldName()); } @@ -92,7 +92,7 @@ void ElemMatchObjectMatchExpression::debugString(StringBuilder& debug, int inden debug << path() << " $elemMatch (obj)"; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -165,7 +165,7 @@ void ElemMatchValueMatchExpression::debugString(StringBuilder& debug, int indent debug << path() << " $elemMatch (value)"; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -218,7 +218,7 @@ void 
SizeMatchExpression::debugString(StringBuilder& debug, int indentationLevel debug << path() << " $size : " << _size << "\n"; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } diff --git a/src/mongo/db/matcher/expression_array_test.cpp b/src/mongo/db/matcher/expression_array_test.cpp index 6c9ac139edd..40e94ef38ee 100644 --- a/src/mongo/db/matcher/expression_array_test.cpp +++ b/src/mongo/db/matcher/expression_array_test.cpp @@ -92,29 +92,30 @@ TEST(ElemMatchObjectMatchExpression, MatchesNonArray) { ElemMatchObjectMatchExpression op("a", eq.release()); // Directly nested objects are not matched with $elemMatch. An intervening array is // required. - ASSERT(!op.matchesBSON(BSON("a" << BSON("b" << 5)), NULL)); - ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << (BSON("b" << 5)))), NULL)); - ASSERT(!op.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(!op.matchesBSON(BSON("a" << BSON("b" << 5)), nullptr)); + ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << (BSON("b" << 5)))), nullptr)); + ASSERT(!op.matchesBSON(BSON("a" << 4), nullptr)); } TEST(ElemMatchObjectMatchExpression, MatchesArrayObject) { BSONObj baseOperand = BSON("b" << 5); unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression("b", baseOperand["b"])); ElemMatchObjectMatchExpression op("a", eq.release()); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL)); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << BSON("b" << 5))), NULL)); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << BSON("b" << 5))), NULL)); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 6) << BSON("b" << 5))), NULL)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), nullptr)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << BSON("b" << 5))), nullptr)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << BSON("b" << 5))), nullptr)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 6) 
<< BSON("b" << 5))), nullptr)); } TEST(ElemMatchObjectMatchExpression, MatchesMultipleNamedValues) { BSONObj baseOperand = BSON("c" << 5); unique_ptr<ComparisonMatchExpression> eq(new EqualityMatchExpression("c", baseOperand["c"])); ElemMatchObjectMatchExpression op("a.b", eq.release()); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(BSON("c" << 5))))), NULL)); + ASSERT( + op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(BSON("c" << 5))))), nullptr)); ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(BSON("c" << 1))) << BSON("b" << BSON_ARRAY(BSON("c" << 5))))), - NULL)); + nullptr)); } TEST(ElemMatchObjectMatchExpression, ElemMatchKey) { @@ -202,27 +203,27 @@ TEST(ElemMatchValueMatchExpression, MatchesNonArray) { ElemMatchObjectMatchExpression op("a", gt.release()); // Directly nested objects are not matched with $elemMatch. An intervening array is // required. - ASSERT(!op.matchesBSON(BSON("a" << 6), NULL)); - ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << 6)), NULL)); + ASSERT(!op.matchesBSON(BSON("a" << 6), nullptr)); + ASSERT(!op.matchesBSON(BSON("a" << BSON("0" << 6)), nullptr)); } TEST(ElemMatchValueMatchExpression, MatchesArrayScalar) { BSONObj baseOperand = BSON("$gt" << 5); unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression("", baseOperand["$gt"])); ElemMatchValueMatchExpression op("a", gt.release()); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL)); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << 7)), NULL)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), nullptr)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj() << 7)), nullptr)); } TEST(ElemMatchValueMatchExpression, MatchesMultipleNamedValues) { BSONObj baseOperand = BSON("$gt" << 5); unique_ptr<ComparisonMatchExpression> gt(new GTMatchExpression("", baseOperand["$gt"])); 
ElemMatchValueMatchExpression op("a.b", gt.release()); - ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(6)))), NULL)); + ASSERT(op.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(6)))), nullptr)); ASSERT(op.matchesBSON( BSON("a" << BSON_ARRAY(BSON("b" << BSON_ARRAY(4)) << BSON("b" << BSON_ARRAY(4 << 6)))), - NULL)); + nullptr)); } TEST(ElemMatchValueMatchExpression, ElemMatchKey) { @@ -345,19 +346,19 @@ TEST(AndOfElemMatch, Matches) { andOfEM->add(elemMatch2.release()); BSONObj nonArray = BSON("x" << 4); - ASSERT(!andOfEM->matchesBSON(nonArray, NULL)); + ASSERT(!andOfEM->matchesBSON(nonArray, nullptr)); BSONObj emptyArray = BSON("x" << BSONArray()); - ASSERT(!andOfEM->matchesBSON(emptyArray, NULL)); + ASSERT(!andOfEM->matchesBSON(emptyArray, nullptr)); BSONObj nonNumberArray = BSON("x" << BSON_ARRAY("q")); - ASSERT(!andOfEM->matchesBSON(nonNumberArray, NULL)); + ASSERT(!andOfEM->matchesBSON(nonNumberArray, nullptr)); BSONObj singleMatch = BSON("x" << BSON_ARRAY(5)); - ASSERT(!andOfEM->matchesBSON(singleMatch, NULL)); + ASSERT(!andOfEM->matchesBSON(singleMatch, nullptr)); BSONObj otherMatch = BSON("x" << BSON_ARRAY(105)); - ASSERT(!andOfEM->matchesBSON(otherMatch, NULL)); + ASSERT(!andOfEM->matchesBSON(otherMatch, nullptr)); BSONObj bothMatch = BSON("x" << BSON_ARRAY(5 << 105)); - ASSERT(andOfEM->matchesBSON(bothMatch, NULL)); + ASSERT(andOfEM->matchesBSON(bothMatch, nullptr)); BSONObj neitherMatch = BSON("x" << BSON_ARRAY(0 << 200)); - ASSERT(!andOfEM->matchesBSON(neitherMatch, NULL)); + ASSERT(!andOfEM->matchesBSON(neitherMatch, nullptr)); } TEST(SizeMatchExpression, MatchesElement) { @@ -382,15 +383,15 @@ TEST(SizeMatchExpression, MatchesNonArray) { TEST(SizeMatchExpression, MatchesArray) { SizeMatchExpression size("a", 2); - ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL)); + ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), nullptr)); // Arrays are not unwound to look for matching subarrays. 
- ASSERT(!size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), NULL)); + ASSERT(!size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), nullptr)); } TEST(SizeMatchExpression, MatchesNestedArray) { SizeMatchExpression size("a.2", 2); // A numerically referenced nested array is matched. - ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), NULL)); + ASSERT(size.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5 << BSON_ARRAY(1 << 2))), nullptr)); } TEST(SizeMatchExpression, ElemMatchKey) { diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp index 18e38b319f0..67ee37060d7 100644 --- a/src/mongo/db/matcher/expression_geo.cpp +++ b/src/mongo/db/matcher/expression_geo.cpp @@ -89,7 +89,7 @@ Status GeoExpression::parseQuery(const BSONObj& obj) { } } - if (geoContainer == NULL) { + if (geoContainer == nullptr) { return Status(ErrorCodes::BadValue, "geo query doesn't have any geometry"); } @@ -376,7 +376,7 @@ void GeoMatchExpression::debugString(StringBuilder& debug, int indentationLevel) debug << "GEO raw = " << builder.obj().toString(); MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -434,7 +434,7 @@ void GeoNearMatchExpression::debugString(StringBuilder& debug, int indentationLe _debugAddSpace(debug, indentationLevel); debug << "GEONEAR " << _query->toString(); MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp index ee43237617e..3001eb59ba8 100644 --- a/src/mongo/db/matcher/expression_leaf.cpp +++ b/src/mongo/db/matcher/expression_leaf.cpp @@ -263,7 +263,7 @@ void RegexMatchExpression::debugString(StringBuilder& debug, int indentationLeve debug << path() << " regex /" << _regex << "/" << _flags; MatchExpression::TagData* 
td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -306,7 +306,7 @@ void ModMatchExpression::debugString(StringBuilder& debug, int indentationLevel) _debugAddSpace(debug, indentationLevel); debug << path() << " mod " << _divisor << " % x == " << _remainder; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -340,7 +340,7 @@ void ExistsMatchExpression::debugString(StringBuilder& debug, int indentationLev _debugAddSpace(debug, indentationLevel); debug << path() << " exists"; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } @@ -416,7 +416,7 @@ void InMatchExpression::debugString(StringBuilder& debug, int indentationLevel) } debug << "]"; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { debug << " "; td->debugString(&debug); } diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp index bb06d26b7f7..859fbe3bc98 100644 --- a/src/mongo/db/matcher/expression_leaf_test.cpp +++ b/src/mongo/db/matcher/expression_leaf_test.cpp @@ -71,7 +71,7 @@ TEST(ComparisonMatchExpression, StringMatchingWithNullCollatorUsesBinaryComparis EqualityMatchExpression eq("a", operand["a"]); ASSERT(!eq.matchesBSON(BSON("a" << "string2"), - NULL)); + nullptr)); } TEST(ComparisonMatchExpression, StringMatchingRespectsCollation) { @@ -82,7 +82,7 @@ TEST(ComparisonMatchExpression, StringMatchingRespectsCollation) { eq.setCollator(&collator); ASSERT(eq.matchesBSON(BSON("a" << "string2"), - NULL)); + nullptr)); } TEST(EqOp, MatchesElement) { @@ -105,40 +105,40 @@ DEATH_TEST(EqOp, InvalidEooOperand, "Invariant failure _rhs") { TEST(EqOp, MatchesScalar) { BSONObj operand = BSON("a" << 5); EqualityMatchExpression eq("a", operand["a"]); - ASSERT(eq.matchesBSON(BSON("a" << 5.0), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << 
4), NULL)); + ASSERT(eq.matchesBSON(BSON("a" << 5.0), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << 4), nullptr)); } TEST(EqOp, MatchesArrayValue) { BSONObj operand = BSON("a" << 5); EqualityMatchExpression eq("a", operand["a"]); - ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL)); + ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), nullptr)); } TEST(EqOp, MatchesReferencedObjectValue) { BSONObj operand = BSON("a.b" << 5); EqualityMatchExpression eq("a.b", operand["a.b"]); - ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << 5)), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(5))), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL)); + ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << 5)), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(5))), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), nullptr)); } TEST(EqOp, MatchesReferencedArrayValue) { BSONObj operand = BSON("a.0" << 5); EqualityMatchExpression eq("a.0", operand["a.0"]); - ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); + ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); } TEST(EqOp, MatchesNull) { BSONObj operand = BSON("a" << BSONNULL); EqualityMatchExpression eq("a", operand["a"]); - ASSERT(eq.matchesBSON(BSONObj(), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(eq.matchesBSON(BSONObj(), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(eq.matchesBSON(BSON("b" << 4), NULL)); + 
ASSERT(eq.matchesBSON(BSON("b" << 4), nullptr)); } // This test documents how the matcher currently works, @@ -147,19 +147,19 @@ TEST(EqOp, MatchesNestedNull) { BSONObj operand = BSON("a.b" << BSONNULL); EqualityMatchExpression eq("a.b", operand["a.b"]); // null matches any empty object that is on a subpath of a.b - ASSERT(eq.matchesBSON(BSONObj(), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSONObj()), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj())), NULL)); - ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSONNULL)), NULL)); + ASSERT(eq.matchesBSON(BSONObj(), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSONObj()), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONObj())), nullptr)); + ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSONNULL)), nullptr)); // b does not exist as an element in array under a. - ASSERT(!eq.matchesBSON(BSON("a" << BSONArray()), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONNULL)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL)); + ASSERT(!eq.matchesBSON(BSON("a" << BSONArray()), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSONNULL)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), nullptr)); // a.b exists but is not null. 
- ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << 4)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << BSONObj())), NULL)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << 4)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON("b" << BSONObj())), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(eq.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(eq.matchesBSON(BSON("b" << 4), nullptr)); } TEST(EqOp, MatchesMinKey) { @@ -169,9 +169,9 @@ TEST(EqOp, MatchesMinKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(eq.matchesBSON(minKeyObj, NULL)); - ASSERT(!eq.matchesBSON(maxKeyObj, NULL)); - ASSERT(!eq.matchesBSON(numObj, NULL)); + ASSERT(eq.matchesBSON(minKeyObj, nullptr)); + ASSERT(!eq.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!eq.matchesBSON(numObj, nullptr)); ASSERT(eq.matchesSingleElement(minKeyObj.firstElement())); ASSERT(!eq.matchesSingleElement(maxKeyObj.firstElement())); @@ -186,9 +186,9 @@ TEST(EqOp, MatchesMaxKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(!eq.matchesBSON(minKeyObj, NULL)); - ASSERT(eq.matchesBSON(maxKeyObj, NULL)); - ASSERT(!eq.matchesBSON(numObj, NULL)); + ASSERT(!eq.matchesBSON(minKeyObj, nullptr)); + ASSERT(eq.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!eq.matchesBSON(numObj, nullptr)); ASSERT(!eq.matchesSingleElement(minKeyObj.firstElement())); ASSERT(eq.matchesSingleElement(maxKeyObj.firstElement())); @@ -198,17 +198,17 @@ TEST(EqOp, MatchesMaxKey) { TEST(EqOp, MatchesFullArray) { BSONObj operand = BSON("a" << BSON_ARRAY(1 << 2)); EqualityMatchExpression eq("a", operand["a"]); - ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL)); - ASSERT(!eq.matchesBSON(BSON("a" << 1), NULL)); + ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), nullptr)); + 
ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(1)), nullptr)); + ASSERT(!eq.matchesBSON(BSON("a" << 1), nullptr)); } TEST(EqOp, MatchesThroughNestedArray) { BSONObj operand = BSON("a.b.c.d" << 3); EqualityMatchExpression eq("a.b.c.d", operand["a.b.c.d"]); BSONObj obj = fromjson("{a:{b:[{c:[{d:1},{d:2}]},{c:[{d:3}]}]}}"); - ASSERT(eq.matchesBSON(obj, NULL)); + ASSERT(eq.matchesBSON(obj, nullptr)); } TEST(EqOp, ElemMatchKey) { @@ -273,57 +273,57 @@ DEATH_TEST(LtOp, InvalidEooOperand, "Invariant failure _rhs") { TEST(LtOp, MatchesScalar) { BSONObj operand = BSON("$lt" << 5); LTMatchExpression lt("a", operand["$lt"]); - ASSERT(lt.matchesBSON(BSON("a" << 4.5), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << 6), NULL)); + ASSERT(lt.matchesBSON(BSON("a" << 4.5), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << 6), nullptr)); } TEST(LtOp, MatchesScalarEmptyKey) { BSONObj operand = BSON("$lt" << 5); LTMatchExpression lt("", operand["$lt"]); - ASSERT(lt.matchesBSON(BSON("" << 4.5), NULL)); - ASSERT(!lt.matchesBSON(BSON("" << 6), NULL)); + ASSERT(lt.matchesBSON(BSON("" << 4.5), nullptr)); + ASSERT(!lt.matchesBSON(BSON("" << 6), nullptr)); } TEST(LtOp, MatchesArrayValue) { BSONObj operand = BSON("$lt" << 5); LTMatchExpression lt("a", operand["$lt"]); - ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 4.5)), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL)); + ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 4.5)), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), nullptr)); } TEST(LtOp, MatchesWholeArray) { BSONObj operand = BSON("$lt" << BSON_ARRAY(5)); LTMatchExpression lt("a", operand["$lt"]); - ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); + ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + 
ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(5)), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); // Nested array. - ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL)); + ASSERT(lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), nullptr)); } TEST(LtOp, MatchesNull) { BSONObj operand = BSON("$lt" << BSONNULL); LTMatchExpression lt("a", operand["$lt"]); - ASSERT(!lt.matchesBSON(BSONObj(), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(!lt.matchesBSON(BSONObj(), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(!lt.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(!lt.matchesBSON(BSON("b" << 4), nullptr)); } TEST(LtOp, MatchesDotNotationNull) { BSONObj operand = BSON("$lt" << BSONNULL); LTMatchExpression lt("a.b", operand["$lt"]); - ASSERT(!lt.matchesBSON(BSONObj(), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSONObj()), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL)); + ASSERT(!lt.matchesBSON(BSONObj(), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << 4), nullptr)); + 
ASSERT(!lt.matchesBSON(BSON("a" << BSONObj()), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(!lt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), nullptr)); } TEST(LtOp, MatchesMinKey) { @@ -333,9 +333,9 @@ TEST(LtOp, MatchesMinKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(!lt.matchesBSON(minKeyObj, NULL)); - ASSERT(!lt.matchesBSON(maxKeyObj, NULL)); - ASSERT(!lt.matchesBSON(numObj, NULL)); + ASSERT(!lt.matchesBSON(minKeyObj, nullptr)); + ASSERT(!lt.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!lt.matchesBSON(numObj, nullptr)); ASSERT(!lt.matchesSingleElement(minKeyObj.firstElement())); ASSERT(!lt.matchesSingleElement(maxKeyObj.firstElement())); @@ -349,9 +349,9 @@ TEST(LtOp, MatchesMaxKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(lt.matchesBSON(minKeyObj, NULL)); - ASSERT(!lt.matchesBSON(maxKeyObj, NULL)); - ASSERT(lt.matchesBSON(numObj, NULL)); + ASSERT(lt.matchesBSON(minKeyObj, nullptr)); + ASSERT(!lt.matchesBSON(maxKeyObj, nullptr)); + ASSERT(lt.matchesBSON(numObj, nullptr)); ASSERT(lt.matchesSingleElement(minKeyObj.firstElement())); ASSERT(!lt.matchesSingleElement(maxKeyObj.firstElement())); @@ -394,50 +394,50 @@ DEATH_TEST(LteOp, InvalidEooOperand, "Invariant failure _rhs") { TEST(LteOp, MatchesScalar) { BSONObj operand = BSON("$lte" << 5); LTEMatchExpression lte("a", operand["$lte"]); - ASSERT(lte.matchesBSON(BSON("a" << 4.5), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << 6), NULL)); + ASSERT(lte.matchesBSON(BSON("a" << 4.5), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << 6), nullptr)); } TEST(LteOp, MatchesArrayValue) { BSONObj operand = BSON("$lte" << 5); LTEMatchExpression lte("a", operand["$lte"]); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 
4.5)), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 4.5)), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), nullptr)); } TEST(LteOp, MatchesWholeArray) { BSONObj operand = BSON("$lte" << BSON_ARRAY(5)); LTEMatchExpression lte("a", operand["$lte"]); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(5)), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); // Nested array. - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), nullptr)); } TEST(LteOp, MatchesNull) { BSONObj operand = BSON("$lte" << BSONNULL); LTEMatchExpression lte("a", operand["$lte"]); - ASSERT(lte.matchesBSON(BSONObj(), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(lte.matchesBSON(BSONObj(), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(lte.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(lte.matchesBSON(BSON("b" << 4), nullptr)); } TEST(LteOp, MatchesDotNotationNull) { BSONObj operand = BSON("$lte" << BSONNULL); LTEMatchExpression lte("a.b", operand["$lte"]); - ASSERT(lte.matchesBSON(BSONObj(), NULL)); - 
ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSONObj()), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL)); - ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL)); + ASSERT(lte.matchesBSON(BSONObj(), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSONObj()), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), nullptr)); + ASSERT(lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(!lte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), nullptr)); } TEST(LteOp, MatchesMinKey) { @@ -447,9 +447,9 @@ TEST(LteOp, MatchesMinKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(lte.matchesBSON(minKeyObj, NULL)); - ASSERT(!lte.matchesBSON(maxKeyObj, NULL)); - ASSERT(!lte.matchesBSON(numObj, NULL)); + ASSERT(lte.matchesBSON(minKeyObj, nullptr)); + ASSERT(!lte.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!lte.matchesBSON(numObj, nullptr)); ASSERT(lte.matchesSingleElement(minKeyObj.firstElement())); ASSERT(!lte.matchesSingleElement(maxKeyObj.firstElement())); @@ -463,9 +463,9 @@ TEST(LteOp, MatchesMaxKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(lte.matchesBSON(minKeyObj, NULL)); - ASSERT(lte.matchesBSON(maxKeyObj, NULL)); - ASSERT(lte.matchesBSON(numObj, NULL)); + ASSERT(lte.matchesBSON(minKeyObj, nullptr)); + ASSERT(lte.matchesBSON(maxKeyObj, nullptr)); + ASSERT(lte.matchesBSON(numObj, nullptr)); 
ASSERT(lte.matchesSingleElement(minKeyObj.firstElement())); ASSERT(lte.matchesSingleElement(maxKeyObj.firstElement())); @@ -495,52 +495,52 @@ DEATH_TEST(GtOp, InvalidEooOperand, "Invariant failure _rhs") { TEST(GtOp, MatchesScalar) { BSONObj operand = BSON("$gt" << 5); GTMatchExpression gt("a", operand["$gt"]); - ASSERT(gt.matchesBSON(BSON("a" << 5.5), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(gt.matchesBSON(BSON("a" << 5.5), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << 4), nullptr)); } TEST(GtOp, MatchesArrayValue) { BSONObj operand = BSON("$gt" << 5); GTMatchExpression gt("a", operand["$gt"]); - ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(3 << 5.5)), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(2 << 4)), NULL)); + ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(3 << 5.5)), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(2 << 4)), nullptr)); } TEST(GtOp, MatchesWholeArray) { BSONObj operand = BSON("$gt" << BSON_ARRAY(5)); GTMatchExpression gt("a", operand["$gt"]); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL)); - ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(5)), nullptr)); + ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); // Nested array. // XXX: The following assertion documents current behavior. - ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL)); + ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), nullptr)); // XXX: The following assertion documents current behavior. 
- ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); - ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL)); + ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); + ASSERT(gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), nullptr)); } TEST(GtOp, MatchesNull) { BSONObj operand = BSON("$gt" << BSONNULL); GTMatchExpression gt("a", operand["$gt"]); - ASSERT(!gt.matchesBSON(BSONObj(), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(!gt.matchesBSON(BSONObj(), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(!gt.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(!gt.matchesBSON(BSON("b" << 4), nullptr)); } TEST(GtOp, MatchesDotNotationNull) { BSONObj operand = BSON("$gt" << BSONNULL); GTMatchExpression gt("a.b", operand["$gt"]); - ASSERT(!gt.matchesBSON(BSONObj(), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSONObj()), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL)); + ASSERT(!gt.matchesBSON(BSONObj(), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSONObj()), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), nullptr)); + ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + 
ASSERT(!gt.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), nullptr)); } TEST(GtOp, MatchesMinKey) { @@ -550,9 +550,9 @@ TEST(GtOp, MatchesMinKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(!gt.matchesBSON(minKeyObj, NULL)); - ASSERT(gt.matchesBSON(maxKeyObj, NULL)); - ASSERT(gt.matchesBSON(numObj, NULL)); + ASSERT(!gt.matchesBSON(minKeyObj, nullptr)); + ASSERT(gt.matchesBSON(maxKeyObj, nullptr)); + ASSERT(gt.matchesBSON(numObj, nullptr)); ASSERT(!gt.matchesSingleElement(minKeyObj.firstElement())); ASSERT(gt.matchesSingleElement(maxKeyObj.firstElement())); @@ -566,9 +566,9 @@ TEST(GtOp, MatchesMaxKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(!gt.matchesBSON(minKeyObj, NULL)); - ASSERT(!gt.matchesBSON(maxKeyObj, NULL)); - ASSERT(!gt.matchesBSON(numObj, NULL)); + ASSERT(!gt.matchesBSON(minKeyObj, nullptr)); + ASSERT(!gt.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!gt.matchesBSON(numObj, nullptr)); ASSERT(!gt.matchesSingleElement(minKeyObj.firstElement())); ASSERT(!gt.matchesSingleElement(maxKeyObj.firstElement())); @@ -611,51 +611,51 @@ DEATH_TEST(GteOp, InvalidEooOperand, "Invariant failure _rhs") { TEST(GteOp, MatchesScalar) { BSONObj operand = BSON("$gte" << 5); GTEMatchExpression gte("a", operand["$gte"]); - ASSERT(gte.matchesBSON(BSON("a" << 5.5), NULL)); - ASSERT(!gte.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(gte.matchesBSON(BSON("a" << 5.5), nullptr)); + ASSERT(!gte.matchesBSON(BSON("a" << 4), nullptr)); } TEST(GteOp, MatchesArrayValue) { BSONObj operand = BSON("$gte" << 5); GTEMatchExpression gte("a", operand["$gte"]); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL)); - ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), nullptr)); + ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), nullptr)); } TEST(GteOp, MatchesWholeArray) { BSONObj operand = BSON("$gte" << 
BSON_ARRAY(5)); GTEMatchExpression gte("a", operand["$gte"]); - ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); + ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(5)), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); // Nested array. // XXX: The following assertion documents current behavior. - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), NULL)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(6))), nullptr)); } TEST(GteOp, MatchesNull) { BSONObj operand = BSON("$gte" << BSONNULL); GTEMatchExpression gte("a", operand["$gte"]); - ASSERT(gte.matchesBSON(BSONObj(), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!gte.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(gte.matchesBSON(BSONObj(), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!gte.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(gte.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(gte.matchesBSON(BSON("b" << 4), nullptr)); } TEST(GteOp, MatchesDotNotationNull) { BSONObj operand = BSON("$gte" << BSONNULL); GTEMatchExpression gte("a.b", operand["$gte"]); - ASSERT(gte.matchesBSON(BSONObj(), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSONObj()), NULL)); - ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), NULL)); - 
ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), NULL)); - ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), NULL)); + ASSERT(gte.matchesBSON(BSONObj(), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSONObj()), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << BSONNULL))), nullptr)); + ASSERT(gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("a" << 4) << BSON("b" << 4))), nullptr)); + ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(!gte.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 4))), nullptr)); } TEST(GteOp, MatchesMinKey) { @@ -665,9 +665,9 @@ TEST(GteOp, MatchesMinKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(gte.matchesBSON(minKeyObj, NULL)); - ASSERT(gte.matchesBSON(maxKeyObj, NULL)); - ASSERT(gte.matchesBSON(numObj, NULL)); + ASSERT(gte.matchesBSON(minKeyObj, nullptr)); + ASSERT(gte.matchesBSON(maxKeyObj, nullptr)); + ASSERT(gte.matchesBSON(numObj, nullptr)); ASSERT(gte.matchesSingleElement(minKeyObj.firstElement())); ASSERT(gte.matchesSingleElement(maxKeyObj.firstElement())); @@ -681,9 +681,9 @@ TEST(GteOp, MatchesMaxKey) { BSONObj maxKeyObj = BSON("a" << MaxKey); BSONObj numObj = BSON("a" << 4); - ASSERT(!gte.matchesBSON(minKeyObj, NULL)); - ASSERT(gte.matchesBSON(maxKeyObj, NULL)); - ASSERT(!gte.matchesBSON(numObj, NULL)); + ASSERT(!gte.matchesBSON(minKeyObj, nullptr)); + ASSERT(gte.matchesBSON(maxKeyObj, nullptr)); + ASSERT(!gte.matchesBSON(numObj, nullptr)); ASSERT(!gte.matchesSingleElement(minKeyObj.firstElement())); ASSERT(gte.matchesSingleElement(maxKeyObj.firstElement())); @@ -862,26 +862,26 @@ TEST(RegexMatchExpression, MatchesScalar) { RegexMatchExpression regex("a", "b", ""); ASSERT(regex.matchesBSON(BSON("a" << "b"), - NULL)); + nullptr)); 
ASSERT(!regex.matchesBSON(BSON("a" << "c"), - NULL)); + nullptr)); } TEST(RegexMatchExpression, MatchesArrayValue) { RegexMatchExpression regex("a", "b", ""); ASSERT(regex.matchesBSON(BSON("a" << BSON_ARRAY("c" << "b")), - NULL)); + nullptr)); ASSERT(!regex.matchesBSON(BSON("a" << BSON_ARRAY("d" << "c")), - NULL)); + nullptr)); } TEST(RegexMatchExpression, MatchesNull) { RegexMatchExpression regex("a", "b", ""); - ASSERT(!regex.matchesBSON(BSONObj(), NULL)); - ASSERT(!regex.matchesBSON(BSON("a" << BSONNULL), NULL)); + ASSERT(!regex.matchesBSON(BSONObj(), nullptr)); + ASSERT(!regex.matchesBSON(BSON("a" << BSONNULL), nullptr)); } TEST(RegexMatchExpression, ElemMatchKey) { @@ -1034,20 +1034,20 @@ TEST(ModMatchExpression, ZeroDivisor) { TEST(ModMatchExpression, MatchesScalar) { ModMatchExpression mod("a", 5, 2); - ASSERT(mod.matchesBSON(BSON("a" << 7.0), NULL)); - ASSERT(!mod.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(mod.matchesBSON(BSON("a" << 7.0), nullptr)); + ASSERT(!mod.matchesBSON(BSON("a" << 4), nullptr)); } TEST(ModMatchExpression, MatchesArrayValue) { ModMatchExpression mod("a", 5, 2); - ASSERT(mod.matchesBSON(BSON("a" << BSON_ARRAY(5 << 12LL)), NULL)); - ASSERT(!mod.matchesBSON(BSON("a" << BSON_ARRAY(6 << 8)), NULL)); + ASSERT(mod.matchesBSON(BSON("a" << BSON_ARRAY(5 << 12LL)), nullptr)); + ASSERT(!mod.matchesBSON(BSON("a" << BSON_ARRAY(6 << 8)), nullptr)); } TEST(ModMatchExpression, MatchesNull) { ModMatchExpression mod("a", 5, 2); - ASSERT(!mod.matchesBSON(BSONObj(), NULL)); - ASSERT(!mod.matchesBSON(BSON("a" << BSONNULL), NULL)); + ASSERT(!mod.matchesBSON(BSONObj(), nullptr)); + ASSERT(!mod.matchesBSON(BSON("a" << BSONNULL), nullptr)); } TEST(ModMatchExpression, ElemMatchKey) { @@ -1095,14 +1095,14 @@ TEST(ExistsMatchExpression, MatchesElementExistsTrueValue) { TEST(ExistsMatchExpression, MatchesScalar) { ExistsMatchExpression exists("a"); - ASSERT(exists.matchesBSON(BSON("a" << 1), NULL)); - ASSERT(exists.matchesBSON(BSON("a" << BSONNULL), NULL)); - 
ASSERT(!exists.matchesBSON(BSON("b" << 1), NULL)); + ASSERT(exists.matchesBSON(BSON("a" << 1), nullptr)); + ASSERT(exists.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!exists.matchesBSON(BSON("b" << 1), nullptr)); } TEST(ExistsMatchExpression, MatchesArray) { ExistsMatchExpression exists("a"); - ASSERT(exists.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), NULL)); + ASSERT(exists.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5.5)), nullptr)); } TEST(ExistsMatchExpression, ElemMatchKey) { @@ -1142,8 +1142,8 @@ TEST(InMatchExpression, MatchesEmpty) { BSONObj notMatch = BSON("a" << 2); ASSERT(!in.matchesSingleElement(notMatch["a"])); - ASSERT(!in.matchesBSON(BSON("a" << 1), NULL)); - ASSERT(!in.matchesBSON(BSONObj(), NULL)); + ASSERT(!in.matchesBSON(BSON("a" << 1), nullptr)); + ASSERT(!in.matchesBSON(BSONObj(), nullptr)); } TEST(InMatchExpression, MatchesElementMultiple) { @@ -1170,8 +1170,8 @@ TEST(InMatchExpression, MatchesScalar) { std::vector<BSONElement> equalities{operand.firstElement()}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSON("a" << 5.0), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(in.matchesBSON(BSON("a" << 5.0), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << 4), nullptr)); } TEST(InMatchExpression, MatchesArrayValue) { @@ -1180,9 +1180,9 @@ TEST(InMatchExpression, MatchesArrayValue) { std::vector<BSONElement> equalities{operand.firstElement()}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL)); + ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), nullptr)); } TEST(InMatchExpression, MatchesNull) { @@ -1192,11 +1192,11 @@ 
TEST(InMatchExpression, MatchesNull) { std::vector<BSONElement> equalities{operand.firstElement()}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSONObj(), NULL)); - ASSERT(in.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(in.matchesBSON(BSONObj(), nullptr)); + ASSERT(in.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << 4), nullptr)); // A non-existent field is treated same way as an empty bson object - ASSERT(in.matchesBSON(BSON("b" << 4), NULL)); + ASSERT(in.matchesBSON(BSON("b" << 4), nullptr)); } TEST(InMatchExpression, MatchesUndefined) { @@ -1213,9 +1213,9 @@ TEST(InMatchExpression, MatchesMinKey) { std::vector<BSONElement> equalities{operand.firstElement()}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSON("a" << MinKey), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << MaxKey), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(in.matchesBSON(BSON("a" << MinKey), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << MaxKey), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << 4), nullptr)); } TEST(InMatchExpression, MatchesMaxKey) { @@ -1224,9 +1224,9 @@ TEST(InMatchExpression, MatchesMaxKey) { std::vector<BSONElement> equalities{operand.firstElement()}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSON("a" << MaxKey), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << MinKey), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(in.matchesBSON(BSON("a" << MaxKey), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << MinKey), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << 4), nullptr)); } TEST(InMatchExpression, MatchesFullArray) { @@ -1235,10 +1235,10 @@ TEST(InMatchExpression, MatchesFullArray) { std::vector<BSONElement> equalities{operand[0], operand[1], operand[2]}; ASSERT_OK(in.setEqualities(std::move(equalities))); - ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 
2)), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL)); - ASSERT(!in.matchesBSON(BSON("a" << 1), NULL)); + ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1)), nullptr)); + ASSERT(!in.matchesBSON(BSON("a" << 1), nullptr)); } TEST(InMatchExpression, ElemMatchKey) { diff --git a/src/mongo/db/matcher/expression_serialization_test.cpp b/src/mongo/db/matcher/expression_serialization_test.cpp index 7384c53e410..862b955423c 100644 --- a/src/mongo/db/matcher/expression_serialization_test.cpp +++ b/src/mongo/db/matcher/expression_serialization_test.cpp @@ -1695,7 +1695,7 @@ TEST(SerializeInternalBinDataSubType, ExpressionBinDataSubTypeSerializesCorrectl fromjson("{x: {$_internalSchemaBinDataSubType: 1}}")); ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression())); - BSONObj obj = BSON("x" << BSONBinData(NULL, 0, BinDataType::bdtCustom)); + BSONObj obj = BSON("x" << BSONBinData(nullptr, 0, BinDataType::bdtCustom)); ASSERT_EQ(original.matches(obj), reserialized.matches(obj)); uint8_t bytes[] = {0, 1, 2, 10, 11, 12}; diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp index ea3fa147de2..e9d18bb2e2e 100644 --- a/src/mongo/db/matcher/expression_text_base.cpp +++ b/src/mongo/db/matcher/expression_text_base.cpp @@ -48,7 +48,7 @@ void TextMatchExpressionBase::debugString(StringBuilder& debug, int indentationL << ", caseSensitive=" << ftsQuery.getCaseSensitive() << ", diacriticSensitive=" << ftsQuery.getDiacriticSensitive() << ", tag="; MatchExpression::TagData* td = getTag(); - if (NULL != td) { + if (nullptr != td) { td->debugString(&debug); } else { debug << "NULL"; diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp index 
b56e96f4924..1fb9cbc4af5 100644 --- a/src/mongo/db/matcher/expression_tree.cpp +++ b/src/mongo/db/matcher/expression_tree.cpp @@ -241,7 +241,7 @@ bool AndMatchExpression::isTriviallyTrue() const { bool OrMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const { for (size_t i = 0; i < numChildren(); i++) { - if (getChild(i)->matches(doc, NULL)) { + if (getChild(i)->matches(doc, nullptr)) { return true; } } @@ -284,7 +284,7 @@ bool OrMatchExpression::isTriviallyFalse() const { bool NorMatchExpression::matches(const MatchableDocument* doc, MatchDetails* details) const { for (size_t i = 0; i < numChildren(); i++) { - if (getChild(i)->matches(doc, NULL)) { + if (getChild(i)->matches(doc, nullptr)) { return false; } } diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h index afecd935d79..b08ce6b03fb 100644 --- a/src/mongo/db/matcher/expression_tree.h +++ b/src/mongo/db/matcher/expression_tree.h @@ -110,7 +110,7 @@ public: AndMatchExpression() : ListOfMatchExpression(AND) {} virtual ~AndMatchExpression() {} - virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const; + virtual bool matches(const MatchableDocument* doc, MatchDetails* details = nullptr) const; bool matchesSingleElement(const BSONElement&, MatchDetails* details = nullptr) const final; @@ -139,7 +139,7 @@ public: OrMatchExpression() : ListOfMatchExpression(OR) {} virtual ~OrMatchExpression() {} - virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const; + virtual bool matches(const MatchableDocument* doc, MatchDetails* details = nullptr) const; bool matchesSingleElement(const BSONElement&, MatchDetails* details = nullptr) const final; @@ -168,7 +168,7 @@ public: NorMatchExpression() : ListOfMatchExpression(NOR) {} virtual ~NorMatchExpression() {} - virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const; + virtual bool matches(const MatchableDocument* doc, 
MatchDetails* details = nullptr) const; bool matchesSingleElement(const BSONElement&, MatchDetails* details = nullptr) const final; @@ -201,8 +201,8 @@ public: return std::move(self); } - virtual bool matches(const MatchableDocument* doc, MatchDetails* details = 0) const { - return !_exp->matches(doc, NULL); + virtual bool matches(const MatchableDocument* doc, MatchDetails* details = nullptr) const { + return !_exp->matches(doc, nullptr); } bool matchesSingleElement(const BSONElement& elt, MatchDetails* details = nullptr) const final { diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp index a0770c2a4df..33bf9352e33 100644 --- a/src/mongo/db/matcher/expression_tree_test.cpp +++ b/src/mongo/db/matcher/expression_tree_test.cpp @@ -46,18 +46,18 @@ TEST(NotMatchExpression, MatchesScalar) { BSONObj baseOperand = BSON("$lt" << 5); unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression("a", baseOperand["$lt"])); NotMatchExpression notOp(lt.release()); - ASSERT(notOp.matchesBSON(BSON("a" << 6), NULL)); - ASSERT(!notOp.matchesBSON(BSON("a" << 4), NULL)); + ASSERT(notOp.matchesBSON(BSON("a" << 6), nullptr)); + ASSERT(!notOp.matchesBSON(BSON("a" << 4), nullptr)); } TEST(NotMatchExpression, MatchesArray) { BSONObj baseOperand = BSON("$lt" << 5); unique_ptr<ComparisonMatchExpression> lt(new LTMatchExpression("a", baseOperand["$lt"])); NotMatchExpression notOp(lt.release()); - ASSERT(notOp.matchesBSON(BSON("a" << BSON_ARRAY(6)), NULL)); - ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); + ASSERT(notOp.matchesBSON(BSON("a" << BSON_ARRAY(6)), nullptr)); + ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); // All array elements must match. 
- ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5 << 6)), NULL)); + ASSERT(!notOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5 << 6)), nullptr)); } TEST(NotMatchExpression, ElemMatchKey) { @@ -89,7 +89,7 @@ TEST(NotMatchExpression, SetCollatorPropagatesToChild) { TEST(AndOp, NoClauses) { AndMatchExpression andMatchExpression; - ASSERT(andMatchExpression.matchesBSON(BSONObj(), NULL)); + ASSERT(andMatchExpression.matchesBSON(BSONObj(), nullptr)); } TEST(AndOp, MatchesElementThreeClauses) { @@ -129,10 +129,10 @@ TEST(AndOp, MatchesSingleClause) { AndMatchExpression andOp; andOp.add(ne.release()); - ASSERT(andOp.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL)); - ASSERT(!andOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(!andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL)); + ASSERT(andOp.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), nullptr)); } TEST(AndOp, MatchesThreeClauses) { @@ -149,11 +149,11 @@ TEST(AndOp, MatchesThreeClauses) { andOp.add(sub2.release()); andOp.add(sub3.release()); - ASSERT(andOp.matchesBSON(BSON("a" << 5 << "b" << 6), NULL)); - ASSERT(!andOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(!andOp.matchesBSON(BSON("b" << 6), NULL)); - ASSERT(!andOp.matchesBSON(BSON("a" << 1 << "b" << 6), NULL)); - ASSERT(!andOp.matchesBSON(BSON("a" << 10 << "b" << 6), NULL)); + ASSERT(andOp.matchesBSON(BSON("a" << 5 << "b" << 6), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("b" << 6), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("a" << 1 << "b" << 6), nullptr)); + ASSERT(!andOp.matchesBSON(BSON("a" << 10 << "b" << 6), nullptr)); } TEST(AndOp, ElemMatchKey) { @@ -181,7 +181,7 @@ TEST(AndOp, ElemMatchKey) { TEST(OrOp, NoClauses) { OrMatchExpression orOp; - 
ASSERT(!orOp.matchesBSON(BSONObj(), NULL)); + ASSERT(!orOp.matchesBSON(BSONObj(), nullptr)); } TEST(OrOp, MatchesSingleClause) { @@ -192,10 +192,10 @@ TEST(OrOp, MatchesSingleClause) { OrMatchExpression orOp; orOp.add(ne.release()); - ASSERT(orOp.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL)); - ASSERT(!orOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(!orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL)); + ASSERT(orOp.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), nullptr)); + ASSERT(!orOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(!orOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), nullptr)); } TEST(OrOp, MatchesThreeClauses) { @@ -211,13 +211,13 @@ TEST(OrOp, MatchesThreeClauses) { orOp.add(sub2.release()); orOp.add(sub3.release()); - ASSERT(orOp.matchesBSON(BSON("a" << -1), NULL)); - ASSERT(orOp.matchesBSON(BSON("a" << 11), NULL)); - ASSERT(!orOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(orOp.matchesBSON(BSON("b" << 100), NULL)); - ASSERT(!orOp.matchesBSON(BSON("b" << 101), NULL)); - ASSERT(!orOp.matchesBSON(BSONObj(), NULL)); - ASSERT(orOp.matchesBSON(BSON("a" << 11 << "b" << 100), NULL)); + ASSERT(orOp.matchesBSON(BSON("a" << -1), nullptr)); + ASSERT(orOp.matchesBSON(BSON("a" << 11), nullptr)); + ASSERT(!orOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(orOp.matchesBSON(BSON("b" << 100), nullptr)); + ASSERT(!orOp.matchesBSON(BSON("b" << 101), nullptr)); + ASSERT(!orOp.matchesBSON(BSONObj(), nullptr)); + ASSERT(orOp.matchesBSON(BSON("a" << 11 << "b" << 100), nullptr)); } TEST(OrOp, ElemMatchKey) { @@ -243,7 +243,7 @@ TEST(OrOp, ElemMatchKey) { TEST(NorOp, NoClauses) { NorMatchExpression norOp; - ASSERT(norOp.matchesBSON(BSONObj(), NULL)); + ASSERT(norOp.matchesBSON(BSONObj(), nullptr)); } TEST(NorOp, MatchesSingleClause) { @@ -254,10 +254,10 @@ TEST(NorOp, MatchesSingleClause) { NorMatchExpression norOp; norOp.add(ne.release()); - 
ASSERT(!norOp.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(!norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), NULL)); - ASSERT(norOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), NULL)); + ASSERT(!norOp.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(!norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 6)), nullptr)); + ASSERT(norOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(norOp.matchesBSON(BSON("a" << BSON_ARRAY(4 << 5)), nullptr)); } TEST(NorOp, MatchesThreeClauses) { @@ -274,13 +274,13 @@ TEST(NorOp, MatchesThreeClauses) { norOp.add(sub2.release()); norOp.add(sub3.release()); - ASSERT(!norOp.matchesBSON(BSON("a" << -1), NULL)); - ASSERT(!norOp.matchesBSON(BSON("a" << 11), NULL)); - ASSERT(norOp.matchesBSON(BSON("a" << 5), NULL)); - ASSERT(!norOp.matchesBSON(BSON("b" << 100), NULL)); - ASSERT(norOp.matchesBSON(BSON("b" << 101), NULL)); - ASSERT(norOp.matchesBSON(BSONObj(), NULL)); - ASSERT(!norOp.matchesBSON(BSON("a" << 11 << "b" << 100), NULL)); + ASSERT(!norOp.matchesBSON(BSON("a" << -1), nullptr)); + ASSERT(!norOp.matchesBSON(BSON("a" << 11), nullptr)); + ASSERT(norOp.matchesBSON(BSON("a" << 5), nullptr)); + ASSERT(!norOp.matchesBSON(BSON("b" << 100), nullptr)); + ASSERT(norOp.matchesBSON(BSON("b" << 101), nullptr)); + ASSERT(norOp.matchesBSON(BSONObj(), nullptr)); + ASSERT(!norOp.matchesBSON(BSON("a" << 11 << "b" << 100), nullptr)); } TEST(NorOp, ElemMatchKey) { diff --git a/src/mongo/db/matcher/expression_type_test.cpp b/src/mongo/db/matcher/expression_type_test.cpp index 403a60ee9f4..b4e9ed4b405 100644 --- a/src/mongo/db/matcher/expression_type_test.cpp +++ b/src/mongo/db/matcher/expression_type_test.cpp @@ -75,17 +75,17 @@ TEST(ExpressionTypeTest, MatchesElementNumber) { TEST(ExpressionTypeTest, MatchesScalar) { TypeMatchExpression type("a", Bool); - ASSERT(type.matchesBSON(BSON("a" << true), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << 1), NULL)); + ASSERT(type.matchesBSON(BSON("a" << true), nullptr)); 
+ ASSERT(!type.matchesBSON(BSON("a" << 1), nullptr)); } TEST(ExpressionTypeTest, MatchesArray) { TypeMatchExpression type("a", NumberInt); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4)), NULL)); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4 << "a")), NULL)); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("a" << 4)), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("a")), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4)), nullptr)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(4 << "a")), nullptr)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("a" << 4)), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("a")), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(4))), nullptr)); } TEST(ExpressionTypeTest, TypeArrayMatchesOuterAndInnerArray) { @@ -100,39 +100,39 @@ TEST(ExpressionTypeTest, TypeArrayMatchesOuterAndInnerArray) { TEST(ExpressionTypeTest, MatchesObject) { TypeMatchExpression type("a", Object); - ASSERT(type.matchesBSON(BSON("a" << BSON("b" << 1)), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << 1), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON("b" << 1)), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << 1), nullptr)); } TEST(ExpressionTypeTest, MatchesDotNotationFieldObject) { TypeMatchExpression type("a.b", Object); - ASSERT(type.matchesBSON(BSON("a" << BSON("b" << BSON("c" << 1))), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON("b" << 1)), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON("b" << BSON("c" << 1))), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON("b" << 1)), nullptr)); } TEST(ExpressionTypeTest, MatchesDotNotationArrayElementArray) { TypeMatchExpression type("a.0", Array); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(1))), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("b")), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(1))), 
nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY("b")), nullptr)); } TEST(ExpressionTypeTest, MatchesDotNotationArrayElementScalar) { TypeMatchExpression type("a.0", String); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("b")), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY("b")), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), nullptr)); } TEST(ExpressionTypeTest, MatchesDotNotationArrayElementObject) { TypeMatchExpression type("a.0", Object); - ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1))), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 1))), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << BSON_ARRAY(1)), nullptr)); } TEST(ExpressionTypeTest, MatchesNull) { TypeMatchExpression type("a", jstNULL); - ASSERT(type.matchesBSON(BSON("a" << BSONNULL), NULL)); - ASSERT(!type.matchesBSON(BSON("a" << 4), NULL)); - ASSERT(!type.matchesBSON(BSONObj(), NULL)); + ASSERT(type.matchesBSON(BSON("a" << BSONNULL), nullptr)); + ASSERT(!type.matchesBSON(BSON("a" << 4), nullptr)); + ASSERT(!type.matchesBSON(BSONObj(), nullptr)); } TEST(ExpressionTypeTest, ElemMatchKey) { @@ -218,48 +218,48 @@ TEST(ExpressionTypeTest, InternalSchemaTypeExprWithMultipleTypesMatchesAllSuchTy } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataGeneral) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::BinDataGeneral)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::bdtCustom)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::BinDataGeneral)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, BinDataType::bdtCustom)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::BinDataGeneral); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); } TEST(ExpressionBinDataSubTypeTest, 
MatchesBinDataFunction) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::Function)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::MD5Type)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::Function)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, BinDataType::MD5Type)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::Function); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataNewUUID) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::newUUID)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::BinDataGeneral)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::newUUID)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, BinDataType::BinDataGeneral)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::newUUID); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataMD5Type) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::MD5Type)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::newUUID)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::MD5Type)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, BinDataType::newUUID)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::MD5Type); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataEncryptType) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::Encrypt)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::newUUID)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::Encrypt)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, 
BinDataType::newUUID)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::Encrypt); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataBdtCustom) { - BSONObj match = BSON("a" << BSONBinData(NULL, 0, BinDataType::bdtCustom)); - BSONObj notMatch = BSON("a" << BSONBinData(NULL, 0, BinDataType::Function)); + BSONObj match = BSON("a" << BSONBinData(nullptr, 0, BinDataType::bdtCustom)); + BSONObj notMatch = BSON("a" << BSONBinData(nullptr, 0, BinDataType::Function)); InternalSchemaBinDataSubTypeExpression type("", BinDataType::bdtCustom); ASSERT_TRUE(type.matchesSingleElement(match["a"])); ASSERT_FALSE(type.matchesSingleElement(notMatch["a"])); @@ -268,11 +268,11 @@ TEST(ExpressionBinDataSubTypeTest, MatchesBinDataBdtCustom) { TEST(ExpressionBinDataSubTypeTest, DoesNotMatchArrays) { InternalSchemaBinDataSubTypeExpression type("a", BinDataType::BinDataGeneral); ASSERT_FALSE(type.matchesBSON( - BSON("a" << BSON_ARRAY(BSONBinData(NULL, 0, BinDataType::BinDataGeneral) - << BSONBinData(NULL, 0, BinDataType::BinDataGeneral))))); - ASSERT_FALSE( - type.matchesBSON(BSON("a" << BSON_ARRAY(BSONBinData(NULL, 0, BinDataType::BinDataGeneral) - << BSONBinData(NULL, 0, BinDataType::Function))))); + BSON("a" << BSON_ARRAY(BSONBinData(nullptr, 0, BinDataType::BinDataGeneral) + << BSONBinData(nullptr, 0, BinDataType::BinDataGeneral))))); + ASSERT_FALSE(type.matchesBSON( + BSON("a" << BSON_ARRAY(BSONBinData(nullptr, 0, BinDataType::BinDataGeneral) + << BSONBinData(nullptr, 0, BinDataType::Function))))); } TEST(ExpressionBinDataSubTypeTest, DoesNotMatchString) { diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp index ff2069fe9ed..a171c10c140 100644 --- a/src/mongo/db/matcher/expression_where.cpp +++ b/src/mongo/db/matcher/expression_where.cpp @@ -54,7 +54,7 @@ WhereMatchExpression::WhereMatchExpression(OperationContext* 
opCtx, WhereParams params, StringData dbName) : WhereMatchExpressionBase(std::move(params)), _dbName(dbName.toString()), _opCtx(opCtx) { - invariant(_opCtx != NULL); + invariant(_opCtx != nullptr); uassert( ErrorCodes::BadValue, "no globalScriptEngine in $where parsing", getGlobalScriptEngine()); @@ -87,7 +87,7 @@ bool WhereMatchExpression::matches(const MatchableDocument* doc, MatchDetails* d _scope->setObject("obj", const_cast<BSONObj&>(obj)); _scope->setBoolean("fullObject", true); // this is a hack b/c fullObject used to be relevant - int err = _scope->invoke(_func, 0, &obj, 1000 * 60, false); + int err = _scope->invoke(_func, nullptr, &obj, 1000 * 60, false); if (err == -3) { // INVOKE_ERROR stringstream ss; ss << "error on invocation of $where function:\n" << _scope->getError(); diff --git a/src/mongo/db/matcher/matcher.h b/src/mongo/db/matcher/matcher.h index ae39f597bf3..ff7aeaa60dc 100644 --- a/src/mongo/db/matcher/matcher.h +++ b/src/mongo/db/matcher/matcher.h @@ -59,7 +59,7 @@ public: MatchExpressionParser::AllowedFeatureSet allowedFeatures = MatchExpressionParser::kDefaultSpecialFeatures); - bool matches(const BSONObj& doc, MatchDetails* details = NULL) const; + bool matches(const BSONObj& doc, MatchDetails* details = nullptr) const; const BSONObj* getQuery() const { return &_pattern; diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp index efd2c69aa06..e06412814df 100644 --- a/src/mongo/db/matcher/path.cpp +++ b/src/mongo/db/matcher/path.cpp @@ -78,7 +78,7 @@ ElementIterator::Context SimpleArrayElementIterator::next() { // ------ BSONElementIterator::BSONElementIterator() { - _path = NULL; + _path = nullptr; } BSONElementIterator::BSONElementIterator(const ElementPath* path, diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp index d891c998a7c..1939cbe1e3b 100644 --- a/src/mongo/db/ops/insert.cpp +++ b/src/mongo/db/ops/insert.cpp @@ -155,7 +155,7 @@ StatusWith<BSONObj> fixDocumentForInsert(ServiceContext* 
service, const BSONObj& if (e.type()) { b.append(e); } else { - b.appendOID("_id", NULL, true); + b.appendOID("_id", nullptr, true); } } diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp index 63262fac82d..9e3a4f5250b 100644 --- a/src/mongo/db/ops/parsed_delete.cpp +++ b/src/mongo/db/ops/parsed_delete.cpp @@ -119,11 +119,11 @@ PlanExecutor::YieldPolicy ParsedDelete::yieldPolicy() const { } bool ParsedDelete::hasParsedQuery() const { - return _canonicalQuery.get() != NULL; + return _canonicalQuery.get() != nullptr; } std::unique_ptr<CanonicalQuery> ParsedDelete::releaseParsedQuery() { - invariant(_canonicalQuery.get() != NULL); + invariant(_canonicalQuery.get() != nullptr); return std::move(_canonicalQuery); } diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp index c2e355e945a..039bd51f238 100644 --- a/src/mongo/db/ops/parsed_update.cpp +++ b/src/mongo/db/ops/parsed_update.cpp @@ -203,11 +203,11 @@ PlanExecutor::YieldPolicy ParsedUpdate::yieldPolicy() const { } bool ParsedUpdate::hasParsedQuery() const { - return _canonicalQuery.get() != NULL; + return _canonicalQuery.get() != nullptr; } std::unique_ptr<CanonicalQuery> ParsedUpdate::releaseParsedQuery() { - invariant(_canonicalQuery.get() != NULL); + invariant(_canonicalQuery.get() != nullptr); return std::move(_canonicalQuery); } diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp index 1b8d8ecb5cb..2ae7e9fcb09 100644 --- a/src/mongo/db/pipeline/document.cpp +++ b/src/mongo/db/pipeline/document.cpp @@ -379,7 +379,7 @@ Document Document::fromBsonWithMetaData(const BSONObj& bson) { } MutableDocument::MutableDocument(size_t expectedFields) - : _storageHolder(NULL), _storage(_storageHolder) { + : _storageHolder(nullptr), _storage(_storageHolder) { if (expectedFields) { storage().reserveFields(expectedFields); } diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h index 
f465ecce0e2..c4a9cf3e38b 100644 --- a/src/mongo/db/pipeline/document.h +++ b/src/mongo/db/pipeline/document.h @@ -409,7 +409,7 @@ private: /// Used by MutableDocument(MutableValue) const RefCountable*& getDocPtr() { - if (_val.getType() != Object || _val._storage.genericRCPtr == NULL) { + if (_val.getType() != Object || _val._storage.genericRCPtr == nullptr) { // If the current value isn't an object we replace it with a Object-typed Value. // Note that we can't just use Document() here because that is a NULL pointer and // Value doesn't refcount NULL pointers. This led to a memory leak (SERVER-10554) @@ -451,11 +451,11 @@ public: * * TODO: find some way to convey field-name sizes to make even more efficient */ - MutableDocument() : _storageHolder(NULL), _storage(_storageHolder) {} + MutableDocument() : _storageHolder(nullptr), _storage(_storageHolder) {} explicit MutableDocument(size_t expectedFields); /// No copy of data yet. Copy-on-write. See storage() - explicit MutableDocument(Document d) : _storageHolder(NULL), _storage(_storageHolder) { + explicit MutableDocument(Document d) : _storageHolder(nullptr), _storage(_storageHolder) { reset(std::move(d)); } @@ -589,7 +589,7 @@ public: Document ret; boost::intrusive_ptr<const DocumentStorage> temp(storagePtr(), /*inc_ref_count=*/false); temp.swap(ret._storage); - _storage = NULL; + _storage = nullptr; return ret; } @@ -614,7 +614,7 @@ public: private: friend class MutableValue; // for access to next constructor - explicit MutableDocument(MutableValue mv) : _storageHolder(NULL), _storage(mv.getDocPtr()) {} + explicit MutableDocument(MutableValue mv) : _storageHolder(nullptr), _storage(mv.getDocPtr()) {} void reset(boost::intrusive_ptr<const DocumentStorage> ds) { if (_storage) diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h index 1d2b1b58951..29b541a52a3 100644 --- a/src/mongo/db/pipeline/document_internal.h +++ b/src/mongo/db/pipeline/document_internal.h @@ -184,8 
+184,8 @@ private: class DocumentStorage : public RefCountable { public: DocumentStorage() - : _buffer(NULL), - _bufferEnd(NULL), + : _buffer(nullptr), + _bufferEnd(nullptr), _usedBytes(0), _numFields(0), _hashTabMask(0), diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp index 158e97b33fa..fdd49626589 100644 --- a/src/mongo/db/pipeline/document_source.cpp +++ b/src/mongo/db/pipeline/document_source.cpp @@ -50,7 +50,7 @@ using std::string; using std::vector; DocumentSource::DocumentSource(const intrusive_ptr<ExpressionContext>& pCtx) - : pSource(NULL), pExpCtx(pCtx) {} + : pSource(nullptr), pExpCtx(pCtx) {} namespace { // Used to keep track of which DocumentSources are registered under which name. diff --git a/src/mongo/db/pipeline/document_source_match_test.cpp b/src/mongo/db/pipeline/document_source_match_test.cpp index 9449f9b55c2..83283bbefeb 100644 --- a/src/mongo/db/pipeline/document_source_match_test.cpp +++ b/src/mongo/db/pipeline/document_source_match_test.cpp @@ -184,7 +184,7 @@ TEST_F(DocumentSourceMatchTest, RedactSafePortion) { assertExpectedRedactSafePortion("{a: {$in: [1, 0, null]}}", "{}"); { - const char* comparisonOps[] = {"$gt", "$lt", "$gte", "$lte", NULL}; + const char* comparisonOps[] = {"$gt", "$lt", "$gte", "$lte", nullptr}; for (int i = 0; comparisonOps[i]; i++) { const char* op = comparisonOps[i]; assertExpectedRedactSafePortion(string("{a: {") + op + ": 1}}", @@ -610,7 +610,7 @@ TEST_F(DocumentSourceMatchTest, ShouldCorrectlyEvaluateJSONSchemaPredicate) { fromjson("{$jsonSchema: {properties: {a: {type: 'number'}}}}"), getExpCtx()); const auto mock = DocumentSourceMock::createForTest( - {Document{{"a", 1}}, Document{{"a", "str"_sd}}, Document{{"a", {Document{{0, 1}}}}}}); + {Document{{"a", 1}}, Document{{"a", "str"_sd}}, Document{{"a", {Document{{nullptr, 1}}}}}}); match->setSource(mock.get()); diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp index 
c1bf5957066..004fb15b16f 100644 --- a/src/mongo/db/pipeline/expression.cpp +++ b/src/mongo/db/pipeline/expression.cpp @@ -5871,7 +5871,7 @@ int ExpressionRegex::execute(RegexExecutionState* regexState) const { invariant(regexState->pcrePtr); int execResult = pcre_exec(regexState->pcrePtr.get(), - 0, + nullptr, regexState->input->c_str(), regexState->input->size(), regexState->startBytePos, @@ -5981,8 +5981,10 @@ void ExpressionRegex::_compile(RegexExecutionState* executionState) const { executionState->pcrePtr); // Calculate the number of capture groups present in 'pattern' and store in 'numCaptures'. - const int pcre_retval = pcre_fullinfo( - executionState->pcrePtr.get(), NULL, PCRE_INFO_CAPTURECOUNT, &executionState->numCaptures); + const int pcre_retval = pcre_fullinfo(executionState->pcrePtr.get(), + nullptr, + PCRE_INFO_CAPTURECOUNT, + &executionState->numCaptures); invariant(pcre_retval == 0); // The first two-thirds of the vector is used to pass back captured substrings' start and diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp index cba8c2e2736..03424a838d6 100644 --- a/src/mongo/db/query/canonical_query.cpp +++ b/src/mongo/db/query/canonical_query.cpp @@ -299,7 +299,7 @@ void CanonicalQuery::sortTree(MatchExpression* tree) { sortTree(tree->getChild(i)); } std::vector<MatchExpression*>* children = tree->getChildVector(); - if (NULL != children) { + if (nullptr != children) { std::sort(children->begin(), children->end(), matchExpressionLessThan); } } diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp index 977b7fdb1a1..4f6093959b4 100644 --- a/src/mongo/db/query/explain.cpp +++ b/src/mongo/db/query/explain.cpp @@ -669,7 +669,7 @@ void Explain::generatePlannerInfo(PlanExecutor* exec, // In general we should have a canonical query, but sometimes we may avoid // creating a canonical query as an optimization (specifically, the update system // does not canonicalize for idhack updates). 
In these cases, 'query' is NULL. - if (NULL != query) { + if (nullptr != query) { BSONObjBuilder parsedQueryBob(plannerBob.subobjStart("parsedQuery")); query->root()->serialize(&parsedQueryBob); parsedQueryBob.doneFast(); @@ -938,7 +938,7 @@ std::string Explain::getPlanSummary(const PlanStage* root) { // static void Explain::getSummaryStats(const PlanExecutor& exec, PlanSummaryStats* statsOut) { - invariant(NULL != statsOut); + invariant(nullptr != statsOut); PlanStage* root = exec.getRootStage(); diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp index ff1e443abc9..0a47bc2ce0a 100644 --- a/src/mongo/db/query/find.cpp +++ b/src/mongo/db/query/find.cpp @@ -182,7 +182,7 @@ void generateBatch(int ntoreturn, BSONObj obj; while (!FindCommon::enoughForGetMore(ntoreturn, *numResults) && - PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, NULL))) { + PlanExecutor::ADVANCED == (*state = exec->getNext(&obj, nullptr))) { // If we can't fit this result inside the current batch, then we stash it for later. if (!FindCommon::haveSpaceForNext(obj, *numResults, bb->len())) { exec->enqueue(obj); @@ -680,7 +680,7 @@ std::string runQuery(OperationContext* opCtx, curOp.setPlanSummary_inlock(Explain::getPlanSummary(exec.get())); } - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { // If we can't fit this result inside the current batch, then we stash it for later. if (!FindCommon::haveSpaceForNext(obj, numResults, bb.len())) { exec->enqueue(obj); diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index 5d82c6bc539..da9207b93f6 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -370,7 +370,7 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx, unique_ptr<PlanStage> root; // This can happen as we're called by internal clients as well. 
- if (NULL == collection) { + if (nullptr == collection) { const string& ns = canonicalQuery->ns(); LOG(2) << "Collection " << ns << " does not exist." << " Using EOF plan: " << redact(canonicalQuery->toStringShort()); @@ -745,7 +745,7 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> _getExecutorFind( unique_ptr<CanonicalQuery> canonicalQuery, PlanExecutor::YieldPolicy yieldPolicy, size_t plannerOptions) { - if (NULL != collection && canonicalQuery->getQueryRequest().isOplogReplay()) { + if (nullptr != collection && canonicalQuery->getQueryRequest().isOplogReplay()) { return getOplogStartHack( opCtx, collection, std::move(canonicalQuery), plannerOptions, yieldPolicy); } @@ -1131,7 +1131,7 @@ bool turnIxscanIntoCount(QuerySolution* soln) { return false; } - if (STAGE_FETCH == root->getType() && NULL != root->filter.get()) { + if (STAGE_FETCH == root->getType() && nullptr != root->filter.get()) { return false; } @@ -1148,7 +1148,7 @@ bool turnIxscanIntoCount(QuerySolution* soln) { // isSimpleRange here? because we could well use it. I just don't think we ever do see // it. - if (NULL != isn->filter.get() || isn->bounds.isSimpleRange) { + if (nullptr != isn->filter.get() || isn->bounds.isSimpleRange) { return false; } diff --git a/src/mongo/db/query/index_tag.cpp b/src/mongo/db/query/index_tag.cpp index 0381e2ac9ba..61e0204d3da 100644 --- a/src/mongo/db/query/index_tag.cpp +++ b/src/mongo/db/query/index_tag.cpp @@ -45,12 +45,12 @@ namespace { bool TagComparison(const MatchExpression* lhs, const MatchExpression* rhs) { IndexTag* lhsTag = static_cast<IndexTag*>(lhs->getTag()); - size_t lhsValue = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->index; - size_t lhsPos = (NULL == lhsTag) ? IndexTag::kNoIndex : lhsTag->pos; + size_t lhsValue = (nullptr == lhsTag) ? IndexTag::kNoIndex : lhsTag->index; + size_t lhsPos = (nullptr == lhsTag) ? 
IndexTag::kNoIndex : lhsTag->pos; IndexTag* rhsTag = static_cast<IndexTag*>(rhs->getTag()); - size_t rhsValue = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->index; - size_t rhsPos = (NULL == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos; + size_t rhsValue = (nullptr == rhsTag) ? IndexTag::kNoIndex : rhsTag->index; + size_t rhsPos = (nullptr == rhsTag) ? IndexTag::kNoIndex : rhsTag->pos; // First, order on indices. if (lhsValue != rhsValue) { @@ -94,7 +94,7 @@ void sortUsingTags(MatchExpression* tree) { sortUsingTags(tree->getChild(i)); } std::vector<MatchExpression*>* children = tree->getChildVector(); - if (NULL != children) { + if (nullptr != children) { std::sort(children->begin(), children->end(), TagComparison); } } diff --git a/src/mongo/db/query/index_tag.h b/src/mongo/db/query/index_tag.h index be4423d3179..ac69dc16870 100644 --- a/src/mongo/db/query/index_tag.h +++ b/src/mongo/db/query/index_tag.h @@ -88,7 +88,7 @@ public: // used internally class RelevantTag : public MatchExpression::TagData { public: - RelevantTag() : elemMatchExpr(NULL), pathPrefix("") {} + RelevantTag() : elemMatchExpr(nullptr), pathPrefix("") {} std::vector<size_t> first; std::vector<size_t> notFirst; diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index bfd88fb27df..2fa5359bd48 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -55,7 +55,7 @@ std::unique_ptr<PlanExecutor, PlanExecutor::Deleter> InternalPlanner::collection const RecordId startLoc) { std::unique_ptr<WorkingSet> ws = std::make_unique<WorkingSet>(); - if (NULL == collection) { + if (nullptr == collection) { auto eof = std::make_unique<EOFStage>(opCtx); // Takes ownership of 'ws' and 'eof'. 
auto statusWithPlanExecutor = PlanExecutor::make( diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp index 6b9ee7b90d6..9f15aad6c9a 100644 --- a/src/mongo/db/query/lru_key_value_test.cpp +++ b/src/mongo/db/query/lru_key_value_test.cpp @@ -41,7 +41,7 @@ namespace { // void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) { - int* cachedValue = NULL; + int* cachedValue = nullptr; ASSERT_TRUE(cache.hasKey(key)); Status s = cache.get(key, &cachedValue); ASSERT_OK(s); @@ -49,7 +49,7 @@ void assertInKVStore(LRUKeyValue<int, int>& cache, int key, int value) { } void assertNotInKVStore(LRUKeyValue<int, int>& cache, int key) { - int* cachedValue = NULL; + int* cachedValue = nullptr; ASSERT_FALSE(cache.hasKey(key)); Status s = cache.get(key, &cachedValue); ASSERT_NOT_OK(s); @@ -99,7 +99,7 @@ TEST(LRUKeyValueTest, EvictionTest) { LRUKeyValue<int, int> cache(maxSize); for (int i = 0; i < maxSize; ++i) { std::unique_ptr<int> evicted = cache.add(i, new int(i)); - ASSERT(NULL == evicted.get()); + ASSERT(nullptr == evicted.get()); } ASSERT_EQUALS(cache.size(), (size_t)maxSize); @@ -115,7 +115,7 @@ TEST(LRUKeyValueTest, EvictionTest) { // Adding another entry causes an eviction. std::unique_ptr<int> evicted = cache.add(maxSize + 1, new int(maxSize + 1)); ASSERT_EQUALS(cache.size(), (size_t)maxSize); - ASSERT(NULL != evicted.get()); + ASSERT(nullptr != evicted.get()); ASSERT_EQUALS(*evicted, evictKey); // Check that the least recently accessed has been evicted. @@ -139,7 +139,7 @@ TEST(LRUKeyValueTest, PromotionTest) { LRUKeyValue<int, int> cache(maxSize); for (int i = 0; i < maxSize; ++i) { std::unique_ptr<int> evicted = cache.add(i, new int(i)); - ASSERT(NULL == evicted.get()); + ASSERT(nullptr == evicted.get()); } ASSERT_EQUALS(cache.size(), (size_t)maxSize); @@ -150,7 +150,7 @@ TEST(LRUKeyValueTest, PromotionTest) { // Evict all but one of the original entries. 
for (int i = maxSize; i < (maxSize + maxSize - 1); ++i) { std::unique_ptr<int> evicted = cache.add(i, new int(i)); - ASSERT(NULL != evicted.get()); + ASSERT(nullptr != evicted.get()); } ASSERT_EQUALS(cache.size(), (size_t)maxSize); diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp index 84669166c8d..075858687fd 100644 --- a/src/mongo/db/query/parsed_projection_test.cpp +++ b/src/mongo/db/query/parsed_projection_test.cpp @@ -58,7 +58,7 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const MatchExpressionParser::parse(query, std::move(expCtx)); ASSERT(statusWithMatcher.isOK()); std::unique_ptr<MatchExpression> queryMatchExpr = std::move(statusWithMatcher.getValue()); - ParsedProjection* out = NULL; + ParsedProjection* out = nullptr; Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out); if (!status.isOK()) { FAIL(str::stream() << "failed to parse projection " << projObj << " (query: " << query @@ -91,7 +91,7 @@ void assertInvalidProjection(const char* queryStr, const char* projStr) { MatchExpressionParser::parse(query, std::move(expCtx)); ASSERT(statusWithMatcher.isOK()); std::unique_ptr<MatchExpression> queryMatchExpr = std::move(statusWithMatcher.getValue()); - ParsedProjection* out = NULL; + ParsedProjection* out = nullptr; Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out); std::unique_ptr<ParsedProjection> destroy(out); ASSERT(!status.isOK()); @@ -213,9 +213,9 @@ TEST(ParsedProjectionTest, InvalidPositionalProjectionDefaultPathMatchExpression QueryTestServiceContext serviceCtx; auto opCtx = serviceCtx.makeOperationContext(); unique_ptr<MatchExpression> queryMatchExpr(new AlwaysFalseMatchExpression()); - ASSERT(NULL == queryMatchExpr->path().rawData()); + ASSERT(nullptr == queryMatchExpr->path().rawData()); - ParsedProjection* out = NULL; + ParsedProjection* out = nullptr; BSONObj projObj = 
fromjson("{'a.$': 1}"); Status status = ParsedProjection::make(opCtx.get(), projObj, queryMatchExpr.get(), &out); ASSERT(!status.isOK()); diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp index ba32ff91e7f..3464c22a86f 100644 --- a/src/mongo/db/query/plan_cache.cpp +++ b/src/mongo/db/query/plan_cache.cpp @@ -269,7 +269,7 @@ void PlanCacheIndexTree::setIndexEntry(const IndexEntry& ie) { PlanCacheIndexTree* PlanCacheIndexTree::clone() const { PlanCacheIndexTree* root = new PlanCacheIndexTree(); - if (NULL != entry.get()) { + if (nullptr != entry.get()) { root->index_pos = index_pos; root->setIndexEntry(*entry.get()); root->canCombineBounds = canCombineBounds; @@ -298,7 +298,7 @@ std::string PlanCacheIndexTree::toString(int indents) const { return result.str(); } else { result << std::string(3 * indents, '-') << "Leaf "; - if (NULL != entry.get()) { + if (nullptr != entry.get()) { result << entry->identifier << ", pos: " << index_pos << ", can combine? " << canCombineBounds; } @@ -326,7 +326,7 @@ std::string PlanCacheIndexTree::toString(int indents) const { SolutionCacheData* SolutionCacheData::clone() const { SolutionCacheData* other = new SolutionCacheData(); - if (NULL != this->tree.get()) { + if (nullptr != this->tree.get()) { // 'tree' could be NULL if the cached solution // is a collection scan. 
other->tree.reset(this->tree->clone()); @@ -542,7 +542,7 @@ Status PlanCache::set(const CanonicalQuery& query, std::unique_ptr<PlanCacheEntry> evictedEntry = _cache.add(key, newEntry.release()); - if (NULL != evictedEntry.get()) { + if (nullptr != evictedEntry.get()) { LOG(1) << _ns << ": plan cache maximum size exceeded - " << "removed least recently used entry " << redact(evictedEntry->toString()); } diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp index c6543f5da59..a2ab4e1f475 100644 --- a/src/mongo/db/query/plan_cache_test.cpp +++ b/src/mongo/db/query/plan_cache_test.cpp @@ -1161,7 +1161,7 @@ protected: dumpSolutions(ss); FAIL(ss); - return NULL; + return nullptr; } /** @@ -1216,8 +1216,8 @@ protected: */ void assertNotCached(const string& solnJson) { QuerySolution* bestSoln = firstMatchingSolution(solnJson); - ASSERT(NULL != bestSoln); - ASSERT(NULL == bestSoln->cacheData.get()); + ASSERT(nullptr != bestSoln); + ASSERT(nullptr == bestSoln->cacheData.get()); } static const PlanCacheKey ck; diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp index 19f2188f303..7163d69e474 100644 --- a/src/mongo/db/query/plan_enumerator.cpp +++ b/src/mongo/db/query/plan_enumerator.cpp @@ -290,7 +290,7 @@ std::string PlanEnumerator::dumpMemo() { } string PlanEnumerator::NodeAssignment::toString() const { - if (NULL != andAssignment) { + if (nullptr != andAssignment) { str::stream ss; ss << "AND enumstate counter " << andAssignment->counter; for (size_t i = 0; i < andAssignment->choices.size(); ++i) { @@ -316,7 +316,7 @@ string PlanEnumerator::NodeAssignment::toString() const { } } return ss; - } else if (NULL != arrayAssignment) { + } else if (nullptr != arrayAssignment) { str::stream ss; ss << "ARRAY SUBNODES enumstate " << arrayAssignment->counter << "/ ONE OF: [ "; for (size_t i = 0; i < arrayAssignment->subnodes.size(); ++i) { @@ -325,7 +325,7 @@ string 
PlanEnumerator::NodeAssignment::toString() const { ss << "]"; return ss; } else { - verify(NULL != orAssignment); + verify(nullptr != orAssignment); str::stream ss; ss << "ALL OF: [ "; for (size_t i = 0; i < orAssignment->subnodes.size(); ++i) { @@ -506,7 +506,7 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { // There can only be one mandatory predicate (at most one $text, at most one // $geoNear, can't combine $text/$geoNear). - MatchExpression* mandatoryPred = NULL; + MatchExpression* mandatoryPred = nullptr; // There could be multiple indices which we could use to satisfy the mandatory // predicate. Keep the set of such indices. Currently only one text index is @@ -528,7 +528,7 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { // This should include only TEXT and GEO_NEAR preds. // We expect either 0 or 1 mandatory predicates. - invariant(NULL == mandatoryPred); + invariant(nullptr == mandatoryPred); // Mandatory predicates are TEXT or GEO_NEAR. invariant(MatchExpression::TEXT == child->matchType() || @@ -577,7 +577,7 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { return true; } - if (NULL != mandatoryPred) { + if (nullptr != mandatoryPred) { // We must have at least one index which can be used to answer 'mandatoryPred'. invariant(!mandatoryIndices.empty()); return enumerateMandatoryIndex( @@ -1325,7 +1325,7 @@ void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*> // initializing the top-level scope with the prefix of the full path. 
for (size_t i = 0; i < assigned.size(); i++) { const MatchExpression* assignedPred = assigned[i]; - invariant(NULL != assignedPred->getTag()); + invariant(nullptr != assignedPred->getTag()); RelevantTag* usedRt = static_cast<RelevantTag*>(assignedPred->getTag()); set<string> usedPrefixes; usedPrefixes.insert(getPathPrefix(usedRt->path)); @@ -1336,7 +1336,7 @@ void PlanEnumerator::getMultikeyCompoundablePreds(const vector<MatchExpression*> // the $elemMatch context. For example, if 'assigned' is {a: {$elemMatch: {b: 1}}}, // then we will have already added "a" to the set for NULL. We now // also need to add "b" to the set for the $elemMatch. - if (NULL != usedRt->elemMatchExpr) { + if (nullptr != usedRt->elemMatchExpr) { set<string> elemMatchUsed; // Whereas getPathPrefix(usedRt->path) is the prefix of the full path, // usedRt->pathPrefix contains the prefix of the portion of the @@ -1570,17 +1570,17 @@ void PlanEnumerator::compound(const vector<MatchExpression*>& tryCompound, void PlanEnumerator::tagMemo(size_t id) { LOG(5) << "Tagging memoID " << id; NodeAssignment* assign = _memo[id]; - verify(NULL != assign); + verify(nullptr != assign); - if (NULL != assign->orAssignment) { + if (nullptr != assign->orAssignment) { OrAssignment* oa = assign->orAssignment.get(); for (size_t i = 0; i < oa->subnodes.size(); ++i) { tagMemo(oa->subnodes[i]); } - } else if (NULL != assign->arrayAssignment) { + } else if (nullptr != assign->arrayAssignment) { ArrayAssignment* aa = assign->arrayAssignment.get(); tagMemo(aa->subnodes[aa->counter]); - } else if (NULL != assign->andAssignment) { + } else if (nullptr != assign->andAssignment) { AndAssignment* aa = assign->andAssignment.get(); verify(aa->counter < aa->choices.size()); @@ -1622,9 +1622,9 @@ void PlanEnumerator::tagMemo(size_t id) { bool PlanEnumerator::nextMemo(size_t id) { NodeAssignment* assign = _memo[id]; - verify(NULL != assign); + verify(nullptr != assign); - if (NULL != assign->orAssignment) { + if (nullptr != 
assign->orAssignment) { OrAssignment* oa = assign->orAssignment.get(); // Limit the number of OR enumerations @@ -1644,7 +1644,7 @@ bool PlanEnumerator::nextMemo(size_t id) { } // If we're here, the last subnode had a carry, therefore the OR has a carry. return true; - } else if (NULL != assign->arrayAssignment) { + } else if (nullptr != assign->arrayAssignment) { ArrayAssignment* aa = assign->arrayAssignment.get(); // moving to next on current subnode is OK if (!nextMemo(aa->subnodes[aa->counter])) { @@ -1657,7 +1657,7 @@ bool PlanEnumerator::nextMemo(size_t id) { } aa->counter = 0; return true; - } else if (NULL != assign->andAssignment) { + } else if (nullptr != assign->andAssignment) { AndAssignment* aa = assign->andAssignment.get(); // One of our subnodes might have to move on to its next enumeration state. diff --git a/src/mongo/db/query/plan_enumerator.h b/src/mongo/db/query/plan_enumerator.h index 0ae89741132..33c7923119e 100644 --- a/src/mongo/db/query/plan_enumerator.h +++ b/src/mongo/db/query/plan_enumerator.h @@ -158,7 +158,7 @@ private: }; struct PrepMemoContext { - PrepMemoContext() : elemMatchExpr(NULL) {} + PrepMemoContext() : elemMatchExpr(nullptr) {} MatchExpression* elemMatchExpr; // Maps from indexable predicates that can be pushed into the current node to the route diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index 849993364c5..0ff05e2bd27 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -122,7 +122,7 @@ PlanStage* getStageByType(PlanStage* root, StageType type) { } } - return NULL; + return nullptr; } } // namespace @@ -379,7 +379,7 @@ void PlanExecutorImpl::reattachToOperationContext(OperationContext* opCtx) { PlanExecutor::ExecState PlanExecutorImpl::getNext(BSONObj* objOut, RecordId* dlOut) { Snapshotted<BSONObj> snapshotted; - ExecState state = _getNextImpl(objOut ? 
&snapshotted : NULL, dlOut); + ExecState state = _getNextImpl(objOut ? &snapshotted : nullptr, dlOut); if (objOut) { *objOut = snapshotted.value(); @@ -483,7 +483,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj invariant(_currentState == kUsable); if (isMarkedAsKilled()) { - if (NULL != objOut) { + if (nullptr != objOut) { *objOut = Snapshotted<BSONObj>(SnapshotId(), WorkingSetCommon::buildMemberStatusObject(_killStatus)); } @@ -535,7 +535,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj WorkingSetMember* member = _workingSet->get(id); bool hasRequestedData = true; - if (NULL != objOut) { + if (nullptr != objOut) { if (WorkingSetMember::RID_AND_IDX == member->getState()) { if (1 != member->keyData.size()) { _workingSet->free(id); @@ -553,7 +553,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj } } - if (NULL != dlOut) { + if (nullptr != dlOut) { if (member->hasRecordId()) { *dlOut = member->recordId; } else { @@ -602,7 +602,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted<BSONObj>* obj } else { invariant(PlanStage::FAILURE == code); - if (NULL != objOut) { + if (nullptr != objOut) { BSONObj statusObj; invariant(WorkingSet::INVALID_ID != id); WorkingSetCommon::getStatusMemberObject(*_workingSet, id, &statusObj); @@ -641,7 +641,7 @@ Status PlanExecutorImpl::executePlan() { BSONObj obj; PlanExecutor::ExecState state = PlanExecutor::ADVANCED; while (PlanExecutor::ADVANCED == state) { - state = this->getNext(&obj, NULL); + state = this->getNext(&obj, nullptr); } if (PlanExecutor::FAILURE == state) { diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp index 9ac03670c3a..ef4ef25bcac 100644 --- a/src/mongo/db/query/planner_access.cpp +++ b/src/mongo/db/query/planner_access.cpp @@ -205,7 +205,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::makeLeafNode( auto ret = 
std::make_unique<GeoNear2DNode>(index); ret->nq = &nearExpr->getData(); ret->baseBounds.fields.resize(index.keyPattern.nFields()); - if (NULL != query.getProj()) { + if (nullptr != query.getProj()) { ret->addPointMeta = query.getProj()->wantGeoNearPoint(); ret->addDistMeta = query.getProj()->wantGeoNearDistance(); } @@ -215,7 +215,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::makeLeafNode( auto ret = std::make_unique<GeoNear2DSphereNode>(index); ret->nq = &nearExpr->getData(); ret->baseBounds.fields.resize(index.keyPattern.nFields()); - if (NULL != query.getProj()) { + if (nullptr != query.getProj()) { ret->addPointMeta = query.getProj()->wantGeoNearPoint(); ret->addDistMeta = query.getProj()->wantGeoNearDistance(); } @@ -266,11 +266,11 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::makeLeafNode( bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr, const ScanBuildingState& scanState) { const QuerySolutionNode* node = scanState.currentScan.get(); - if (NULL == node || NULL == expr) { + if (nullptr == node || nullptr == expr) { return false; } - if (NULL == scanState.ixtag) { + if (nullptr == scanState.ixtag) { return false; } @@ -340,7 +340,7 @@ bool QueryPlannerAccess::shouldMergeWithLeaf(const MatchExpression* expr, void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState) { QuerySolutionNode* node = scanState->currentScan.get(); - invariant(NULL != node); + invariant(nullptr != node); const MatchExpression::MatchType mergeType = scanState->root->matchType(); size_t pos = scanState->ixtag->pos; @@ -369,7 +369,7 @@ void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingSt return; } - IndexBounds* boundsToFillOut = NULL; + IndexBounds* boundsToFillOut = nullptr; if (STAGE_GEO_NEAR_2D == type) { invariant(INDEX_2D == index.type); @@ -459,7 +459,7 @@ void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntr // We can't create a text stage 
if there aren't EQ predicates on its prefix terms. So // if we've made it this far, we should have collected the prefix predicates in the // filter. - invariant(NULL != tn->filter.get()); + invariant(nullptr != tn->filter.get()); MatchExpression* textFilterMe = tn->filter.get(); BSONObjBuilder prefixBob; @@ -489,10 +489,10 @@ void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntr while (curChild < amExpr->numChildren()) { MatchExpression* child = amExpr->getChild(curChild); IndexTag* ixtag = static_cast<IndexTag*>(child->getTag()); - invariant(NULL != ixtag); + invariant(nullptr != ixtag); // Skip this child if it's not part of a prefix, or if we've already assigned a // predicate to this prefix position. - if (ixtag->pos >= tn->numPrefixFields || prefixExprs[ixtag->pos] != NULL) { + if (ixtag->pos >= tn->numPrefixFields || prefixExprs[ixtag->pos] != nullptr) { ++curChild; continue; } @@ -505,7 +505,7 @@ void QueryPlannerAccess::finishTextNode(QuerySolutionNode* node, const IndexEntr // Go through the prefix equalities in order and create an index prefix out of them. 
for (size_t i = 0; i < prefixExprs.size(); ++i) { MatchExpression* prefixMe = prefixExprs[i]; - invariant(NULL != prefixMe); + invariant(nullptr != prefixMe); invariant(MatchExpression::EQ == prefixMe->matchType()); EqualityMatchExpression* eqExpr = static_cast<EqualityMatchExpression*>(prefixMe); prefixBob.append(eqExpr->getData()); @@ -651,12 +651,12 @@ void QueryPlannerAccess::findElemMatchChildren(const MatchExpression* node, vector<MatchExpression*>* subnodesOut) { for (size_t i = 0; i < node->numChildren(); ++i) { MatchExpression* child = node->getChild(i); - if (Indexability::isBoundsGenerating(child) && NULL != child->getTag()) { + if (Indexability::isBoundsGenerating(child) && nullptr != child->getTag()) { out->push_back(child); } else if (MatchExpression::AND == child->matchType() || Indexability::arrayUsesIndexOnChildren(child)) { findElemMatchChildren(child, out, subnodesOut); - } else if (NULL != child->getTag()) { + } else if (nullptr != child->getTag()) { subnodesOut->push_back(child); } } @@ -728,7 +728,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, // If there is no tag, it's not using an index. We've sorted our children such that the // children with tags are first, so we stop now. - if (NULL == child->getTag()) { + if (nullptr == child->getTag()) { break; } @@ -789,7 +789,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, mergeWithLeafNode(child, &scanState); handleFilter(&scanState); } else { - if (NULL != scanState.currentScan.get()) { + if (nullptr != scanState.currentScan.get()) { // Output the current scan before starting to construct a new out. finishAndOutputLeaf(&scanState, out); } else { @@ -810,7 +810,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, } // Output the scan we're done with, if it exists. 
- if (NULL != scanState.currentScan.get()) { + if (nullptr != scanState.currentScan.get()) { finishAndOutputLeaf(&scanState, out); } @@ -876,13 +876,13 @@ bool QueryPlannerAccess::processIndexScansElemMatch( // the complete $elemMatch expression will be affixed as a filter later on. for (size_t i = 0; i < emChildren.size(); ++i) { MatchExpression* emChild = emChildren[i]; - invariant(NULL != emChild->getTag()); + invariant(nullptr != emChild->getTag()); scanState->ixtag = static_cast<IndexTag*>(emChild->getTag()); // If 'emChild' is a NOT, then the tag we're interested in is on the NOT's // child node. if (MatchExpression::NOT == emChild->matchType()) { - invariant(NULL != emChild->getChild(0)->getTag()); + invariant(nullptr != emChild->getChild(0)->getTag()); scanState->ixtag = static_cast<IndexTag*>(emChild->getChild(0)->getTag()); invariant(IndexTag::kNoIndex != scanState->ixtag->index); } @@ -895,7 +895,7 @@ bool QueryPlannerAccess::processIndexScansElemMatch( scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH; mergeWithLeafNode(emChild, scanState); } else { - if (NULL != scanState->currentScan.get()) { + if (nullptr != scanState->currentScan.get()) { finishAndOutputLeaf(scanState, out); } else { verify(IndexTag::kNoIndex == scanState->currentIndexNumber); @@ -972,7 +972,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedAnd( std::vector<std::unique_ptr<QuerySolutionNode>> ixscanNodes; const bool inArrayOperator = !ownedRoot; if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) { - return NULL; + return nullptr; } // @@ -1095,7 +1095,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedOr( const bool inArrayOperator = !ownedRoot; std::vector<std::unique_ptr<QuerySolutionNode>> ixscanNodes; if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) { - return NULL; + return nullptr; } // Unlike an AND, an OR cannot have filters hanging off of it. 
We stop processing @@ -1106,7 +1106,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::buildIndexedOr( // We won't enumerate an OR without indices for each child, so this isn't an issue, even // if we have an AND with an OR child -- we won't get here unless the OR is fully // indexed. - return NULL; + return nullptr; } // If all index scans are identical, then we collapse them into a single scan. This prevents @@ -1196,7 +1196,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::_buildIndexedDataAccess( IndexBoundsBuilder::BoundsTightness tightness = IndexBoundsBuilder::EXACT; auto soln = makeLeafNode(query, indices[tag->index], tag->pos, root, &tightness); - verify(NULL != soln); + verify(nullptr != soln); finishLeafNode(soln.get(), indices[tag->index]); if (!ownedRoot) { @@ -1216,7 +1216,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::_buildIndexedDataAccess( return soln; } else if (tightness == IndexBoundsBuilder::INEXACT_COVERED && !indices[tag->index].multikey) { - verify(NULL == soln->filter.get()); + verify(nullptr == soln->filter.get()); soln->filter = std::move(ownedRoot); return soln; } else { @@ -1293,7 +1293,7 @@ std::unique_ptr<QuerySolutionNode> QueryPlannerAccess::scanWholeIndex( void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node, MatchExpression* match, MatchExpression::MatchType type) { - if (NULL == node->filter) { + if (nullptr == node->filter) { node->filter.reset(match); } else if (type == node->filter->matchType()) { // The 'node' already has either an AND or OR filter that matches 'type'. 
Add 'match' as diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h index e0744d55311..3c9b03ff4d0 100644 --- a/src/mongo/db/query/planner_access.h +++ b/src/mongo/db/query/planner_access.h @@ -146,7 +146,7 @@ private: currentScan(nullptr), curChild(0), currentIndexNumber(IndexTag::kNoIndex), - ixtag(NULL), + ixtag(nullptr), tightness(IndexBoundsBuilder::INEXACT_FETCH), curOr(nullptr), loosestBounds(IndexBoundsBuilder::EXACT) {} @@ -157,7 +157,7 @@ private: * This always should be called prior to allocating a new 'currentScan'. */ void resetForNextScan(IndexTag* newTag) { - currentScan.reset(NULL); + currentScan.reset(nullptr); currentIndexNumber = newTag->index; tightness = IndexBoundsBuilder::INEXACT_FETCH; loosestBounds = IndexBoundsBuilder::EXACT; diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp index 1e8e84da6a7..2be65fbf8d2 100644 --- a/src/mongo/db/query/planner_analysis.cpp +++ b/src/mongo/db/query/planner_analysis.cpp @@ -628,7 +628,7 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query // If we're not allowed to put a blocking sort in, bail out. if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) { delete solnRoot; - return NULL; + return nullptr; } if (!solnRoot->fetched()) { diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp index 77f662a4701..f15b87b470c 100644 --- a/src/mongo/db/query/planner_ixselect.cpp +++ b/src/mongo/db/query/planner_ixselect.cpp @@ -557,7 +557,7 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, const CapWithCRS* cap = gc.getCapGeometryHack(); // 2d indices can answer centerSphere queries. 
- if (NULL == cap) { + if (nullptr == cap) { return false; } @@ -666,7 +666,7 @@ void QueryPlannerIXSelect::_rateIndices(MatchExpression* node, fullPath = prefix + node->path().toString(); } - verify(NULL == node->getTag()); + verify(nullptr == node->getTag()); node->setTag(new RelevantTag()); auto rt = static_cast<RelevantTag*>(node->getTag()); rt->path = fullPath; @@ -995,7 +995,7 @@ static void stripInvalidAssignmentsToTextIndex(MatchExpression* node, MatchExpression* child = node->getChild(i); RelevantTag* tag = static_cast<RelevantTag*>(child->getTag()); - if (NULL == tag) { + if (nullptr == tag) { // 'child' could be a logical operator. Maybe there are some assignments hiding // inside. stripInvalidAssignmentsToTextIndex(child, idx, prefixPaths); diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp index ad97acfc6c1..9735dbade0e 100644 --- a/src/mongo/db/query/query_planner.cpp +++ b/src/mongo/db/query/query_planner.cpp @@ -373,16 +373,16 @@ StatusWith<std::unique_ptr<PlanCacheIndexTree>> QueryPlanner::cacheDataFromTagge Status QueryPlanner::tagAccordingToCache(MatchExpression* filter, const PlanCacheIndexTree* const indexTree, const map<IndexEntry::Identifier, size_t>& indexMap) { - if (NULL == filter) { + if (nullptr == filter) { return Status(ErrorCodes::BadValue, "Cannot tag tree: filter is NULL."); } - if (NULL == indexTree) { + if (nullptr == indexTree) { return Status(ErrorCodes::BadValue, "Cannot tag tree: indexTree is NULL."); } // We're tagging the tree here, so it shouldn't have // any tags hanging off yet. - verify(NULL == filter->getTag()); + verify(nullptr == filter->getTag()); if (filter->numChildren() != indexTree->children.size()) { str::stream ss; @@ -721,7 +721,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( LOG(5) << "Rated tree:" << endl << redact(query.root()->debugString()); // If there is a GEO_NEAR it must have an index it can use directly. 
- const MatchExpression* gnNode = NULL; + const MatchExpression* gnNode = nullptr; if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR, &gnNode)) { // No index for GEO_NEAR? No query. RelevantTag* tag = static_cast<RelevantTag*>(gnNode->getTag()); @@ -736,7 +736,7 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( } // Likewise, if there is a TEXT it must have an index it can use directly. - const MatchExpression* textNode = NULL; + const MatchExpression* textNode = nullptr; if (QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT, &textNode)) { RelevantTag* tag = static_cast<RelevantTag*>(textNode->getTag()); @@ -832,7 +832,8 @@ StatusWith<std::vector<std::unique_ptr<QuerySolution>>> QueryPlanner::plan( // Produce legible error message for failed OR planning with a TEXT child. // TODO: support collection scan for non-TEXT children of OR. - if (out.size() == 0 && textNode != NULL && MatchExpression::OR == query.root()->matchType()) { + if (out.size() == 0 && textNode != nullptr && + MatchExpression::OR == query.root()->matchType()) { MatchExpression* root = query.root(); for (size_t i = 0; i < root->numChildren(); ++i) { if (textNode == root->getChild(i)) { diff --git a/src/mongo/db/query/query_planner_common.h b/src/mongo/db/query/query_planner_common.h index 4cbfc1147dc..c232291e0e3 100644 --- a/src/mongo/db/query/query_planner_common.h +++ b/src/mongo/db/query/query_planner_common.h @@ -47,9 +47,9 @@ public: */ static bool hasNode(const MatchExpression* root, MatchExpression::MatchType type, - const MatchExpression** out = NULL) { + const MatchExpression** out = nullptr) { if (type == root->matchType()) { - if (NULL != out) { + if (nullptr != out) { *out = root; } return true; diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp index 757625df59d..6a91e1ec742 100644 --- a/src/mongo/db/query/query_planner_test.cpp +++ 
b/src/mongo/db/query/query_planner_test.cpp @@ -4605,7 +4605,7 @@ TEST_F(QueryPlannerTest, KeyPatternOverflowsInt) { TEST_F(QueryPlannerTest, CacheDataFromTaggedTreeFailsOnBadInput) { // Null match expression. std::vector<IndexEntry> relevantIndices; - ASSERT_NOT_OK(QueryPlanner::cacheDataFromTaggedTree(NULL, relevantIndices).getStatus()); + ASSERT_NOT_OK(QueryPlanner::cacheDataFromTaggedTree(nullptr, relevantIndices).getStatus()); // No relevant index matching the index tag. relevantIndices.push_back(buildSimpleIndexEntry(BSON("a" << 1), "a_1")); @@ -4636,11 +4636,11 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) { std::map<IndexEntry::Identifier, size_t> indexMap; // Null filter. - Status s = QueryPlanner::tagAccordingToCache(NULL, indexTree.get(), indexMap); + Status s = QueryPlanner::tagAccordingToCache(nullptr, indexTree.get(), indexMap); ASSERT_NOT_OK(s); // Null indexTree. - s = QueryPlanner::tagAccordingToCache(scopedCq->root(), NULL, indexMap); + s = QueryPlanner::tagAccordingToCache(scopedCq->root(), nullptr, indexMap); ASSERT_NOT_OK(s); // Index not found. 
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp index 2cfcc54985b..f0861044b3a 100644 --- a/src/mongo/db/query/query_planner_test_lib.cpp +++ b/src/mongo/db/query/query_planner_test_lib.cpp @@ -58,7 +58,7 @@ using std::string; bool filterMatches(const BSONObj& testFilter, const BSONObj& testCollation, const QuerySolutionNode* trueFilterNode) { - if (NULL == trueFilterNode->filter) { + if (nullptr == trueFilterNode->filter) { return false; } @@ -267,7 +267,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, if (filter.eoo()) { return true; } else if (filter.isNull()) { - return NULL == csn->filter; + return nullptr == csn->filter; } else if (!filter.isABSONObj()) { return false; } @@ -336,7 +336,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, if (filter.eoo()) { return true; } else if (filter.isNull()) { - return NULL == ixn->filter; + return nullptr == ixn->filter; } else if (!filter.isABSONObj()) { return false; } @@ -453,7 +453,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, BSONElement filter = textObj["filter"]; if (!filter.eoo()) { if (filter.isNull()) { - if (NULL != node->filter) { + if (nullptr != node->filter) { return false; } } else if (!filter.isABSONObj()) { @@ -490,7 +490,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, BSONElement filter = fetchObj["filter"]; if (!filter.eoo()) { if (filter.isNull()) { - if (NULL != fn->filter) { + if (nullptr != fn->filter) { return false; } } else if (!filter.isABSONObj()) { @@ -533,7 +533,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, BSONElement filter = andHashObj["filter"]; if (!filter.eoo()) { if (filter.isNull()) { - if (NULL != ahn->filter) { + if (nullptr != ahn->filter) { return false; } } else if (!filter.isABSONObj()) { @@ -564,7 +564,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, BSONElement filter = 
andSortedObj["filter"]; if (!filter.eoo()) { if (filter.isNull()) { - if (NULL != asn->filter) { + if (nullptr != asn->filter) { return false; } } else if (!filter.isABSONObj()) { diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp index 23c83c6fb8a..9f1951c14a9 100644 --- a/src/mongo/db/query/query_solution.cpp +++ b/src/mongo/db/query/query_solution.cpp @@ -203,7 +203,7 @@ void TextNode::appendToString(str::stream* ss, int indent) const { *ss << "diacriticSensitive= " << ftsQuery->getDiacriticSensitive() << '\n'; addIndent(ss, indent + 1); *ss << "indexPrefix = " << indexPrefix.toString() << '\n'; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString(); } @@ -233,7 +233,7 @@ void CollectionScanNode::appendToString(str::stream* ss, int indent) const { *ss << "COLLSCAN\n"; addIndent(ss, indent + 1); *ss << "ns = " << name << '\n'; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << "filter = " << filter->debugString(); } @@ -265,7 +265,7 @@ AndHashNode::~AndHashNode() {} void AndHashNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "AND_HASH\n"; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString() << '\n'; } @@ -369,7 +369,7 @@ OrNode::~OrNode() {} void OrNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "OR\n"; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString() << '\n'; } @@ -430,7 +430,7 @@ MergeSortNode::~MergeSortNode() {} void MergeSortNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "MERGE_SORT\n"; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString() << '\n'; } @@ -489,7 +489,7 @@ 
FetchNode::FetchNode() : _sorts(SimpleBSONObjComparator::kInstance.makeBSONObjSe void FetchNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "FETCH\n"; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); StringBuilder sb; *ss << "filter:\n"; @@ -529,7 +529,7 @@ void IndexScanNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent + 1); *ss << "indexName = " << index.identifier.catalogName << '\n'; *ss << "keyPattern = " << index.keyPattern << '\n'; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << "filter = " << filter->debugString(); } @@ -1041,7 +1041,7 @@ void GeoNear2DNode::appendToString(str::stream* ss, int indent) const { *ss << "keyPattern = " << index.keyPattern.toString() << '\n'; addCommon(ss, indent); *ss << "nearQuery = " << nq->toString() << '\n'; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString(); } @@ -1075,7 +1075,7 @@ void GeoNear2DSphereNode::appendToString(str::stream* ss, int indent) const { *ss << "baseBounds = " << baseBounds.toString() << '\n'; addIndent(ss, indent + 1); *ss << "nearQuery = " << nq->toString() << '\n'; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); *ss << " filter = " << filter->debugString(); } @@ -1101,7 +1101,7 @@ QuerySolutionNode* GeoNear2DSphereNode::clone() const { void ShardingFilterNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "SHARDING_FILTER\n"; - if (NULL != filter) { + if (nullptr != filter) { addIndent(ss, indent + 1); StringBuilder sb; *ss << "filter:\n"; diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h index 44c63d4bfb6..a665b823529 100644 --- a/src/mongo/db/query/query_solution.h +++ b/src/mongo/db/query/query_solution.h @@ -147,7 +147,7 @@ struct QuerySolutionNode { for (size_t i = 0; i < 
this->children.size(); i++) { other->children.push_back(this->children[i]->clone()); } - if (NULL != this->filter) { + if (nullptr != this->filter) { other->filter = this->filter->shallowClone(); } } @@ -229,7 +229,7 @@ struct QuerySolution { * Output a human-readable std::string representing the plan. */ std::string toString() { - if (NULL == root) { + if (nullptr == root) { return "empty query solution"; } diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp index 63c769e58be..6a5fe64ba4a 100644 --- a/src/mongo/db/repl/idempotency_test_fixture.cpp +++ b/src/mongo/db/repl/idempotency_test_fixture.cpp @@ -550,13 +550,13 @@ std::string IdempotencyTest::computeDataHash(Collection* collection) { PlanExecutor::NO_YIELD, InternalPlanner::FORWARD, InternalPlanner::IXSCAN_FETCH); - ASSERT(NULL != exec.get()); + ASSERT(nullptr != exec.get()); md5_state_t st; md5_init(&st); PlanExecutor::ExecState state; BSONObj obj; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { obj = this->canonicalizeDocumentForDataHash(obj); md5_append(&st, (const md5_byte_t*)obj.objdata(), obj.objsize()); } diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp index a78298933fd..f864cbb13de 100644 --- a/src/mongo/db/repl/isself.cpp +++ b/src/mongo/db/repl/isself.cpp @@ -107,7 +107,7 @@ std::string stringifyError(int code) { std::vector<std::string> getAddrsForHost(const std::string& iporhost, const int port, const bool ipv6enabled) { - addrinfo* addrs = NULL; + addrinfo* addrs = nullptr; addrinfo hints = {0}; hints.ai_socktype = SOCK_STREAM; hints.ai_family = (ipv6enabled ? 
AF_UNSPEC : AF_INET); @@ -126,13 +126,13 @@ std::vector<std::string> getAddrsForHost(const std::string& iporhost, ON_BLOCK_EXIT([&] { freeaddrinfo(addrs); }); - for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) { + for (addrinfo* addr = addrs; addr != nullptr; addr = addr->ai_next) { int family = addr->ai_family; char host[NI_MAXHOST]; if (family == AF_INET || family == AF_INET6) { err = getnameinfo( - addr->ai_addr, addr->ai_addrlen, host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST); + addr->ai_addr, addr->ai_addrlen, host, NI_MAXHOST, nullptr, 0, NI_NUMERICHOST); if (err) { warning() << "getnameinfo() failed: " << stringifyError(err) << std::endl; continue; @@ -235,8 +235,8 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) { ON_BLOCK_EXIT([&] { freeifaddrs(addrs); }); // based on example code from linux getifaddrs manpage - for (ifaddrs* addr = addrs; addr != NULL; addr = addr->ifa_next) { - if (addr->ifa_addr == NULL) + for (ifaddrs* addr = addrs; addr != nullptr; addr = addr->ifa_next) { + if (addr->ifa_addr == nullptr) continue; int family = addr->ifa_addr->sa_family; char host[NI_MAXHOST]; @@ -247,7 +247,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) { (family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)), host, NI_MAXHOST, - NULL, + nullptr, 0, NI_NUMERICHOST); if (err) { diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index e628976f83c..0f1d20a7ef7 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -608,7 +608,7 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx, nss, uuid, begin[i].doc, - NULL, + nullptr, fromMigrate, insertStatementOplogSlot, wallClockTime, @@ -1683,7 +1683,7 @@ Status applyOperation_inlock(OperationContext* opCtx, // such as an updateCriteria of the form // { _id:..., { x : {$size:...} } // thus this is not ideal. 
- if (collection == NULL || + if (collection == nullptr || (indexCatalog->haveIdIndex(opCtx) && Helpers::findById(opCtx, collection, updateCriteria).isNull()) || // capped collections won't have an _id index @@ -1980,7 +1980,7 @@ void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplo DBDirectClient c(opCtx); static const BSONObj reverseNaturalObj = BSON("$natural" << -1); BSONObj lastOp = - c.findOne(oplogNss.ns(), Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk); + c.findOne(oplogNss.ns(), Query().sort(reverseNaturalObj), nullptr, QueryOption_SlaveOk); if (!lastOp.isEmpty()) { LOG(1) << "replSet setting last Timestamp"; diff --git a/src/mongo/db/repl/oplogreader.cpp b/src/mongo/db/repl/oplogreader.cpp index 5be1c01ecfc..17811adcf9a 100644 --- a/src/mongo/db/repl/oplogreader.cpp +++ b/src/mongo/db/repl/oplogreader.cpp @@ -75,7 +75,7 @@ OplogReader::OplogReader() { } bool OplogReader::connect(const HostAndPort& host) { - if (conn() == NULL || _host != host) { + if (conn() == nullptr || _host != host) { resetConnection(); _conn = std::shared_ptr<DBClientConnection>( new DBClientConnection(false, durationCount<Seconds>(kSocketTimeout))); diff --git a/src/mongo/db/repl/oplogreader.h b/src/mongo/db/repl/oplogreader.h index 10abc580087..dca256bf871 100644 --- a/src/mongo/db/repl/oplogreader.h +++ b/src/mongo/db/repl/oplogreader.h @@ -73,7 +73,7 @@ public: return _conn.get(); } BSONObj findOne(const char* ns, const Query& q) { - return conn()->findOne(ns, q, 0, QueryOption_SlaveOk); + return conn()->findOne(ns, q, nullptr, QueryOption_SlaveOk); } BSONObj findOneByUUID(const std::string& db, UUID uuid, const BSONObj& filter) { // Note that the findOneByUUID() function of DBClient passes SlaveOK to the client. 
@@ -91,7 +91,7 @@ public: void tailCheck(); bool haveCursor() { - return cursor.get() != 0; + return cursor.get() != nullptr; } void tailingQuery(const char* ns, const BSONObj& query); diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp index a38d510fd5a..c7b3c3ec3d2 100644 --- a/src/mongo/db/repl/repl_set_commands.cpp +++ b/src/mongo/db/repl/repl_set_commands.cpp @@ -248,14 +248,14 @@ void parseReplSetSeedList(ReplicationCoordinatorExternalState* externalState, *setname = p; } - if (slash == 0) { + if (slash == nullptr) { return; } p = slash + 1; while (1) { const char* comma = strchr(p, ','); - if (comma == 0) { + if (comma == nullptr) { comma = strchr(p, 0); } if (p == comma) { diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp index a3f27decf09..371ec2b7e87 100644 --- a/src/mongo/db/repl/repl_set_config.cpp +++ b/src/mongo/db/repl/repl_set_config.cpp @@ -742,7 +742,7 @@ const MemberConfig* ReplSetConfig::findMemberByID(int id) const { return &(*it); } } - return NULL; + return nullptr; } int ReplSetConfig::findMemberIndexByHostAndPort(const HostAndPort& hap) const { @@ -770,7 +770,7 @@ int ReplSetConfig::findMemberIndexByConfigId(long long configId) const { const MemberConfig* ReplSetConfig::findMemberByHostAndPort(const HostAndPort& hap) const { int idx = findMemberIndexByHostAndPort(hap); - return idx != -1 ? &getMemberAt(idx) : NULL; + return idx != -1 ? 
&getMemberAt(idx) : nullptr; } Milliseconds ReplSetConfig::getHeartbeatInterval() const { diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp index b963d5c9849..f97ebb90f04 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl.cpp @@ -2359,7 +2359,7 @@ Status ReplicationCoordinatorImpl::processReplSetGetStatus( _topCoord->prepareStatusResponse( TopologyCoordinator::ReplSetStatusArgs{ _replExecutor->now(), - static_cast<unsigned>(time(0) - serverGlobalParams.started), + static_cast<unsigned>(time(nullptr) - serverGlobalParams.started), _getCurrentCommittedSnapshotOpTimeAndWallTime_inlock(), initialSyncProgress, _storage->getLastStableCheckpointTimestampDeprecated(_service), diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp index 448504218b4..ee096286774 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp @@ -3364,7 +3364,7 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) { << UpdatePositionArgs::kAppliedWallTimeFieldName << Date_t() + Seconds(time2.getSecs())))))); - ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); + ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, nullptr)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); } @@ -3488,7 +3488,8 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf auto opCtx = makeOperationContext(); - ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0)); + ASSERT_EQUALS(ErrorCodes::NodeNotFound, + getReplCoord()->processReplSetUpdatePosition(args, nullptr)); ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, getReplCoord()->awaitReplication(opCtx.get(), 
time2, writeConcern).status); } @@ -3565,7 +3566,7 @@ TEST_F(ReplCoordTest, auto opCtx = makeOperationContext(); - ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); + ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, nullptr)); ASSERT_OK(getReplCoord()->awaitReplication(opCtx.get(), time2, writeConcern).status); writeConcern.wNumNodes = 3; @@ -6122,7 +6123,7 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod << UpdatePositionArgs::kDurableWallTimeFieldName << Date_t() + Seconds(startingOpTime.getSecs())))))); - ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0)); + ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, nullptr)); // Become PRIMARY. simulateSuccessfulV1Election(); @@ -6161,7 +6162,7 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod const Date_t startDate = getNet()->now(); getNet()->enterNetwork(); getNet()->runUntil(startDate + Milliseconds(100)); - ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args1, 0)); + ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args1, nullptr)); // Confirm that the node remains PRIMARY after the other two nodes are marked DOWN. 
getNet()->runUntil(startDate + Milliseconds(2080)); @@ -6212,7 +6213,7 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod << startingOpTime.toBSON() << UpdatePositionArgs::kAppliedWallTimeFieldName << Date_t() + Seconds(startingOpTime.getSecs())))))); - ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0)); + ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, nullptr)); hbArgs.setSetName("mySet"); hbArgs.setConfigVersion(2); diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp index e3f26e73513..701b6411d6a 100644 --- a/src/mongo/db/repl/replication_info.cpp +++ b/src/mongo/db/repl/replication_info.cpp @@ -101,7 +101,7 @@ void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::NO_YIELD); BSONObj obj; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { src.push_back(obj.getOwned()); } diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp index 26a9240bbc8..aecd404f865 100644 --- a/src/mongo/db/repl/rollback_source_impl.cpp +++ b/src/mongo/db/repl/rollback_source_impl.cpp @@ -67,11 +67,13 @@ int RollbackSourceImpl::getRollbackId() const { BSONObj RollbackSourceImpl::getLastOperation() const { const Query query = Query().sort(BSON("$natural" << -1)); - return _getConnection()->findOne(_collectionName, query, 0, QueryOption_SlaveOk); + return _getConnection()->findOne(_collectionName, query, nullptr, QueryOption_SlaveOk); } BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& filter) const { - return _getConnection()->findOne(nss.toString(), filter, NULL, QueryOption_SlaveOk).getOwned(); + return _getConnection() + ->findOne(nss.toString(), filter, nullptr, QueryOption_SlaveOk) + .getOwned(); 
} std::pair<BSONObj, NamespaceString> RollbackSourceImpl::findOneByUUID(const std::string& db, diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index e2fc2bcdb23..26491c2728d 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp @@ -793,7 +793,7 @@ void dropCollection(OperationContext* opCtx, opCtx, nss.toString(), collection, PlanExecutor::YIELD_AUTO); BSONObj curObj; PlanExecutor::ExecState execState; - while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, NULL))) { + while (PlanExecutor::ADVANCED == (execState = exec->getNext(&curObj, nullptr))) { auto status = removeSaver.goingToDelete(curObj); if (!status.isOK()) { severe() << "Rolling back createCollection on " << nss @@ -1319,7 +1319,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx, log() << "Deleting and updating documents to roll back insert, update and remove " "operations"; unsigned deletes = 0, updates = 0; - time_t lastProgressUpdate = time(0); + time_t lastProgressUpdate = time(nullptr); time_t progressUpdateGap = 10; for (const auto& nsAndGoodVersionsByDocID : goodVersions) { @@ -1345,7 +1345,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx, const auto& goodVersionsByDocID = nsAndGoodVersionsByDocID.second; for (const auto& idAndDoc : goodVersionsByDocID) { - time_t now = time(0); + time_t now = time(nullptr); if (now - lastProgressUpdate > progressUpdateGap) { log() << deletes << " delete and " << updates << " update operations processed out of " << goodVersions.size() diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp index 7300fdc17cb..ffcf36c57be 100644 --- a/src/mongo/db/repl/topology_coordinator.cpp +++ b/src/mongo/db/repl/topology_coordinator.cpp @@ -1409,7 +1409,7 @@ void TopologyCoordinator::setCurrentPrimary_forTest(int primaryIndex, const MemberConfig* TopologyCoordinator::_currentPrimaryMember() const { if (_currentPrimaryIndex == 
-1) - return NULL; + return nullptr; return &(_rsConfig.getMemberAt(_currentPrimaryIndex)); } diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp index 97e5736cd76..4fc8382bf4a 100644 --- a/src/mongo/db/repl/vote_requester_test.cpp +++ b/src/mongo/db/repl/vote_requester_test.cpp @@ -98,7 +98,7 @@ public: } virtual void tearDown() { - _requester.reset(NULL); + _requester.reset(nullptr); } protected: diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp index 7bc56741ae8..88cd5d4b0eb 100644 --- a/src/mongo/db/s/check_sharding_index_command.cpp +++ b/src/mongo/db/s/check_sharding_index_command.cpp @@ -121,7 +121,7 @@ public: collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPattern, true); // requireSingleKey - if (idx == NULL) { + if (idx == nullptr) { errmsg = "couldn't find valid index for shard key"; return false; } diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp index c76edbf8c6d..be2560efcee 100644 --- a/src/mongo/db/s/split_chunk.cpp +++ b/src/mongo/db/s/split_chunk.cpp @@ -76,8 +76,8 @@ bool checkIfSingleDoc(OperationContext* opCtx, // check if exactly one document found PlanExecutor::ExecState state; BSONObj obj; - if (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { - if (PlanExecutor::IS_EOF == (state = exec->getNext(&obj, NULL))) { + if (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { + if (PlanExecutor::IS_EOF == (state = exec->getNext(&obj, nullptr))) { return true; } } diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp index 816d0b8dd92..62bc2dcd021 100644 --- a/src/mongo/db/s/split_vector.cpp +++ b/src/mongo/db/s/split_vector.cpp @@ -86,7 +86,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx, // any multi-key index prefixed by shard key cannot be multikey over the shard key fields. 
const IndexDescriptor* idx = collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, keyPattern, false); - if (idx == NULL) { + if (idx == nullptr) { return {ErrorCodes::IndexNotFound, "couldn't find index over splitting key " + keyPattern.clientReadable().toString()}; @@ -171,7 +171,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx, InternalPlanner::FORWARD); BSONObj currKey; - PlanExecutor::ExecState state = exec->getNext(&currKey, NULL); + PlanExecutor::ExecState state = exec->getNext(&currKey, nullptr); if (PlanExecutor::ADVANCED != state) { return {ErrorCodes::OperationFailed, "can't open a cursor to scan the range (desired range is possibly empty)"}; @@ -189,7 +189,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx, PlanExecutor::YIELD_AUTO, InternalPlanner::BACKWARD); - PlanExecutor::ExecState state = exec->getNext(&maxKeyInChunk, NULL); + PlanExecutor::ExecState state = exec->getNext(&maxKeyInChunk, nullptr); if (PlanExecutor::ADVANCED != state) { return {ErrorCodes::OperationFailed, "can't open a cursor to find final key in range (desired range is possibly " @@ -241,7 +241,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx, break; } - state = exec->getNext(&currKey, NULL); + state = exec->getNext(&currKey, nullptr); } if (PlanExecutor::FAILURE == state) { @@ -271,7 +271,7 @@ StatusWith<std::vector<BSONObj>> splitVector(OperationContext* opCtx, PlanExecutor::YIELD_AUTO, InternalPlanner::FORWARD); - state = exec->getNext(&currKey, NULL); + state = exec->getNext(&currKey, nullptr); } // diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h index 14237da05e2..08a944fec1d 100644 --- a/src/mongo/db/server_options.h +++ b/src/mongo/db/server_options.h @@ -115,7 +115,7 @@ struct ServerGlobalParams { bool storageDetailsCmdEnabled; // -- enableExperimentalStorageDetailsCmd } experimental; - time_t started = ::time(0); + time_t started = ::time(nullptr); BSONArray 
argvArray; BSONObj parsedOpts; diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp index 4a9e1a3acef..01c0f1c0c8c 100644 --- a/src/mongo/db/server_options_helpers.cpp +++ b/src/mongo/db/server_options_helpers.cpp @@ -340,7 +340,8 @@ Status storeBaseOptions(const moe::Environment& params) { bool set = false; // match facility string to facility value size_t facilitynamesLength = sizeof(facilitynames) / sizeof(facilitynames[0]); - for (unsigned long i = 0; i < facilitynamesLength && facilitynames[i].c_name != NULL; i++) { + for (unsigned long i = 0; i < facilitynamesLength && facilitynames[i].c_name != nullptr; + i++) { if (!facility.compare(facilitynames[i].c_name)) { serverGlobalParams.syslogFacility = facilitynames[i].c_val; set = true; @@ -399,8 +400,8 @@ Status storeBaseOptions(const moe::Environment& params) { ServerParameter* parameter = mapFindWithDefault(ServerParameterSet::getGlobal()->getMap(), parametersIt->first, - static_cast<ServerParameter*>(NULL)); - if (NULL == parameter) { + static_cast<ServerParameter*>(nullptr)); + if (nullptr == parameter) { StringBuilder sb; sb << "Illegal --setParameter parameter: \"" << parametersIt->first << "\""; return Status(ErrorCodes::BadValue, sb.str()); diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp index 5c48ade9178..89076c67f13 100644 --- a/src/mongo/db/service_entry_point_common.cpp +++ b/src/mongo/db/service_entry_point_common.cpp @@ -1240,7 +1240,7 @@ DbResponse ServiceEntryPointCommon::handleRequest(OperationContext* opCtx, invariant(!opCtx->lockState()->isLocked()); } - const char* ns = dbmsg.messageShouldHaveNs() ? dbmsg.getns() : NULL; + const char* ns = dbmsg.messageShouldHaveNs() ? dbmsg.getns() : nullptr; const NamespaceString nsString = ns ? 
NamespaceString(ns) : NamespaceString(); if (op == dbQuery) { diff --git a/src/mongo/db/sorter/sorter_test.cpp b/src/mongo/db/sorter/sorter_test.cpp index 18fcf91f0b1..513954cb684 100644 --- a/src/mongo/db/sorter/sorter_test.cpp +++ b/src/mongo/db/sorter/sorter_test.cpp @@ -516,7 +516,7 @@ class Dupes : public Basic { template <bool Random = true> class LotsOfDataLittleMemory : public Basic { public: - LotsOfDataLittleMemory() : _array(new int[NUM_ITEMS]), _random(int64_t(time(0))) { + LotsOfDataLittleMemory() : _array(new int[NUM_ITEMS]), _random(int64_t(time(nullptr))) { for (int i = 0; i < NUM_ITEMS; i++) _array[i] = i; diff --git a/src/mongo/db/storage/biggie/biggie_record_store.h b/src/mongo/db/storage/biggie/biggie_record_store.h index 5f62b5ab5e8..813c575ecf7 100644 --- a/src/mongo/db/storage/biggie/biggie_record_store.h +++ b/src/mongo/db/storage/biggie/biggie_record_store.h @@ -64,7 +64,7 @@ public: virtual bool isCapped() const; virtual void setCappedCallback(CappedCallback*); virtual int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const; virtual bool findRecord(OperationContext* opCtx, const RecordId& loc, RecordData* rd) const; diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp index 6500879bde3..59a45b990ea 100644 --- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp +++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp @@ -87,7 +87,7 @@ public: } virtual int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const { return 0; } diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h index 37b526f4c1a..439cbe1cdf6 100644 --- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h +++ 
b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_record_store.h @@ -104,7 +104,7 @@ public: virtual Status touch(OperationContext* opCtx, BSONObjBuilder* output) const; virtual int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const; virtual long long dataSize(OperationContext* opCtx) const { diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp index a7fab7792c3..d30ac2d93b0 100644 --- a/src/mongo/db/storage/key_string.cpp +++ b/src/mongo/db/storage/key_string.cpp @@ -346,7 +346,7 @@ void KeyString::_appendAllElementsForIndexing(const BSONObj& obj, const int elemIdx = elemCount++; const bool invert = (ord.get(elemIdx) == -1); - _appendBsonValue(elem, invert, NULL); + _appendBsonValue(elem, invert, nullptr); dassert(elem.fieldNameSize() < 3); // fieldNameSize includes the NUL @@ -517,7 +517,7 @@ void KeyString::_appendArray(const BSONArray& val, bool invert) { _append(CType::kArray, invert); BSONForEach(elem, val) { // No generic ctype byte needed here since no name is encoded. 
- _appendBsonValue(elem, invert, NULL); + _appendBsonValue(elem, invert, nullptr); } _append(int8_t(0), invert); } diff --git a/src/mongo/db/storage/kv/kv_catalog.cpp b/src/mongo/db/storage/kv/kv_catalog.cpp index b9922cea3f9..46a821a805c 100644 --- a/src/mongo/db/storage/kv/kv_catalog.cpp +++ b/src/mongo/db/storage/kv/kv_catalog.cpp @@ -330,7 +330,7 @@ KVCatalog::KVCatalog(RecordStore* rs, _engine(engine) {} KVCatalog::~KVCatalog() { - _rs = NULL; + _rs = nullptr; } std::string KVCatalog::_newRand() { diff --git a/src/mongo/db/storage/mobile/mobile_record_store.h b/src/mongo/db/storage/mobile/mobile_record_store.h index ac2d7e4e229..9379a7760f7 100644 --- a/src/mongo/db/storage/mobile/mobile_record_store.h +++ b/src/mongo/db/storage/mobile/mobile_record_store.h @@ -113,7 +113,7 @@ public: Status touch(OperationContext* opCtx, BSONObjBuilder* output) const override; int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const override; long long dataSize(OperationContext* opCtx) const override; diff --git a/src/mongo/db/storage/mobile/mobile_recovery_unit.cpp b/src/mongo/db/storage/mobile/mobile_recovery_unit.cpp index d8ff95a9e50..60181ac0b02 100644 --- a/src/mongo/db/storage/mobile/mobile_recovery_unit.cpp +++ b/src/mongo/db/storage/mobile/mobile_recovery_unit.cpp @@ -146,7 +146,7 @@ bool MobileRecoveryUnit::waitUntilDurable() { Lock::GlobalLock lk(opCtx, MODE_X); // Use FULL mode to guarantee durability ret = sqlite3_wal_checkpoint_v2(_session.get()->getSession(), - NULL, + nullptr, SQLITE_CHECKPOINT_FULL, &framesInWAL, &checkpointedFrames); diff --git a/src/mongo/db/storage/mobile/mobile_sqlite_statement.cpp b/src/mongo/db/storage/mobile/mobile_sqlite_statement.cpp index 151ad0d68d7..7760dff5788 100644 --- a/src/mongo/db/storage/mobile/mobile_sqlite_statement.cpp +++ b/src/mongo/db/storage/mobile/mobile_sqlite_statement.cpp @@ -59,14 +59,14 @@ void SqliteStatement::finalize() { 
int status = sqlite3_finalize(_stmt); fassert(37053, status == _exceptionStatus); - _stmt = NULL; + _stmt = nullptr; } void SqliteStatement::prepare(const MobileSession& session) { SQLITE_STMT_TRACE() << "Preparing: " << _sqlQuery.data(); - int status = - sqlite3_prepare_v2(session.getSession(), _sqlQuery.data(), _sqlQuery.size(), &_stmt, NULL); + int status = sqlite3_prepare_v2( + session.getSession(), _sqlQuery.data(), _sqlQuery.size(), &_stmt, nullptr); if (status == SQLITE_BUSY) { SQLITE_STMT_TRACE() << "Throwing writeConflictException, " << "SQLITE_BUSY while preparing: " << _sqlQuery.data(); @@ -150,8 +150,8 @@ const char* SqliteStatement::getColText(int colIndex) { void SqliteStatement::_execQuery(sqlite3* session, const char* query) { LOG(MOBILE_TRACE_LEVEL) << "MobileSE: SQLite sqlite3_exec: " << query; - char* errMsg = NULL; - int status = sqlite3_exec(session, query, NULL, NULL, &errMsg); + char* errMsg = nullptr; + int status = sqlite3_exec(session, query, nullptr, nullptr, &errMsg); if (status == SQLITE_BUSY || status == SQLITE_LOCKED) { LOG(MOBILE_TRACE_LEVEL) << "MobileSE: " << (status == SQLITE_BUSY ? "Busy" : "Locked") diff --git a/src/mongo/db/storage/mobile/mobile_util.cpp b/src/mongo/db/storage/mobile/mobile_util.cpp index 141cff1a070..3159dd169f8 100644 --- a/src/mongo/db/storage/mobile/mobile_util.cpp +++ b/src/mongo/db/storage/mobile/mobile_util.cpp @@ -91,7 +91,7 @@ Status sqliteRCToStatus(int retCode, const char* prefix) { } const char* sqliteStatusToStr(int retStatus) { - const char* msg = NULL; + const char* msg = nullptr; switch (retStatus) { case SQLITE_OK: diff --git a/src/mongo/db/storage/mobile/mobile_util.h b/src/mongo/db/storage/mobile/mobile_util.h index 93ca3fdd51d..9cdb8bf0e15 100644 --- a/src/mongo/db/storage/mobile/mobile_util.h +++ b/src/mongo/db/storage/mobile/mobile_util.h @@ -44,7 +44,7 @@ namespace embedded { /** * Converts SQLite return codes to MongoDB statuses. 
*/ -Status sqliteRCToStatus(int retCode, const char* prefix = NULL); +Status sqliteRCToStatus(int retCode, const char* prefix = nullptr); /** * Converts SQLite return codes to string equivalents. @@ -54,7 +54,10 @@ const char* sqliteStatusToStr(int retStatus); /** * Checks if retStatus == desiredStatus; else calls fassert. */ -void checkStatus(int retStatus, int desiredStatus, const char* fnName, const char* errMsg = NULL); +void checkStatus(int retStatus, + int desiredStatus, + const char* fnName, + const char* errMsg = nullptr); /** * Validate helper function to log an error and append the error to the results. diff --git a/src/mongo/db/storage/record_data.h b/src/mongo/db/storage/record_data.h index 5cc99b7f50d..67a3feb8413 100644 --- a/src/mongo/db/storage/record_data.h +++ b/src/mongo/db/storage/record_data.h @@ -43,7 +43,7 @@ namespace mongo { */ class RecordData { public: - RecordData() : _data(NULL), _size(0) {} + RecordData() : _data(nullptr), _size(0) {} RecordData(const char* data, int size) : _data(data), _size(size) {} RecordData(SharedBuffer ownedData, int size) diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h index 7880616da03..d5152e51d23 100644 --- a/src/mongo/db/storage/record_store.h +++ b/src/mongo/db/storage/record_store.h @@ -286,7 +286,7 @@ public: * @return total estimate size (in bytes) on stable storage */ virtual int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const = 0; // CRUD related diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp index 6a72d25e954..b93e8029d34 100644 --- a/src/mongo/db/storage/record_store_test_harness.cpp +++ b/src/mongo/db/storage/record_store_test_harness.cpp @@ -75,7 +75,7 @@ TEST(RecordStoreTestHarness, Simple1) { RecordData rd; ASSERT(!rs->findRecord(opCtx.get(), RecordId(111, 17), &rd)); - ASSERT(rd.data() == NULL); 
+ ASSERT(rd.data() == nullptr); ASSERT(rs->findRecord(opCtx.get(), loc1, &rd)); ASSERT_EQUALS(s, rd.data()); diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp index c5a95f250c2..38649fa89da 100644 --- a/src/mongo/db/storage/record_store_test_recordstore.cpp +++ b/src/mongo/db/storage/record_store_test_recordstore.cpp @@ -48,7 +48,7 @@ TEST(RecordStoreTestHarness, RecordStoreName) { { const char* name = rs->name(); - ASSERT(name != NULL && name[0] != '\0'); + ASSERT(name != nullptr && name[0] != '\0'); } } diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp index 743559a1079..cba9e555c16 100644 --- a/src/mongo/db/storage/record_store_test_storagesize.cpp +++ b/src/mongo/db/storage/record_store_test_storagesize.cpp @@ -75,7 +75,7 @@ TEST(RecordStoreTestHarness, StorageSizeNonEmpty) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); - ASSERT(rs->storageSize(opCtx.get(), NULL) >= 0); + ASSERT(rs->storageSize(opCtx.get(), nullptr) >= 0); } } diff --git a/src/mongo/db/storage/record_store_test_touch.cpp b/src/mongo/db/storage/record_store_test_touch.cpp index 43b52b39ba2..edafde0683e 100644 --- a/src/mongo/db/storage/record_store_test_touch.cpp +++ b/src/mongo/db/storage/record_store_test_touch.cpp @@ -122,7 +122,7 @@ TEST(RecordStoreTestHarness, TouchEmptyWithNullStats) { { ServiceContext::UniqueOperationContext opCtx( harnessHelper->newOperationContext(harnessHelper->client())); - Status status = rs->touch(opCtx.get(), NULL /* stats output */); + Status status = rs->touch(opCtx.get(), nullptr /* stats output */); ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported); } } @@ -164,7 +164,7 @@ TEST(RecordStoreTestHarness, TouchNonEmptyWithNullStats) { harnessHelper->newOperationContext(harnessHelper->client())); // XXX does not verify the collection was loaded into cache 
// (even if supported by storage engine) - Status status = rs->touch(opCtx.get(), NULL /* stats output */); + Status status = rs->touch(opCtx.get(), nullptr /* stats output */); ASSERT(status.isOK() || status.code() == ErrorCodes::CommandNotSupported); } } diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp index d49e1cc5dd5..16ad9b482e9 100644 --- a/src/mongo/db/storage/remove_saver.cpp +++ b/src/mongo/db/storage/remove_saver.cpp @@ -135,7 +135,7 @@ Status RemoveSaver::goingToDelete(const BSONObj& o) { << " for remove saving: " << redact(errnoWithDescription()); error() << msg; _out.reset(); - _out = 0; + _out = nullptr; return Status(ErrorCodes::FileNotOpen, msg); } } diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h index ee745bf7cfe..8bda0d3d660 100644 --- a/src/mongo/db/storage/sorted_data_interface.h +++ b/src/mongo/db/storage/sorted_data_interface.h @@ -180,7 +180,7 @@ public: */ virtual long long numEntries(OperationContext* opCtx) const { long long x = -1; - fullValidate(opCtx, &x, NULL); + fullValidate(opCtx, &x, nullptr); return x; } diff --git a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp index db5d4493df5..e396052e9fa 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp @@ -69,7 +69,7 @@ TEST(SortedDataInterface, FullValidate) { { long long numKeysOut; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); - sorted->fullValidate(opCtx.get(), &numKeysOut, NULL); + sorted->fullValidate(opCtx.get(), &numKeysOut, nullptr); // fullValidate() can set numKeysOut as the number of existing keys or -1. 
ASSERT(numKeysOut == nToInsert || numKeysOut == -1); } diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp index f92ba9c83bb..43bb6df5c4a 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp @@ -97,7 +97,7 @@ TEST(SortedDataInterface, InsertWithDups1) { ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); long long x = 0; - sorted->fullValidate(opCtx.get(), &x, NULL); + sorted->fullValidate(opCtx.get(), &x, nullptr); ASSERT_EQUALS(2, x); } } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp index 95fe1bf3b50..966cfe4c8c4 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp @@ -79,7 +79,7 @@ MONGO_FAIL_POINT_DEFINE(WTEmulateOutOfOrderNextIndexKey); using std::string; using std::vector; -static const WiredTigerItem emptyItem(NULL, 0); +static const WiredTigerItem emptyItem(nullptr, 0); bool hasFieldNames(const BSONObj& obj) { @@ -572,14 +572,14 @@ protected: // an unexpected pause in building an index. 
WT_SESSION* session = _session->getSession(); int err = session->open_cursor( - session, idx->uri().c_str(), NULL, "bulk,checkpoint_wait=false", &cursor); + session, idx->uri().c_str(), nullptr, "bulk,checkpoint_wait=false", &cursor); if (!err) return cursor; warning() << "failed to create WiredTiger bulk cursor: " << wiredtiger_strerror(err); warning() << "falling back to non-bulk cursor for index " << idx->uri(); - invariantWTOK(session->open_cursor(session, idx->uri().c_str(), NULL, NULL, &cursor)); + invariantWTOK(session->open_cursor(session, idx->uri().c_str(), nullptr, nullptr, &cursor)); return cursor; } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp index 9971b1e387d..38dc2f6913a 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp @@ -631,7 +631,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName, Status s(wtRCToStatus(ret)); msgasserted(28718, s.reason()); } - invariantWTOK(_conn->close(_conn, NULL)); + invariantWTOK(_conn->close(_conn, nullptr)); // After successful recovery, remove the journal directory. try { boost::filesystem::remove_all(journalPath); @@ -706,7 +706,7 @@ WiredTigerKVEngine::~WiredTigerKVEngine() { cleanShutdown(); } - _sessionCache.reset(NULL); + _sessionCache.reset(nullptr); } void WiredTigerKVEngine::appendGlobalStats(BSONObjBuilder& b) { @@ -887,7 +887,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) { WiredTigerSession sessionWrapper(_conn); WT_SESSION* session = sessionWrapper.getSession(); - int rc = (session->verify)(session, uri, NULL); + int rc = (session->verify)(session, uri, nullptr); if (rc == 0) { log() << "Verify succeeded on uri " << uri << ". Not salvaging."; return Status::OK(); @@ -912,7 +912,7 @@ Status WiredTigerKVEngine::_salvageIfNeeded(const char* uri) { } log() << "Verify failed on uri " << uri << ". 
Running a salvage operation."; - auto status = wtRCToStatus(session->salvage(session, uri, NULL), "Salvage failed:"); + auto status = wtRCToStatus(session->salvage(session, uri, nullptr), "Salvage failed:"); if (status.isOK()) { return {ErrorCodes::DataModifiedByRepair, str::stream() << "Salvaged data for " << uri}; } @@ -952,7 +952,7 @@ Status WiredTigerKVEngine::_rebuildIdent(WT_SESSION* session, const char* uri) { return swMetadata.getStatus(); } - int rc = session->drop(session, uri, NULL); + int rc = session->drop(session, uri, nullptr); if (rc != 0) { error() << "Failed to drop " << uri; return wtRCToStatus(rc); @@ -995,9 +995,9 @@ Status WiredTigerKVEngine::beginBackup(OperationContext* opCtx) { // This cursor will be freed by the backupSession being closed as the session is uncached auto session = std::make_unique<WiredTigerSession>(_conn); - WT_CURSOR* c = NULL; + WT_CURSOR* c = nullptr; WT_SESSION* s = session->getSession(); - int ret = WT_OP_CHECK(s->open_cursor(s, "backup:", NULL, NULL, &c)); + int ret = WT_OP_CHECK(s->open_cursor(s, "backup:", nullptr, nullptr, &c)); if (ret != 0) { return wtRCToStatus(ret); } @@ -1025,9 +1025,9 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::beginNonBlockingBackup( // This cursor will be freed by the backupSession being closed as the session is uncached auto sessionRaii = std::make_unique<WiredTigerSession>(_conn); - WT_CURSOR* cursor = NULL; + WT_CURSOR* cursor = nullptr; WT_SESSION* session = sessionRaii->getSession(); - int wtRet = session->open_cursor(session, "backup:", NULL, NULL, &cursor); + int wtRet = session->open_cursor(session, "backup:", nullptr, nullptr, &cursor); if (wtRet != 0) { return wtRCToStatus(wtRet); } @@ -1059,9 +1059,9 @@ StatusWith<std::vector<std::string>> WiredTigerKVEngine::extendBackupCursor( uassert(51033, "Cannot extend backup cursor with in-memory mode.", !isEphemeral()); invariant(_backupCursor); - WT_CURSOR* cursor = NULL; + WT_CURSOR* cursor = nullptr; WT_SESSION* 
session = _backupSession->getSession(); - int wtRet = session->open_cursor(session, NULL, _backupCursor, "target=(\"log:\")", &cursor); + int wtRet = session->open_cursor(session, nullptr, _backupCursor, "target=(\"log:\")", &cursor); if (wtRet != 0) { return wtRCToStatus(wtRet); } @@ -1475,8 +1475,8 @@ bool WiredTigerKVEngine::hasIdent(OperationContext* opCtx, StringData ident) con bool WiredTigerKVEngine::_hasUri(WT_SESSION* session, const std::string& uri) const { // can't use WiredTigerCursor since this is called from constructor. - WT_CURSOR* c = NULL; - int ret = session->open_cursor(session, "metadata:create", NULL, NULL, &c); + WT_CURSOR* c = nullptr; + int ret = session->open_cursor(session, "metadata:create", nullptr, nullptr, &c); if (ret == ENOENT) return false; invariantWTOK(ret); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp index 3ded0dc99b6..df39f655421 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prefixed_index_test.cpp @@ -56,9 +56,9 @@ using std::string; class MyHarnessHelper final : public SortedDataInterfaceHarnessHelper { public: - MyHarnessHelper() : _dbpath("wt_test"), _conn(NULL) { + MyHarnessHelper() : _dbpath("wt_test"), _conn(nullptr) { const char* config = "create,cache_size=1G,"; - int ret = wiredtiger_open(_dbpath.path().c_str(), NULL, config, &_conn); + int ret = wiredtiger_open(_dbpath.path().c_str(), nullptr, config, &_conn); invariantWTOK(ret); _fastClockSource = std::make_unique<SystemClockSource>(); @@ -67,7 +67,7 @@ public: ~MyHarnessHelper() final { delete _sessionCache; - _conn->close(_conn, NULL); + _conn->close(_conn, nullptr); } std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique, bool partial) final { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp 
b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index 84d2f3ece7b..0b16064f6e8 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -1502,7 +1502,7 @@ Status WiredTigerRecordStore::truncate(OperationContext* opCtx) { invariantWTOK(ret); WT_SESSION* session = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); - invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL))); + invariantWTOK(WT_OP_CHECK(session->truncate(session, nullptr, start, nullptr, nullptr))); _changeNumRecords(opCtx, -numRecords(opCtx)); _increaseDataSize(opCtx, -dataSize(opCtx)); @@ -1714,7 +1714,7 @@ public: DataSizeChange(WiredTigerRecordStore* rs, int64_t amount) : _rs(rs), _amount(amount) {} virtual void commit(boost::optional<Timestamp>) {} virtual void rollback() { - _rs->_increaseDataSize(NULL, -_amount); + _rs->_increaseDataSize(nullptr, -_amount); } private: diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h index f47db72d8a7..3e2121cdd5d 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h @@ -130,7 +130,7 @@ public: virtual bool isCapped() const; virtual int64_t storageSize(OperationContext* opCtx, - BSONObjBuilder* extraInfo = NULL, + BSONObjBuilder* extraInfo = nullptr, int infoLevel = 0) const; // CRUD related diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index f6040a92621..2b59fc2c762 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -144,8 +144,8 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { rs->updateRecord(t2.get(), id1, "c", 2).transitional_ignore(); ASSERT(0); } catch 
(WriteConflictException&) { - w2.reset(NULL); - t2.reset(NULL); + w2.reset(nullptr); + t2.reset(nullptr); } w1->commit(); // this should succeed diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp index 5769add2cb1..0363230e58b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp @@ -146,7 +146,7 @@ public: void getCursor(WiredTigerRecoveryUnit* ru, WT_CURSOR** cursor) { WT_SESSION* wt_session = ru->getSession()->getSession(); invariantWTOK(wt_session->create(wt_session, wt_uri, wt_config)); - invariantWTOK(wt_session->open_cursor(wt_session, wt_uri, NULL, NULL, cursor)); + invariantWTOK(wt_session->open_cursor(wt_session, wt_uri, nullptr, nullptr, cursor)); } void setUp() override { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp index 5344e0163fc..91a40336e9f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp @@ -56,11 +56,11 @@ const std::string kWTRepairMsg = WiredTigerSession::WiredTigerSession(WT_CONNECTION* conn, uint64_t epoch, uint64_t cursorEpoch) : _epoch(epoch), _cursorEpoch(cursorEpoch), - _session(NULL), + _session(nullptr), _cursorGen(0), _cursorsOut(0), _idleExpireTime(Date_t::min()) { - invariantWTOK(conn->open_session(conn, NULL, "isolation=snapshot", &_session)); + invariantWTOK(conn->open_session(conn, nullptr, "isolation=snapshot", &_session)); } WiredTigerSession::WiredTigerSession(WT_CONNECTION* conn, @@ -70,16 +70,16 @@ WiredTigerSession::WiredTigerSession(WT_CONNECTION* conn, : _epoch(epoch), _cursorEpoch(cursorEpoch), _cache(cache), - _session(NULL), + _session(nullptr), _cursorGen(0), _cursorsOut(0), _idleExpireTime(Date_t::min()) { - invariantWTOK(conn->open_session(conn, 
NULL, "isolation=snapshot", &_session)); + invariantWTOK(conn->open_session(conn, nullptr, "isolation=snapshot", &_session)); } WiredTigerSession::~WiredTigerSession() { if (_session) { - invariantWTOK(_session->close(_session, NULL)); + invariantWTOK(_session->close(_session, nullptr)); } } @@ -88,7 +88,7 @@ void _openCursor(WT_SESSION* session, const std::string& uri, const char* config, WT_CURSOR** cursorOut) { - int ret = session->open_cursor(session, uri.c_str(), NULL, config, cursorOut); + int ret = session->open_cursor(session, uri.c_str(), nullptr, config, cursorOut); if (ret == EBUSY) { // This can only happen when trying to open a cursor on the oplog and it is currently locked // by a verify or salvage, because we don't employ database locks to protect the oplog. @@ -116,7 +116,7 @@ WT_CURSOR* WiredTigerSession::getCursor(const std::string& uri, uint64_t id, boo } } - WT_CURSOR* cursor = NULL; + WT_CURSOR* cursor = nullptr; _openCursor(_session, uri, allowOverwrite ? "" : "overwrite=false", &cursor); _cursorsOut++; return cursor; @@ -125,7 +125,7 @@ WT_CURSOR* WiredTigerSession::getCursor(const std::string& uri, uint64_t id, boo WT_CURSOR* WiredTigerSession::getReadOnceCursor(const std::string& uri, bool allowOverwrite) { const char* config = allowOverwrite ? "read_once=true" : "read_once=true,overwrite=false"; - WT_CURSOR* cursor = NULL; + WT_CURSOR* cursor = nullptr; _openCursor(_session, uri, config, &cursor); _cursorsOut++; return cursor; @@ -288,7 +288,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC // Initialize on first use. if (!_waitUntilDurableSession) { invariantWTOK( - _conn->open_session(_conn, NULL, "isolation=snapshot", &_waitUntilDurableSession)); + _conn->open_session(_conn, nullptr, "isolation=snapshot", &_waitUntilDurableSession)); } // Use the journal when available, or a checkpoint otherwise. 
@@ -296,7 +296,7 @@ void WiredTigerSessionCache::waitUntilDurable(bool forceCheckpoint, bool stableC invariantWTOK(_waitUntilDurableSession->log_flush(_waitUntilDurableSession, "sync=on")); LOG(4) << "flushed journal"; } else { - invariantWTOK(_waitUntilDurableSession->checkpoint(_waitUntilDurableSession, NULL)); + invariantWTOK(_waitUntilDurableSession->checkpoint(_waitUntilDurableSession, nullptr)); LOG(4) << "created checkpoint"; } _journalListener->onDurable(token); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp index 37ac7eddc4e..7d3326f57db 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp @@ -46,18 +46,18 @@ using std::stringstream; class WiredTigerConnection { public: - WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(NULL) { + WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(nullptr) { std::stringstream ss; ss << "create,"; ss << extraStrings; string config = ss.str(); _fastClockSource = std::make_unique<SystemClockSource>(); - int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &_conn); + int ret = wiredtiger_open(dbpath.toString().c_str(), nullptr, config.c_str(), &_conn); ASSERT_OK(wtRCToStatus(ret)); ASSERT(_conn); } ~WiredTigerConnection() { - _conn->close(_conn, NULL); + _conn->close(_conn, nullptr); } WT_CONNECTION* getConnection() const { return _conn; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp index 6fe47de5eca..0185b30fadd 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp @@ -56,9 +56,9 @@ using std::string; class MyHarnessHelper final : public 
SortedDataInterfaceHarnessHelper { public: - MyHarnessHelper() : _dbpath("wt_test"), _conn(NULL) { + MyHarnessHelper() : _dbpath("wt_test"), _conn(nullptr) { const char* config = "create,cache_size=1G,"; - int ret = wiredtiger_open(_dbpath.path().c_str(), NULL, config, &_conn); + int ret = wiredtiger_open(_dbpath.path().c_str(), nullptr, config, &_conn); invariantWTOK(ret); _fastClockSource = std::make_unique<SystemClockSource>(); @@ -67,7 +67,7 @@ public: ~MyHarnessHelper() final { delete _sessionCache; - _conn->close(_conn, NULL); + _conn->close(_conn, nullptr); } std::unique_ptr<SortedDataInterface> newSortedDataInterface(bool unique, bool partial) final { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp index 1c4a436831d..1f790206918 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp @@ -242,7 +242,7 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) { ASSERT_EQUALS(N, rs->numRecords(opCtx.get())); } - rs.reset(NULL); + rs.reset(nullptr); { auto& info = *ss.load(uri); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp index e426394574f..fb424183586 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp @@ -119,7 +119,7 @@ StatusWith<std::string> WiredTigerUtil::getMetadataRaw(WT_SESSION* session, Stri } else if (ret != 0) { return StatusWith<std::string>(wtRCToStatus(ret)); } - const char* metadata = NULL; + const char* metadata = nullptr; ret = cursor->get_value(cursor, &metadata); if (ret != 0) { return StatusWith<std::string>(wtRCToStatus(ret)); @@ -147,7 +147,7 @@ StatusWith<std::string> WiredTigerUtil::getMetadata(OperationContext* opCtx, Str } else if (ret != 0) { return 
StatusWith<std::string>(wtRCToStatus(ret)); } - const char* metadata = NULL; + const char* metadata = nullptr; ret = cursor->get_value(cursor, &metadata); if (ret != 0) { return StatusWith<std::string>(wtRCToStatus(ret)); @@ -314,9 +314,9 @@ StatusWith<int64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session, const std::string& config, int statisticsKey) { invariant(session); - WT_CURSOR* cursor = NULL; - const char* cursorConfig = config.empty() ? NULL : config.c_str(); - int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &cursor); + WT_CURSOR* cursor = nullptr; + const char* cursorConfig = config.empty() ? nullptr : config.c_str(); + int ret = session->open_cursor(session, uri.c_str(), nullptr, cursorConfig, &cursor); if (ret != 0) { return StatusWith<int64_t>(ErrorCodes::CursorNotFound, str::stream() << "unable to open cursor at URI " << uri @@ -516,11 +516,11 @@ int WiredTigerUtil::verifyTable(OperationContext* opCtx, // Open a new session with custom error handlers. WT_CONNECTION* conn = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache()->conn(); WT_SESSION* session; - invariantWTOK(conn->open_session(conn, &eventHandler, NULL, &session)); + invariantWTOK(conn->open_session(conn, &eventHandler, nullptr, &session)); ON_BLOCK_EXIT([&] { session->close(session, ""); }); // Do the verify. Weird parens prevent treating "verify" as a macro. - return (session->verify)(session, uri.c_str(), NULL); + return (session->verify)(session, uri.c_str(), nullptr); } bool WiredTigerUtil::useTableLogging(NamespaceString ns, bool replEnabled) { @@ -612,9 +612,9 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session, const std::vector<std::string>& filter) { invariant(session); invariant(bob); - WT_CURSOR* c = NULL; - const char* cursorConfig = config.empty() ? NULL : config.c_str(); - int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c); + WT_CURSOR* c = nullptr; + const char* cursorConfig = config.empty() ? 
nullptr : config.c_str(); + int ret = session->open_cursor(session, uri.c_str(), nullptr, cursorConfig, &c); if (ret != 0) { return Status(ErrorCodes::CursorNotFound, str::stream() << "unable to open cursor at URI " << uri << ". reason: " diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h index 6c03efc774d..42729da118e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h @@ -55,7 +55,7 @@ Status wtRCToStatus_slow(int retCode, const char* prefix); /** * converts wiredtiger return codes to mongodb statuses. */ -inline Status wtRCToStatus(int retCode, const char* prefix = NULL) { +inline Status wtRCToStatus(int retCode, const char* prefix = nullptr) { if (MONGO_likely(retCode == 0)) return Status::OK(); @@ -247,7 +247,7 @@ public: */ static int verifyTable(OperationContext* opCtx, const std::string& uri, - std::vector<std::string>* errors = NULL); + std::vector<std::string>* errors = nullptr); static bool useTableLogging(NamespaceString ns, bool replEnabled); @@ -278,12 +278,12 @@ class WiredTigerConfigParser { public: WiredTigerConfigParser(StringData config) { invariantWTOK( - wiredtiger_config_parser_open(NULL, config.rawData(), config.size(), &_parser)); + wiredtiger_config_parser_open(nullptr, config.rawData(), config.size(), &_parser)); } WiredTigerConfigParser(const WT_CONFIG_ITEM& nested) { invariant(nested.type == WT_CONFIG_ITEM::WT_CONFIG_ITEM_STRUCT); - invariantWTOK(wiredtiger_config_parser_open(NULL, nested.str, nested.len, &_parser)); + invariantWTOK(wiredtiger_config_parser_open(nullptr, nested.str, nested.len, &_parser)); } ~WiredTigerConfigParser() { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp index 0199cd8a1ed..0a3442459d9 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp +++ 
b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp @@ -50,18 +50,18 @@ using std::stringstream; class WiredTigerConnection { public: - WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(NULL) { + WiredTigerConnection(StringData dbpath, StringData extraStrings) : _conn(nullptr) { std::stringstream ss; ss << "create,"; ss << extraStrings; string config = ss.str(); _fastClockSource = std::make_unique<SystemClockSource>(); - int ret = wiredtiger_open(dbpath.toString().c_str(), NULL, config.c_str(), &_conn); + int ret = wiredtiger_open(dbpath.toString().c_str(), nullptr, config.c_str(), &_conn); ASSERT_OK(wtRCToStatus(ret)); ASSERT(_conn); } ~WiredTigerConnection() { - _conn->close(_conn, NULL); + _conn->close(_conn, nullptr); } WT_CONNECTION* getConnection() const { return _conn; @@ -111,8 +111,8 @@ public: } virtual void tearDown() { - _opCtx.reset(NULL); - _harnessHelper.reset(NULL); + _opCtx.reset(nullptr); + _harnessHelper.reset(nullptr); } protected: @@ -143,7 +143,7 @@ TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringInvalidURI) { } TEST_F(WiredTigerUtilMetadataTest, GetConfigurationStringNull) { - const char* config = NULL; + const char* config = nullptr; createSession(config); StatusWith<std::string> result = WiredTigerUtil::getMetadata(getOperationContext(), getURI()); ASSERT_OK(result.getStatus()); @@ -166,7 +166,7 @@ TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataInvalidURI) { } TEST_F(WiredTigerUtilMetadataTest, GetApplicationMetadataNull) { - const char* config = NULL; + const char* config = nullptr; createSession(config); StatusWith<BSONObj> result = WiredTigerUtil::getApplicationMetadata(getOperationContext(), getURI()); @@ -282,7 +282,7 @@ TEST(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) { harnessHelper.getOplogManager()); WiredTigerSession* session = recoveryUnit.getSession(); WT_SESSION* wtSession = session->getSession(); - ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, 
"table:mytable", NULL))); + ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr))); auto result = WiredTigerUtil::getStatisticsValue(session->getSession(), "statistics:table:mytable", "statistics=(fast)", @@ -297,7 +297,7 @@ TEST(WiredTigerUtilTest, GetStatisticsValueInvalidKey) { harnessHelper.getOplogManager()); WiredTigerSession* session = recoveryUnit.getSession(); WT_SESSION* wtSession = session->getSession(); - ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL))); + ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr))); // Use connection statistics key which does not apply to a table. auto result = WiredTigerUtil::getStatisticsValue(session->getSession(), "statistics:table:mytable", @@ -313,7 +313,7 @@ TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) { harnessHelper.getOplogManager()); WiredTigerSession* session = recoveryUnit.getSession(); WT_SESSION* wtSession = session->getSession(); - ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", NULL))); + ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr))); // Use connection statistics key which does not apply to a table. auto result = WiredTigerUtil::getStatisticsValue(session->getSession(), "statistics:table:mytable", diff --git a/src/mongo/db/update/field_checker.cpp b/src/mongo/db/update/field_checker.cpp index a915da0a6ab..a6aba4eb559 100644 --- a/src/mongo/db/update/field_checker.cpp +++ b/src/mongo/db/update/field_checker.cpp @@ -66,7 +66,7 @@ bool isPositionalElement(const StringData& field) { bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count) { // 'count' is optional. 
size_t dummy; - if (count == NULL) { + if (count == nullptr) { count = &dummy; } diff --git a/src/mongo/db/update/field_checker.h b/src/mongo/db/update/field_checker.h index 117a30995a9..1bc75b51709 100644 --- a/src/mongo/db/update/field_checker.h +++ b/src/mongo/db/update/field_checker.h @@ -58,7 +58,7 @@ bool isPositionalElement(const StringData& field); * isPositional assumes that the field is updatable. Call isUpdatable() above to * verify. */ -bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = NULL); +bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = nullptr); /** * Returns true iff 'field' is an array filter (matching the regular expression /\$\[.*\]/). diff --git a/src/mongo/db/update/path_support.cpp b/src/mongo/db/update/path_support.cpp index 6b9fc80a284..55f369164a8 100644 --- a/src/mongo/db/update/path_support.cpp +++ b/src/mongo/db/update/path_support.cpp @@ -420,7 +420,7 @@ Status extractFullEqualityMatches(const MatchExpression& root, } Status extractEqualityMatches(const MatchExpression& root, EqualityMatches* equalities) { - return _extractFullEqualityMatches(root, NULL, equalities); + return _extractFullEqualityMatches(root, nullptr, equalities); } Status addEqualitiesToDoc(const EqualityMatches& equalities, mutablebson::Document* doc) { diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp index b9c3c0685a8..ecd2fa9bab5 100644 --- a/src/mongo/db/update/path_support_test.cpp +++ b/src/mongo/db/update/path_support_test.cpp @@ -724,7 +724,7 @@ public: void addPath(const string& path) { _ownedPaths.push_back(std::make_unique<FieldRef>(path)); - FieldRef const* conflictPath = NULL; + FieldRef const* conflictPath = nullptr; ASSERT(_immutablePathSet.insert(_ownedPaths.back().get(), &conflictPath)); } diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp index 4cd79d7b1cf..7dab889aa35 100644 --- 
a/src/mongo/db/update/update_driver.cpp +++ b/src/mongo/db/update/update_driver.cpp @@ -261,7 +261,7 @@ Status UpdateDriver::update(StringData matchedField, _affectIndices = ((_updateType == UpdateType::kReplacement || _updateType == UpdateType::kPipeline) && - (_indexedFields != NULL)); + (_indexedFields != nullptr)); _logDoc.reset(); LogBuilder logBuilder(_logDoc.root()); diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp index f3d0b6f2ebb..c4947ebba57 100644 --- a/src/mongo/dbtests/framework.cpp +++ b/src/mongo/dbtests/framework.cpp @@ -66,7 +66,7 @@ namespace dbtests { int runDbTests(int argc, char** argv) { frameworkGlobalParams.perfHist = 1; - frameworkGlobalParams.seed = time(0); + frameworkGlobalParams.seed = time(nullptr); frameworkGlobalParams.runsPerTest = 1; registerShutdownTask([] { diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp index 65b4b142ac1..e7398a5116d 100644 --- a/src/mongo/dbtests/jsobjtests.cpp +++ b/src/mongo/dbtests/jsobjtests.cpp @@ -1072,9 +1072,9 @@ class append { public: void run() { BSONObjBuilder b; - b.appendOID("a", 0); - b.appendOID("b", 0, false); - b.appendOID("c", 0, true); + b.appendOID("a", nullptr); + b.appendOID("b", nullptr, false); + b.appendOID("c", nullptr, true); BSONObj o = b.obj(); ASSERT(o["a"].__oid().toString() == "000000000000000000000000"); ASSERT(o["b"].__oid().toString() == "000000000000000000000000"); @@ -1086,7 +1086,7 @@ class increasing { public: BSONObj g() { BSONObjBuilder b; - b.appendOID("_id", 0, true); + b.appendOID("_id", nullptr, true); return b.obj(); } void run() { @@ -1869,7 +1869,7 @@ public: state = 1; } catch (std::exception& e) { state = 2; - ASSERT(strstr(e.what(), "_id: 5") != NULL); + ASSERT(strstr(e.what(), "_id: 5") != nullptr); } free(crap); ASSERT_EQUALS(2, state); diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp index 7f06eea59f1..051bc478dbc 100644 --- a/src/mongo/dbtests/jstests.cpp +++ 
b/src/mongo/dbtests/jstests.cpp @@ -141,25 +141,25 @@ public: void run() { unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); - s->invoke("x=5;", 0, 0); + s->invoke("x=5;", nullptr, nullptr); ASSERT(5 == s->getNumber("x")); - s->invoke("return 17;", 0, 0); + s->invoke("return 17;", nullptr, nullptr); ASSERT(17 == s->getNumber("__returnValue")); - s->invoke("function(){ return 18; }", 0, 0); + s->invoke("function(){ return 18; }", nullptr, nullptr); ASSERT(18 == s->getNumber("__returnValue")); s->setNumber("x", 1.76); - s->invoke("return x == 1.76; ", 0, 0); + s->invoke("return x == 1.76; ", nullptr, nullptr); ASSERT(s->getBoolean("__returnValue")); s->setNumber("x", 1.76); - s->invoke("return x == 1.79; ", 0, 0); + s->invoke("return x == 1.79; ", nullptr, nullptr); ASSERT(!s->getBoolean("__returnValue")); BSONObj obj = BSON("" << 11.0); - s->invoke("function( z ){ return 5 + z; }", &obj, 0); + s->invoke("function( z ){ return 5 + z; }", &obj, nullptr); ASSERT_EQUALS(16, s->getNumber("__returnValue")); } }; @@ -240,12 +240,12 @@ public: unique_ptr<Scope> scope((getGlobalScriptEngine()->*scopeFactory)()); // No error is logged for a valid statement. - ASSERT_EQUALS(0, scope->invoke("validStatement = true", 0, 0)); + ASSERT_EQUALS(0, scope->invoke("validStatement = true", nullptr, nullptr)); ASSERT(!_logger.logged()); // An error is logged for an invalid statement. 
try { - scope->invoke("notAFunction()", 0, 0); + scope->invoke("notAFunction()", nullptr, nullptr); } catch (const DBException&) { // ignore the exception; just test that we logged something } @@ -276,48 +276,48 @@ public: << "sara"); s->setObject("blah", o); - s->invoke("return blah.x;", 0, 0); + s->invoke("return blah.x;", nullptr, nullptr); ASSERT_EQUALS(17, s->getNumber("__returnValue")); - s->invoke("return blah.y;", 0, 0); + s->invoke("return blah.y;", nullptr, nullptr); ASSERT_EQUALS("eliot", s->getString("__returnValue")); - s->invoke("return this.z;", 0, &o); + s->invoke("return this.z;", nullptr, &o); ASSERT_EQUALS("sara", s->getString("__returnValue")); - s->invoke("return this.z == 'sara';", 0, &o); + s->invoke("return this.z == 'sara';", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("this.z == 'sara';", 0, &o); + s->invoke("this.z == 'sara';", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("this.z == 'asara';", 0, &o); + s->invoke("this.z == 'asara';", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("return this.x == 17;", 0, &o); + s->invoke("return this.x == 17;", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("return this.x == 18;", 0, &o); + s->invoke("return this.x == 18;", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function(){ return this.x == 17; }", 0, &o); + s->invoke("function(){ return this.x == 17; }", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("function(){ return this.x == 18; }", 0, &o); + s->invoke("function(){ return this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function (){ return this.x == 17; }", 0, &o); + s->invoke("function (){ return this.x == 17; }", nullptr, &o); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("function z(){ return this.x == 18; }", 0, &o); + 
s->invoke("function z(){ return this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function (){ this.x == 17; }", 0, &o); + s->invoke("function (){ this.x == 17; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("function z(){ this.x == 18; }", 0, &o); + s->invoke("function z(){ this.x == 18; }", nullptr, &o); ASSERT_EQUALS(false, s->getBoolean("__returnValue")); - s->invoke("x = 5; for( ; x <10; x++){ a = 1; }", 0, &o); + s->invoke("x = 5; for( ; x <10; x++){ a = 1; }", nullptr, &o); ASSERT_EQUALS(10, s->getNumber("x")); } }; @@ -328,12 +328,12 @@ public: void run() { unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); - s->invoke("z = { num : 1 };", 0, 0); + s->invoke("z = { num : 1 };", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT_EQUALS(1, out["num"].number()); ASSERT_EQUALS(1, out.nFields()); - s->invoke("z = { x : 'eliot' };", 0, 0); + s->invoke("z = { x : 'eliot' };", nullptr, nullptr); out = s->getObject("z"); ASSERT_EQUALS((string) "eliot", out["x"].valuestr()); ASSERT_EQUALS(1, out.nFields()); @@ -417,14 +417,15 @@ public: BSONObj out; - ASSERT_THROWS(s->invoke("blah.y = 'e'", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.a = 19;", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.zz.a = 19;", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("blah.zz = { a : 19 };", 0, 0), mongo::AssertionException); - ASSERT_THROWS(s->invoke("delete blah['x']", 0, 0), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.y = 'e'", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.a = 19;", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.zz.a = 19;", nullptr, nullptr), mongo::AssertionException); + ASSERT_THROWS(s->invoke("blah.zz = { a : 19 };", nullptr, nullptr), + mongo::AssertionException); + ASSERT_THROWS(s->invoke("delete blah['x']", nullptr, 
nullptr), mongo::AssertionException); // read-only object itself can be overwritten - s->invoke("blah = {}", 0, 0); + s->invoke("blah = {}", nullptr, nullptr); out = s->getObject("blah"); ASSERT(out.isEmpty()); @@ -456,13 +457,13 @@ public: } s->setObject("x", o); - s->invoke("return x.d.getTime() != 12;", 0, 0); + s->invoke("return x.d.getTime() != 12;", nullptr, nullptr); ASSERT_EQUALS(true, s->getBoolean("__returnValue")); - s->invoke("z = x.d.getTime();", 0, 0); + s->invoke("z = x.d.getTime();", nullptr, nullptr); ASSERT_EQUALS(123456789, s->getNumber("z")); - s->invoke("z = { z : x.d }", 0, 0); + s->invoke("z = { z : x.d }", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT(out["z"].type() == Date); } @@ -477,16 +478,16 @@ public: } s->setObject("x", o); - s->invoke("z = x.r.test( 'b' );", 0, 0); + s->invoke("z = x.r.test( 'b' );", nullptr, nullptr); ASSERT_EQUALS(false, s->getBoolean("z")); - s->invoke("z = x.r.test( 'a' );", 0, 0); + s->invoke("z = x.r.test( 'a' );", nullptr, nullptr); ASSERT_EQUALS(true, s->getBoolean("z")); - s->invoke("z = x.r.test( 'ba' );", 0, 0); + s->invoke("z = x.r.test( 'ba' );", nullptr, nullptr); ASSERT_EQUALS(false, s->getBoolean("z")); - s->invoke("z = { a : x.r };", 0, 0); + s->invoke("z = { a : x.r };", nullptr, nullptr); BSONObj out = s->getObject("z"); ASSERT_EQUALS((string) "^a", out["a"].regex()); @@ -505,7 +506,7 @@ public: " }" " assert(threw);" "}"; - ASSERT_EQUALS(s->invoke(code, &invalidRegex, NULL), 0); + ASSERT_EQUALS(s->invoke(code, &invalidRegex, nullptr), 0); } // array @@ -559,7 +560,7 @@ public: s->setObject("z", b.obj()); - ASSERT(s->invoke("y = { a : z.a , b : z.b , c : z.c , d: z.d }", 0, 0) == 0); + ASSERT(s->invoke("y = { a : z.a , b : z.b , c : z.c , d: z.d }", nullptr, nullptr) == 0); BSONObj out = s->getObject("y"); ASSERT_EQUALS(bsonTimestamp, out["a"].type()); @@ -592,7 +593,7 @@ public: ASSERT_EQUALS(NumberDouble, o["b"].type()); s->setObject("z", o); - s->invoke("return z", 0, 0); + 
s->invoke("return z", nullptr, nullptr); BSONObj out = s->getObject("__returnValue"); ASSERT_EQUALS(5, out["a"].number()); ASSERT_EQUALS(5.6, out["b"].number()); @@ -610,7 +611,7 @@ public: } s->setObject("z", o, false); - s->invoke("return z", 0, 0); + s->invoke("return z", nullptr, nullptr); out = s->getObject("__returnValue"); ASSERT_EQUALS(5, out["a"].number()); ASSERT_EQUALS(5.6, out["b"].number()); @@ -643,7 +644,7 @@ public: ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type()); ASSERT_EQUALS(NumberInt, out["a"].embeddedObjectUserCheck()["1"].type()); - s->invokeSafe("z.z = 5;", 0, 0); + s->invokeSafe("z.z = 5;", nullptr, nullptr); out = s->getObject("z"); ASSERT_EQUALS(5, out["z"].number()); ASSERT_EQUALS(NumberDouble, out["a"].embeddedObjectUserCheck()["0"].type()); @@ -913,10 +914,10 @@ public: for (int i = 5; i < 100; i += 10) { s->setObject("a", build(i), false); - s->invokeSafe("tojson( a )", 0, 0); + s->invokeSafe("tojson( a )", nullptr, nullptr); s->setObject("a", build(5), true); - s->invokeSafe("tojson( a )", 0, 0); + s->invokeSafe("tojson( a )", nullptr, nullptr); } } }; @@ -970,8 +971,8 @@ public: "function() { " " while (true) { } " "} ", - 0, - 0, + nullptr, + nullptr, 1); } catch (const DBException&) { caught = true; @@ -1040,8 +1041,8 @@ public: "function() { " " for (var i=0; i<1; i++) { ; } " "} ", - 0, - 0, + nullptr, + nullptr, 5 * 60 * 1000); } }; @@ -1111,8 +1112,8 @@ public: s->setObject("x", in); } - s->invokeSafe("myb = x.b; print( myb ); printjson( myb );", 0, 0); - s->invokeSafe("y = { c : myb };", 0, 0); + s->invokeSafe("myb = x.b; print( myb ); printjson( myb );", nullptr, nullptr); + s->invokeSafe("y = { c : myb };", nullptr, nullptr); BSONObj out = s->getObject("y"); ASSERT_EQUALS(BinData, out["c"].type()); @@ -1121,7 +1122,7 @@ public: ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false)); // check that BinData js class is utilized - s->invokeSafe("q = x.b.toString();", 0, 0); + s->invokeSafe("q = 
x.b.toString();", nullptr, nullptr); stringstream expected; expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")"; ASSERT_EQUALS(expected.str(), s->getString("q")); @@ -1130,12 +1131,12 @@ public: scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };"; string script = scriptBuilder.str(); - s->invokeSafe(script.c_str(), 0, 0); + s->invokeSafe(script.c_str(), nullptr, nullptr); out = s->getObject("z"); // pp( "out" , out["c"] ); ASSERT_EQUALS(0, in["b"].woCompare(out["c"], false)); - s->invokeSafe("a = { f: new BinData( 128, \"\" ) };", 0, 0); + s->invokeSafe("a = { f: new BinData( 128, \"\" ) };", nullptr, nullptr); out = s->getObject("a"); int len = -1; out["f"].binData(len); @@ -1187,14 +1188,14 @@ public: unique_ptr<Scope> s; s.reset((getGlobalScriptEngine()->*scopeFactory)()); - s->invokeSafe("x = 5;", 0, 0); + s->invokeSafe("x = 5;", nullptr, nullptr); { BSONObjBuilder b; s->append(b, "z", "x"); ASSERT_BSONOBJ_EQ(BSON("z" << 5), b.obj()); } - s->invokeSafe("x = function(){ return 17; }", 0, 0); + s->invokeSafe("x = function(){ return 17; }", nullptr, nullptr); BSONObj temp; { BSONObjBuilder b; @@ -1202,7 +1203,7 @@ public: temp = b.obj(); } - s->invokeSafe("foo = this.z();", 0, &temp); + s->invokeSafe("foo = this.z();", nullptr, &temp); ASSERT_EQUALS(17, s->getNumber("foo")); } }; @@ -1244,7 +1245,7 @@ public: s->setObject("val", BSONObj(reinterpret_cast<char*>(bits)).getOwned()); - s->invoke("val[\"a\"];", 0, 0); + s->invoke("val[\"a\"];", nullptr, nullptr); ASSERT_TRUE(std::isnan(s->getNumber("__returnValue"))); } }; @@ -1255,43 +1256,43 @@ public: void run() { unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); - s->invoke("x=5;", 0, 0); + s->invoke("x=5;", nullptr, nullptr); ASSERT_EQUALS(5, s->getNumber("__returnValue")); - s->invoke("x='test'", 0, 0); + s->invoke("x='test'", nullptr, nullptr); ASSERT_EQUALS("test", s->getString("__returnValue")); - s->invoke("x='return'", 0, 0); + 
s->invoke("x='return'", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("return 'return'", 0, 0); + s->invoke("return 'return'", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("x = ' return '", 0, 0); + s->invoke("x = ' return '", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("x = \" return \"", 0, 0); + s->invoke("x = \" return \"", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("x = \"' return '\"", 0, 0); + s->invoke("x = \"' return '\"", nullptr, nullptr); ASSERT_EQUALS("' return '", s->getString("__returnValue")); - s->invoke("x = '\" return \"'", 0, 0); + s->invoke("x = '\" return \"'", nullptr, nullptr); ASSERT_EQUALS("\" return \"", s->getString("__returnValue")); - s->invoke(";return 5", 0, 0); + s->invoke(";return 5", nullptr, nullptr); ASSERT_EQUALS(5, s->getNumber("__returnValue")); - s->invoke("String('return')", 0, 0); + s->invoke("String('return')", nullptr, nullptr); ASSERT_EQUALS("return", s->getString("__returnValue")); - s->invoke("String(' return ')", 0, 0); + s->invoke("String(' return ')", nullptr, nullptr); ASSERT_EQUALS(" return ", s->getString("__returnValue")); - s->invoke("String(\"'return\")", 0, 0); + s->invoke("String(\"'return\")", nullptr, nullptr); ASSERT_EQUALS("'return", s->getString("__returnValue")); - s->invoke("String('\"return')", 0, 0); + s->invoke("String('\"return')", nullptr, nullptr); ASSERT_EQUALS("\"return", s->getString("__returnValue")); } }; @@ -1302,7 +1303,7 @@ public: static BSONObj callback(const BSONObj& args, void* data) { auto scope = static_cast<Scope*>(data); - scope->invoke("x = 10;", 0, 0); + scope->invoke("x = 10;", nullptr, nullptr); return BSONObj(); } @@ -1311,7 +1312,7 @@ public: unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)()); s->injectNative("foo", callback, s.get()); - s->invoke("var x = 1; foo();", 0, 0); + 
s->invoke("var x = 1; foo();", nullptr, nullptr); ASSERT_EQUALS(s->getNumberInt("x"), 10); } }; @@ -1325,7 +1326,7 @@ public: { bool threwException = false; try { - s->invoke("\"use strict\"; x = 10;", 0, 0); + s->invoke("\"use strict\"; x = 10;", nullptr, nullptr); } catch (...) { threwException = true; @@ -1340,7 +1341,7 @@ public: { bool threwException = false; try { - s->invoke("UUID(1,2,3,4,5);", 0, 0); + s->invoke("UUID(1,2,3,4,5);", nullptr, nullptr); } catch (...) { threwException = true; @@ -1368,7 +1369,9 @@ public: s->injectNative("foo", sidecarThrowingFunc); ASSERT_THROWS_WITH_CHECK( - s->invoke("try { foo(); } catch (e) { throw e; } throw new Error(\"bar\");", 0, 0), + s->invoke("try { foo(); } catch (e) { throw e; } throw new Error(\"bar\");", + nullptr, + nullptr), ExceptionFor<ErrorCodes::ForTestingErrorExtraInfo>, [](const auto& ex) { ASSERT_EQ(ex->data, 123); }); } @@ -1423,7 +1426,7 @@ class ConvertShardKeyToHashed { public: void check(shared_ptr<Scope> s, const mongo::BSONObj& o) { s->setObject("o", o, true); - s->invoke("return convertShardKeyToHashed(o);", 0, 0); + s->invoke("return convertShardKeyToHashed(o);", nullptr, nullptr); const auto scopeShardKey = s->getNumber("__returnValue"); // Wrapping to form a proper element @@ -1438,7 +1441,7 @@ public: void checkWithSeed(shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) { s->setObject("o", o, true); s->setNumber("seed", seed); - s->invoke("return convertShardKeyToHashed(o, seed);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed);", nullptr, nullptr); const auto scopeShardKey = s->getNumber("__returnValue"); // Wrapping to form a proper element @@ -1450,19 +1453,19 @@ public: } void checkNoArgs(shared_ptr<Scope> s) { - s->invoke("return convertShardKeyToHashed();", 0, 0); + s->invoke("return convertShardKeyToHashed();", nullptr, nullptr); } void checkWithExtraArg(shared_ptr<Scope> s, const mongo::BSONObj& o, int seed) { s->setObject("o", o, true); s->setNumber("seed", seed); 
- s->invoke("return convertShardKeyToHashed(o, seed, 1);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed, 1);", nullptr, nullptr); } void checkWithBadSeed(shared_ptr<Scope> s, const mongo::BSONObj& o) { s->setObject("o", o, true); s->setString("seed", "sunflower"); - s->invoke("return convertShardKeyToHashed(o, seed);", 0, 0); + s->invoke("return convertShardKeyToHashed(o, seed);", nullptr, nullptr); } void run() { @@ -1520,12 +1523,12 @@ public: "let f = async function() { return 28; };" "f().then(function(y){ x = y; });" "return x;", - 0, - 0); + nullptr, + nullptr); ASSERT(0 == scope->getNumber("__returnValue")); /* When we return x the second time the value has been updated * by the async function */ - scope->invoke("return x;", 0, 0); + scope->invoke("return x;", nullptr, nullptr); ASSERT(28 == scope->getNumber("__returnValue")); } }; diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h index 8995e92ab19..aaa4968d58e 100644 --- a/src/mongo/dbtests/mock/mock_dbclient_connection.h +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h @@ -78,7 +78,7 @@ public: mongo::Query query = mongo::Query(), int nToReturn = 0, int nToSkip = 0, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; @@ -107,7 +107,7 @@ public: unsigned long long query(std::function<void(mongo::DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, mongo::Query query, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0) override; @@ -120,7 +120,9 @@ public: mongo::Message& response, bool assertOk, std::string* actualServer) override; - void say(mongo::Message& toSend, bool isRetry = false, std::string* actualServer = 0) override; + void say(mongo::Message& toSend, + bool isRetry = false, + std::string* actualServer = nullptr) 
override; bool lazySupported() const override; private: diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h index 1ccddf63942..6c0ccc8170d 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.h +++ b/src/mongo/dbtests/mock/mock_remote_db_server.h @@ -166,7 +166,7 @@ public: mongo::Query query = mongo::Query(), int nToReturn = 0, int nToSkip = 0, - const mongo::BSONObj* fieldsToReturn = 0, + const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0); diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp index f6158af9abb..fb17ad1a960 100644 --- a/src/mongo/dbtests/mock/mock_replica_set.cpp +++ b/src/mongo/dbtests/mock/mock_replica_set.cpp @@ -166,7 +166,7 @@ vector<string> MockReplicaSet::getSecondaries() const { } MockRemoteDBServer* MockReplicaSet::getNode(const string& hostAndPort) { - return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(NULL)); + return mapFindWithDefault(_nodeMap, hostAndPort, static_cast<MockRemoteDBServer*>(nullptr)); } repl::ReplSetConfig MockReplicaSet::getReplConfig() const { @@ -342,7 +342,7 @@ void MockReplicaSet::mockReplSetGetStatusCmd() { // TODO: syncingTo fullStatBuilder.append("set", _setName); - fullStatBuilder.appendTimeT("date", time(0)); + fullStatBuilder.appendTimeT("date", time(nullptr)); fullStatBuilder.append("myState", getState(node->getServerAddress())); fullStatBuilder.append("members", hostsField); fullStatBuilder.append("ok", true); diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp index 44a6c09e8aa..2a44c20ec31 100644 --- a/src/mongo/dbtests/mock_replica_set_test.cpp +++ b/src/mongo/dbtests/mock_replica_set_test.cpp @@ -68,7 +68,7 @@ TEST(MockReplicaSetTest, GetNode) { ASSERT_EQUALS("$n0:27017", replSet.getNode("$n0:27017")->getServerAddress()); ASSERT_EQUALS("$n1:27017", 
replSet.getNode("$n1:27017")->getServerAddress()); ASSERT_EQUALS("$n2:27017", replSet.getNode("$n2:27017")->getServerAddress()); - ASSERT(replSet.getNode("$n3:27017") == NULL); + ASSERT(replSet.getNode("$n3:27017") == nullptr); } TEST(MockReplicaSetTest, IsMasterNode0) { diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp index 2cc4eccd7fa..48e9a7adfb1 100644 --- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp +++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp @@ -76,7 +76,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; unique_ptr<CollectionScan> scan( - new CollectionScan(&_opCtx, collection(), params, ws.get(), NULL)); + new CollectionScan(&_opCtx, collection(), params, ws.get(), nullptr)); // Create a plan executor to hold it auto qr = std::make_unique<QueryRequest>(nss); @@ -138,7 +138,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYiel // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -153,11 +153,11 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorToleratesDeletedDocumentsDuringYiel // Make sure that the PlanExecutor moved forward over the deleted data. We don't see foo==10 or // foo==11. 
for (int i = 12; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, nullptr)); } TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIsDropped) { @@ -166,7 +166,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -177,7 +177,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs exec->restoreState(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(10, obj["foo"].numberInt()); exec->saveState(); @@ -195,7 +195,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -205,7 +205,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro // Read the rest of the collection. for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } } @@ -218,7 +218,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp // Read some of it. 
for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -228,7 +228,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp // Read the rest of the collection. for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } } @@ -245,7 +245,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenAllIndexesDropped) { // Start scanning the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -266,7 +266,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDr // Start scanning the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -289,7 +289,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsD // Start scanning the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -301,7 +301,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsD // Scan the rest of the index. 
for (int i = 10; i < N(); ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } } @@ -312,7 +312,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe // Read some of it. for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -325,7 +325,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); exec->restoreState(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(10, obj["foo"].numberInt()); exec->saveState(); @@ -344,7 +344,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas // Partially scan the collection. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -371,7 +371,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase) // Partially scan the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -400,7 +400,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnRestartCatalog) { // Partially scan the collection. 
BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -421,7 +421,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllInd // Partially scan the index. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj.firstElement().numberInt()); } @@ -438,7 +438,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) // Partially scan the collection. BSONObj obj; for (int i = 0; i < 10; ++i) { - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); } @@ -449,7 +449,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorSurvivesCollectionTruncate) exec->restoreState(); // Since all documents in the collection have been deleted, the PlanExecutor should issue EOF. - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&obj, nullptr)); } } // namespace mongo diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp index 9e965b270c3..4709a63c8bc 100644 --- a/src/mongo/dbtests/plan_ranking.cpp +++ b/src/mongo/dbtests/plan_ranking.cpp @@ -147,7 +147,7 @@ public: * Was a backup plan picked during the ranking process? 
*/ bool hasBackupPlan() const { - ASSERT(NULL != _mps.get()); + ASSERT(nullptr != _mps.get()); return _mps->hasBackupPlan(); } @@ -256,7 +256,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Turn on the "force intersect" option. // This will be reverted by PlanRankingTestBase's destructor when the test completes. @@ -297,7 +297,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -331,7 +331,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -370,7 +370,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); // Prefer the fully covered plan. @@ -403,7 +403,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // {a: 100} is super selective so choose that. 
QuerySolution* soln = pickBestPlan(cq.get()); @@ -439,7 +439,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // {a: 100} is super selective so choose that. QuerySolution* soln = pickBestPlan(cq.get()); @@ -498,7 +498,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); QuerySolution* soln = pickBestPlan(cq.get()); @@ -532,7 +532,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // No results will be returned during the trial period, // so we expect to choose {d: 1, e: 1}, as it allows us @@ -569,7 +569,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Use index on 'b'. QuerySolution* soln = pickBestPlan(cq.get()); @@ -601,7 +601,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Expect to use index {a: 1, b: 1}. 
QuerySolution* soln = pickBestPlan(cq.get()); diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index d790c24dca0..4d645862ffc 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -112,7 +112,7 @@ public: auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - verify(NULL != cq.get()); + verify(nullptr != cq.get()); // Make the stage. unique_ptr<PlanStage> root( @@ -151,14 +151,14 @@ public: const Collection* coll = db->getCollection(&_opCtx, nss); unique_ptr<WorkingSet> ws(new WorkingSet()); - IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), NULL); - unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, NULL, coll)); + IndexScan* ix = new IndexScan(&_opCtx, ixparams, ws.get(), nullptr); + unique_ptr<PlanStage> root(new FetchStage(&_opCtx, ws.get(), ix, nullptr, coll)); auto qr = std::make_unique<QueryRequest>(nss); auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - verify(NULL != cq.get()); + verify(nullptr != cq.get()); // Hand the plan off to the executor. 
auto statusWithPlanExecutor = PlanExecutor::make(&_opCtx, @@ -334,7 +334,7 @@ protected: BSONObj objOut; int idcount = 0; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&objOut, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&objOut, nullptr))) { ASSERT_EQUALS(expectedIds[idcount], objOut["_id"].numberInt()); ++idcount; } @@ -358,7 +358,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotControl) { auto exec = makeCollScanExec(coll, filterObj); BSONObj objOut; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, nullptr)); ASSERT_EQUALS(2, objOut["a"].numberInt()); forceDocumentMove(); @@ -382,7 +382,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotTest) { auto exec = makeIndexScanExec(ctx.db(), indexSpec, 2, 5); BSONObj objOut; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&objOut, nullptr)); ASSERT_EQUALS(2, objOut["a"].numberInt()); forceDocumentMove(); diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp index aab96cdff95..1bad15c7280 100644 --- a/src/mongo/dbtests/query_stage_and.cpp +++ b/src/mongo/dbtests/query_stage_and.cpp @@ -205,12 +205,12 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0 // in that order. Read half of them. 
@@ -292,13 +292,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar <= 19 (descending). params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 19); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // First call to work reads the first result from the children. The first result for the // first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan @@ -372,13 +372,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15. 
16, 17, 18, 19, 20 @@ -421,13 +421,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 20 << "" << big); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Stage execution should fail. ASSERT_EQUALS(-1, countResults(ah.get())); @@ -468,13 +468,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20. 
@@ -510,18 +510,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are: // foo == 10, 11, 12, 13, 14, 15. @@ -568,18 +568,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Stage execution should fail. 
ASSERT_EQUALS(-1, countResults(ah.get())); @@ -613,13 +613,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar == 5. Index scan should be eof. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 5); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); int count = 0; int works = 0; @@ -669,7 +669,7 @@ public: // Foo >= 100 auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 100); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar <= 100 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); @@ -681,7 +681,7 @@ public: << ""); params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly; params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -717,17 +717,17 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr); // First child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. 
- FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll); ah->addChild(fetch); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Check that the AndHash stage returns docs {foo: 10, bar: 10} // through {foo: 20, bar: 20}. @@ -769,16 +769,16 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr); // Second child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll); ah->addChild(fetch); // Check that the AndHash stage returns docs {foo: 10, bar: 10} @@ -955,13 +955,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Scan over bar == 1. 
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Get the set of RecordIds in our collection to use later. set<RecordId> data; @@ -1072,19 +1072,19 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // baz == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(50, countResults(ah.get())); } @@ -1117,13 +1117,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Bar == 20, not EOF. 
params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1160,13 +1160,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 20. params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1199,13 +1199,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // Intersect with 7 <= bar < 10000 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 10000); - ah->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ah->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); WorkingSetID lastId = WorkingSet::INVALID_ID; @@ -1261,18 +1261,18 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* firstScan = new IndexScan(&_opCtx, params, &ws, nullptr); // First child of the AND_SORTED stage is a 
Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, firstScan, nullptr, coll); as->addChild(fetch); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); for (int i = 0; i < 50; i++) { BSONObj obj = getNext(as.get(), &ws); @@ -1313,17 +1313,17 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + as->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, NULL); + IndexScan* secondScan = new IndexScan(&_opCtx, params, &ws, nullptr); // Second child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. - FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, NULL, coll); + FetchStage* fetch = new FetchStage(&_opCtx, &ws, secondScan, nullptr, coll); as->addChild(fetch); for (int i = 0; i < 50; i++) { diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index 615f30f6869..2a5e808e6b0 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -114,7 +114,7 @@ public: // Use the runner to count the number of objects scanned. 
int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { ++count; } ASSERT_EQUALS(PlanExecutor::IS_EOF, state); @@ -130,7 +130,8 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -227,7 +228,7 @@ public: int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { // Make sure we get the objects in the order we want ASSERT_EQUALS(count, obj["foo"].numberInt()); ++count; @@ -262,7 +263,7 @@ public: int count = 0; PlanExecutor::ExecState state; - for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL));) { + for (BSONObj obj; PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr));) { ++count; ASSERT_EQUALS(numObj() - count, obj["foo"].numberInt()); } @@ -293,7 +294,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr)); int count = 0; while (count < 10) { @@ -352,7 +353,7 @@ public: params.tailable = false; WorkingSet ws; - unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, NULL)); + unique_ptr<PlanStage> scan(new CollectionScan(&_opCtx, coll, params, &ws, nullptr)); int count = 0; while (count < 10) { diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index 
80770a69e70..003beb748d9 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -57,7 +57,9 @@ const int kInterjections = kDocuments; class CountStageTest { public: CountStageTest() - : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {} + : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), + _coll(nullptr) {} virtual ~CountStageTest() {} @@ -94,7 +96,7 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, NULL)); + unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, _coll, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -129,7 +131,7 @@ public: Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc), newDoc, true, - NULL, + nullptr, &args); wunit.commit(); } diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp index f10b7390243..708d68e068a 100644 --- a/src/mongo/dbtests/query_stage_delete.cpp +++ b/src/mongo/dbtests/query_stage_delete.cpp @@ -90,7 +90,8 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -150,7 +151,7 @@ public: std::move(deleteStageParams), &ws, coll, - new CollectionScan(&_opCtx, coll, collScanParams, &ws, NULL)); + new CollectionScan(&_opCtx, coll, collScanParams, &ws, nullptr)); const DeleteStats* stats = static_cast<const DeleteStats*>(deleteStage.getSpecificStats()); diff --git a/src/mongo/dbtests/query_stage_fetch.cpp 
b/src/mongo/dbtests/query_stage_fetch.cpp index 2eb09c39057..f8bf178e5c3 100644 --- a/src/mongo/dbtests/query_stage_fetch.cpp +++ b/src/mongo/dbtests/query_stage_fetch.cpp @@ -138,7 +138,7 @@ public: } unique_ptr<FetchStage> fetchStage( - new FetchStage(&_opCtx, &ws, mockStage.release(), NULL, coll)); + new FetchStage(&_opCtx, &ws, mockStage.release(), nullptr, coll)); WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state; diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp index d5112419175..d38c26eb009 100644 --- a/src/mongo/dbtests/query_stage_ixscan.cpp +++ b/src/mongo/dbtests/query_stage_ixscan.cpp @@ -47,7 +47,9 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; class IndexScanTest { public: IndexScanTest() - : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), _ctx(&_opCtx, ns()), _coll(NULL) {} + : _dbLock(&_opCtx, nsToDatabaseSubstring(ns()), MODE_X), + _ctx(&_opCtx, ns()), + _coll(nullptr) {} virtual ~IndexScanTest() {} @@ -109,7 +111,7 @@ public: params.direction = 1; // This child stage gets owned and freed by the caller. 
- MatchExpression* filter = NULL; + MatchExpression* filter = nullptr; return new IndexScan(&_opCtx, params, &_ws, filter); } @@ -133,7 +135,7 @@ public: oil.intervals.push_back(Interval(bob.obj(), startInclusive, endInclusive)); params.bounds.fields.push_back(oil); - MatchExpression* filter = NULL; + MatchExpression* filter = nullptr; return new IndexScan(&_opCtx, params, &_ws, filter); } diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp index aa6cb5e174a..5656174f7b3 100644 --- a/src/mongo/dbtests/query_stage_merge_sort.cpp +++ b/src/mongo/dbtests/query_stage_merge_sort.cpp @@ -173,11 +173,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -189,9 +189,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); ASSERT((first.hasField("a") && second.hasField("b")) || @@ -200,7 +200,7 @@ public: // Should be done now. 
BSONObj foo; - ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL)); + ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr)); } }; @@ -238,11 +238,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -253,9 +253,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); ASSERT((first.hasField("a") && second.hasField("b")) || @@ -264,7 +264,7 @@ public: // Should be done now. 
BSONObj foo; - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr)); } }; @@ -302,11 +302,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -318,9 +318,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; // We inserted N objects but we get 2 * N from the runner because of dups. - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); ASSERT((first.hasField("a") && second.hasField("b")) || @@ -329,7 +329,7 @@ public: // Should be done now. 
BSONObj foo; - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr)); } }; @@ -370,13 +370,13 @@ public: auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); params.bounds.startKey = objWithMaxKey(1); params.bounds.endKey = objWithMinKey(1); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); params.bounds.startKey = objWithMaxKey(1); params.bounds.endKey = objWithMinKey(1); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -387,9 +387,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(N - i - 1, first["c"].numberInt()); ASSERT((first.hasField("a") && second.hasField("b")) || @@ -398,7 +398,7 @@ public: // Should be done now. 
BSONObj foo; - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr)); } }; @@ -436,13 +436,13 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:51 (EOF) params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); params.bounds.startKey = BSON("" << 51 << "" << MinKey); params.bounds.endKey = BSON("" << 51 << "" << MaxKey); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -454,14 +454,14 @@ public: // Only getting results from the a:1 index scan. for (int i = 0; i < N; ++i) { BSONObj obj; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["c"].numberInt()); ASSERT_EQUALS(1, obj["a"].numberInt()); } // Should be done now. 
BSONObj foo; - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr)); } }; @@ -493,7 +493,7 @@ public: BSONObj indexSpec = BSON(index << 1 << "foo" << 1); addIndex(indexSpec); auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); } unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -505,7 +505,7 @@ public: for (int i = 0; i < numIndices; ++i) { BSONObj obj; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); ASSERT_EQUALS(i, obj["foo"].numberInt()); string index(1, 'a' + i); ASSERT_EQUALS(1, obj[index].numberInt()); @@ -513,7 +513,7 @@ public: // Should be done now. BSONObj foo; - ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, NULL)); + ASSERT_EQUALS(PlanExecutor::IS_EOF, exec->getNext(&foo, nullptr)); } }; @@ -547,7 +547,7 @@ public: BSONObj indexSpec = BSON(index << 1 << "foo" << 1); addIndex(indexSpec); auto params = makeIndexScanParams(&_opCtx, getIndex(indexSpec, coll)); - ms->addChild(new IndexScan(&_opCtx, params, &ws, NULL)); + ms->addChild(new IndexScan(&_opCtx, params, &ws, nullptr)); } set<RecordId> recordIds; @@ -765,11 +765,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -781,9 +781,9 @@ public: for (int i = 0; 
i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); // {a: 1, c: i, d: "abc"} should precede {b: 1, c: i, d: "bca"}. @@ -792,7 +792,7 @@ public: // Should be done now. BSONObj foo; - ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL)); + ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr)); } }; @@ -833,11 +833,11 @@ public: // a:1 auto params = makeIndexScanParams(&_opCtx, getIndex(firstIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, getIndex(secondIndex, coll)); - ms->addChild(new IndexScan(&_opCtx, params, ws.get(), NULL)); + ms->addChild(new IndexScan(&_opCtx, params, ws.get(), nullptr)); unique_ptr<FetchStage> fetchStage = make_unique<FetchStage>(&_opCtx, ws.get(), ms, nullptr, coll); @@ -849,9 +849,9 @@ public: for (int i = 0; i < N; ++i) { BSONObj first, second; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&first, nullptr)); first = first.getOwned(); - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&second, nullptr)); ASSERT_EQUALS(first["c"].numberInt(), second["c"].numberInt()); ASSERT_EQUALS(i, first["c"].numberInt()); // {b: 1, c: i, d: "cba"} should precede {a: 1, c: i, d: "abc"}. @@ -860,7 +860,7 @@ public: // Should be done now. 
BSONObj foo; - ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, NULL)); + ASSERT_NOT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&foo, nullptr)); } }; diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp index 622d284045b..97c50d74952 100644 --- a/src/mongo/dbtests/query_stage_multiplan.cpp +++ b/src/mongo/dbtests/query_stage_multiplan.cpp @@ -267,7 +267,7 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) { int results = 0; BSONObj obj; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, nullptr))) { ASSERT_EQUALS(obj["foo"].numberInt(), 7); ++results; } @@ -378,7 +378,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) { auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(qr)); verify(statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); - ASSERT(NULL != cq.get()); + ASSERT(nullptr != cq.get()); // Force index intersection. bool forceIxisectOldValue = internalQueryForceIntersectionPlans.load(); diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp index a50de5f0d3f..1982273bc79 100644 --- a/src/mongo/dbtests/query_stage_sort.cpp +++ b/src/mongo/dbtests/query_stage_sort.cpp @@ -175,7 +175,7 @@ public: // Look at pairs of objects to make sure that the sort order is pairwise (and therefore // totally) correct. BSONObj last; - ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, NULL)); + ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&last, nullptr)); last = last.getOwned(); // Count 'last'. 
@@ -183,7 +183,7 @@ public: BSONObj current; PlanExecutor::ExecState state; - while (PlanExecutor::ADVANCED == (state = exec->getNext(&current, NULL))) { + while (PlanExecutor::ADVANCED == (state = exec->getNext(&current, nullptr))) { int cmp = sgn(dps::compareObjectsAccordingToSort(current, last, params.pattern)); // The next object should be equal to the previous or oriented according to the sort // pattern. @@ -364,7 +364,7 @@ public: CollectionUpdateArgs args; { WriteUnitOfWork wuow(&_opCtx); - coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, NULL, &args); + coll->updateDocument(&_opCtx, *it, oldDoc, newDoc(oldDoc), false, nullptr, &args); wuow.commit(); } exec->restoreState(); @@ -382,7 +382,7 @@ public: oldDoc = coll->docFor(&_opCtx, *it); { WriteUnitOfWork wuow(&_opCtx); - coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc(oldDoc), false, NULL, &args); + coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc(oldDoc), false, nullptr, &args); wuow.commit(); } } @@ -571,7 +571,7 @@ public: &_opCtx, std::move(ws), std::move(fetchStage), coll, PlanExecutor::NO_YIELD); auto exec = std::move(statusWithPlanExecutor.getValue()); - PlanExecutor::ExecState runnerState = exec->getNext(NULL, NULL); + PlanExecutor::ExecState runnerState = exec->getNext(nullptr, nullptr); ASSERT_EQUALS(PlanExecutor::FAILURE, runnerState); } }; diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp index ec2a54934e3..ffa55f0040b 100644 --- a/src/mongo/dbtests/query_stage_tests.cpp +++ b/src/mongo/dbtests/query_stage_tests.cpp @@ -102,7 +102,7 @@ public: int count = 0; PlanExecutor::ExecState state; - for (RecordId dl; PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &dl));) { + for (RecordId dl; PlanExecutor::ADVANCED == (state = exec->getNext(nullptr, &dl));) { ++count; } ASSERT_EQUALS(PlanExecutor::IS_EOF, state); diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp index 99829659d0c..adafcaadccd
100644 --- a/src/mongo/dbtests/query_stage_update.cpp +++ b/src/mongo/dbtests/query_stage_update.cpp @@ -131,7 +131,8 @@ public: params.direction = CollectionScanParams::FORWARD; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); @@ -152,7 +153,8 @@ public: params.direction = direction; params.tailable = false; - unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, collection, params, &ws, NULL)); + unique_ptr<CollectionScan> scan( + new CollectionScan(&_opCtx, collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index c981c80a888..a41e2d9054b 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -220,7 +220,7 @@ public: WriteUnitOfWork wunit(&_opCtx); Database* db = ctx.db(); if (db->getCollection(&_opCtx, nss())) { - _collection = NULL; + _collection = nullptr; db->dropCollection(&_opCtx, nss()).transitional_ignore(); } _collection = db->createCollection(&_opCtx, nss(), CollectionOptions(), false); @@ -338,7 +338,8 @@ public: } // Create a cursor on the collection, with a batch size of 200. - unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), "", 0, 0, 0, 0, 200); + unique_ptr<DBClientCursor> cursor = + _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200); // Count 500 results, spanning a few batches of documents. for (int i = 0; i < 500; ++i) { @@ -383,7 +384,8 @@ public: } // Create a cursor on the collection, with a batch size of 200. 
- unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), "", 0, 0, 0, 0, 200); + unique_ptr<DBClientCursor> cursor = + _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200); CursorId cursorId = cursor->getCursorId(); // Count 500 results, spanning a few batches of documents. @@ -461,7 +463,7 @@ public: Query().hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); ASSERT(0 != c->getCursorId()); while (c->more()) @@ -493,7 +495,7 @@ public: Query().hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); ASSERT_EQUALS(0, c->getCursorId()); ASSERT(c->isDead()); @@ -502,7 +504,7 @@ public: QUERY("a" << 1).hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); ASSERT(0 != c->getCursorId()); ASSERT(!c->isDead()); @@ -528,7 +530,7 @@ public: Query().hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); c->next(); c->next(); @@ -560,7 +562,7 @@ public: Query().hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); c->next(); c->next(); @@ -594,7 +596,7 @@ public: Query().hint(BSON("$natural" << 1)), 2, 0, - 0, + nullptr, QueryOption_CursorTailable); c->next(); c->next(); @@ -616,7 +618,8 @@ public: const char* ns = "unittests.querytests.TailCappedOnly"; _client.insert(ns, BSONObj()); ASSERT_THROWS( - _client.query(NamespaceString(ns), BSONObj(), 0, 0, 0, QueryOption_CursorTailable), + _client.query( + NamespaceString(ns), BSONObj(), 0, 0, nullptr, QueryOption_CursorTailable), AssertionException); } }; @@ -629,8 +632,8 @@ public: void insertA(const char* ns, int a) { BSONObjBuilder b; - b.appendOID("_id", 0, true); - b.appendOID("value", 0, true); + b.appendOID("_id", nullptr, true); + b.appendOID("value", nullptr, true); b.append("a", a); insert(ns, b.obj()); } @@ -656,11 +659,15 @@ public: insertA(ns, 0); insertA(ns, 1); unique_ptr<DBClientCursor> c1 = _client.query( - NamespaceString(ns), QUERY("a" << GT << -1), 0, 0, 0, 
QueryOption_CursorTailable); + NamespaceString(ns), QUERY("a" << GT << -1), 0, 0, nullptr, QueryOption_CursorTailable); OID id; id.init("000000000000000000000000"); - unique_ptr<DBClientCursor> c2 = _client.query( - NamespaceString(ns), QUERY("value" << GT << id), 0, 0, 0, QueryOption_CursorTailable); + unique_ptr<DBClientCursor> c2 = _client.query(NamespaceString(ns), + QUERY("value" << GT << id), + 0, + 0, + nullptr, + QueryOption_CursorTailable); c1->next(); c1->next(); ASSERT(!c1->more()); @@ -703,7 +710,7 @@ public: QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); ASSERT_EQUALS(2u, c->next()["ts"].timestamp().getInc()); @@ -714,7 +721,7 @@ public: QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); ASSERT_EQUALS(2u, c->next()["ts"].timestamp().getInc()); @@ -747,7 +754,7 @@ public: QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)).explain(), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); @@ -1370,7 +1377,7 @@ public: QUERY("i" << GT << 0).hint(BSON("$natural" << 1)), 0, 0, - 0, + nullptr, QueryOption_CursorTailable); int n = 0; while (c->more()) { @@ -1397,7 +1404,7 @@ public: void insertNext() { BSONObjBuilder b; - b.appendOID("_id", 0, true); + b.appendOID("_id", nullptr, true); b.append("i", _n++); insert(ns(), b.obj()); } @@ -1539,7 +1546,7 @@ public: QUERY("ts" << GTE << Timestamp(1000, j)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); BSONObj next = c->next(); @@ -1596,7 +1603,7 @@ public: QUERY("ts" << GTE << Timestamp(1000, j)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); BSONObj next = c->next(); @@ -1634,7 +1641,7 @@ public: QUERY("ts" << GTE << Timestamp(1000, 50)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(!c0->more()); @@ -1656,7 +1663,7 @@ public: QUERY("ts" << GTE << Timestamp(1000, 
50)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(!c->more()); @@ -1667,7 +1674,7 @@ public: QUERY("ts" << GTE << Timestamp(1000, 50)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay); ASSERT(c->more()); ASSERT_EQUALS(100u, c->next()["ts"].timestamp().getInc()); @@ -1752,7 +1759,7 @@ public: BSON("ts" << GTE << Timestamp(1000, 0)), 0, 0, - 0, + nullptr, QueryOption_OplogReplay | QueryOption_CursorTailable | QueryOption_Exhaust, message); diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index 71824e31bb1..d7d1607c1c2 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -117,12 +117,12 @@ void assertEmpty(OperationContext* opCtx, const NamespaceString& nss) { bool indexExists(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { auto databaseHolder = DatabaseHolder::get(opCtx); auto coll = databaseHolder->getDb(opCtx, nss.db())->getCollection(opCtx, nss); - return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != NULL; + return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, true) != nullptr; } bool indexReady(OperationContext* opCtx, const NamespaceString& nss, const string& idxName) { auto databaseHolder = DatabaseHolder::get(opCtx); auto coll = databaseHolder->getDb(opCtx, nss.db())->getCollection(opCtx, nss); - return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != NULL; + return coll->getIndexCatalog()->findIndexByName(opCtx, idxName, false) != nullptr; } size_t getNumIndexEntries(OperationContext* opCtx, const NamespaceString& nss, diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h index 2cceb303a56..a392f72b974 100644 --- a/src/mongo/executor/task_executor.h +++ b/src/mongo/executor/task_executor.h @@ -448,7 +448,7 @@ struct TaskExecutor::CallbackArgs { CallbackArgs(TaskExecutor* theExecutor, CallbackHandle theHandle, Status theStatus, - OperationContext* opCtx = NULL); + 
OperationContext* opCtx = nullptr); TaskExecutor* executor; CallbackHandle myHandle; diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp index a1f32e26726..a2bf4bffd15 100644 --- a/src/mongo/logger/ramlog.cpp +++ b/src/mongo/logger/ramlog.cpp @@ -43,8 +43,8 @@ using std::string; namespace { typedef std::map<string, RamLog*> RM; -stdx::mutex* _namedLock = NULL; -RM* _named = NULL; +stdx::mutex* _namedLock = nullptr; +RM* _named = nullptr; } // namespace @@ -58,7 +58,7 @@ RamLog::~RamLog() {} void RamLog::write(const std::string& str) { stdx::lock_guard<stdx::mutex> lk(_mutex); - _lastWrite = time(0); + _lastWrite = time(nullptr); _totalLinesWritten++; char* p = lines[(h + n) % N]; @@ -136,7 +136,7 @@ string RamLog::clean(const std::vector<const char*>& v, int i, string line) { string RamLog::linkify(const char* s) { const char* p = s; const char* h = strstr(p, "http://"); - if (h == 0) + if (h == nullptr) return s; const char* sp = h + 7; @@ -179,7 +179,7 @@ RamLog* RamLog::get(const std::string& name) { _named = new RM(); } - RamLog* result = mapFindWithDefault(*_named, name, static_cast<RamLog*>(NULL)); + RamLog* result = mapFindWithDefault(*_named, name, static_cast<RamLog*>(nullptr)); if (!result) { result = new RamLog(name); (*_named)[name] = result; @@ -189,9 +189,9 @@ RamLog* RamLog::get(const std::string& name) { RamLog* RamLog::getIfExists(const std::string& name) { if (!_named) - return NULL; + return nullptr; stdx::lock_guard<stdx::mutex> lk(*_namedLock); - return mapFindWithDefault(*_named, name, static_cast<RamLog*>(NULL)); + return mapFindWithDefault(*_named, name, static_cast<RamLog*>(nullptr)); } void RamLog::getNames(std::vector<string>& names) { diff --git a/src/mongo/logger/rotatable_file_manager.cpp b/src/mongo/logger/rotatable_file_manager.cpp index f5eb9ebe73f..4ba06af6f5c 100644 --- a/src/mongo/logger/rotatable_file_manager.cpp +++ b/src/mongo/logger/rotatable_file_manager.cpp @@ -61,7 +61,7 @@ StatusWithRotatableFileWriter 
RotatableFileManager::openFile(const std::string& } RotatableFileWriter* RotatableFileManager::getFile(const std::string& name) { - return mapFindWithDefault(_writers, name, static_cast<RotatableFileWriter*>(NULL)); + return mapFindWithDefault(_writers, name, static_cast<RotatableFileWriter*>(nullptr)); } RotatableFileManager::FileNameStatusPairVector RotatableFileManager::rotateAll( diff --git a/src/mongo/s/chunk_manager_index_bounds_test.cpp b/src/mongo/s/chunk_manager_index_bounds_test.cpp index ecb46584090..c551d06a29a 100644 --- a/src/mongo/s/chunk_manager_index_bounds_test.cpp +++ b/src/mongo/s/chunk_manager_index_bounds_test.cpp @@ -74,7 +74,7 @@ protected: const char* queryStr, const IndexBounds& expectedBounds) { auto query(canonicalize(queryStr)); - ASSERT(query.get() != NULL); + ASSERT(query.get() != nullptr); BSONObj key = fromjson(keyStr); @@ -98,7 +98,7 @@ protected: // Assume shard key is { a: 1 } void checkIndexBounds(const char* queryStr, const OrderedIntervalList& expectedOil) { auto query(canonicalize(queryStr)); - ASSERT(query.get() != NULL); + ASSERT(query.get() != nullptr); BSONObj key = fromjson("{a: 1}"); @@ -284,7 +284,7 @@ TEST_F(CMCollapseTreeTest, BasicAllElemMatch) { const char* queryStr = "{foo: {$all: [ {$elemMatch: {a:1, b:1}} ]}}"; auto query(canonicalize(queryStr)); - ASSERT(query.get() != NULL); + ASSERT(query.get() != nullptr); BSONObj key = fromjson("{'foo.a': 1}"); @@ -365,7 +365,7 @@ TEST_F(CMCollapseTreeTest, TextWithQuery) { TEST_F(CMCollapseTreeTest, HashedSinglePoint) { const char* queryStr = "{ a: 0 }"; auto query(canonicalize(queryStr)); - ASSERT(query.get() != NULL); + ASSERT(query.get() != nullptr); BSONObj key = fromjson("{a: 'hashed'}"); diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp index 7903b2c48e5..3320e4517b7 100644 --- a/src/mongo/s/client/parallel.cpp +++ b/src/mongo/s/client/parallel.cpp @@ -198,7 +198,7 @@ ParallelSortClusteredCursor::~ParallelSortClusteredCursor() { } 
delete[] _cursors; - _cursors = 0; + _cursors = nullptr; // Clear out our metadata after removing legacy cursor data _cursorMap.clear(); @@ -224,11 +224,11 @@ void ParallelSortClusteredCursor::init(OperationContext* opCtx) { void ParallelSortClusteredCursor::_finishCons() { _numServers = _servers.size(); _lastFrom = 0; - _cursors = 0; + _cursors = nullptr; if (!_qSpec.isEmpty()) { _needToSkip = _qSpec.ntoskip(); - _cursors = 0; + _cursors = nullptr; _sortKey = _qSpec.sort(); _fields = _qSpec.fields(); } @@ -552,8 +552,9 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) { isCommand() ? 1 : 0, // nToReturn (0 if query indicates multi) 0, // nToSkip // Does this need to be a ptr? - _qSpec.fields().isEmpty() ? 0 : _qSpec.fieldsData(), // fieldsToReturn - _qSpec.options(), // options + _qSpec.fields().isEmpty() ? nullptr + : _qSpec.fieldsData(), // fieldsToReturn + _qSpec.options(), // options // NtoReturn is weird. // If zero, it means use default size, so we do that for all cursors // If positive, it's the batch size (we don't want this cursor limiting @@ -579,9 +580,10 @@ void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) { _qSpec.ntoreturn(), // nToReturn _qSpec.ntoskip(), // nToSkip // Does this need to be a ptr? - _qSpec.fields().isEmpty() ? 0 : _qSpec.fieldsData(), // fieldsToReturn - _qSpec.options(), // options - 0)); // batchSize + _qSpec.fields().isEmpty() ? nullptr + : _qSpec.fieldsData(), // fieldsToReturn + _qSpec.options(), // options + 0)); // batchSize } } @@ -1005,25 +1007,25 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) { new DBClientCursor(conns[i]->get(), NamespaceString(_ns), _query, - 0, // nToReturn - 0, // nToSkip - _fields.isEmpty() ? 0 : &_fields, // fieldsToReturn + 0, // nToReturn + 0, // nToSkip + _fields.isEmpty() ? nullptr : &_fields, // fieldsToReturn _options, _batchSize == 0 ? 
0 : _batchSize + _needToSkip // batchSize ), - NULL); + nullptr); try { _cursors[i].get()->initLazy(!firstPass); } catch (NetworkException& e) { socketExs.push_back(e.what() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); if (!returnPartial) break; } catch (std::exception& e) { otherExs.push_back(e.what() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); break; } @@ -1051,7 +1053,7 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) { if (!_cursors[i].get()->initLazyFinish(retry)) { warning() << "invalid result from " << conns[i]->getHost() << (retry ? ", retrying" : ""); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); if (!retry) { socketExs.push_back(str::stream() << "error querying server: " @@ -1071,17 +1073,17 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) { staleConfigExs.push_back( (string) "stale config detected when receiving response for " + e.toString() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); continue; } catch (NetworkException& e) { socketExs.push_back(e.what() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); continue; } catch (std::exception& e) { otherExs.push_back(e.what() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); continue; } @@ -1100,12 +1102,12 @@ void ParallelSortClusteredCursor::_oldInit(OperationContext* opCtx) { staleConfigExs.push_back((string) "stale config detected for " + e.toString() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); continue; } catch (std::exception& e) { otherExs.push_back(e.what() + errLoc); - _cursors[i].reset(NULL, NULL); + _cursors[i].reset(nullptr, nullptr); conns[i]->done(); continue; } diff --git 
a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp index e5698ef5f69..2cc44c44a1f 100644 --- a/src/mongo/s/client/shard_connection.cpp +++ b/src/mongo/s/client/shard_connection.cpp @@ -127,7 +127,7 @@ public: std::unique_ptr<DBClientBase> c; if (s->avail) { c.reset(s->avail); - s->avail = 0; + s->avail = nullptr; // May throw an exception shardConnectionPool.onHandedOut(c.get()); @@ -162,7 +162,7 @@ public: release(addr, ss->avail); } - ss->avail = 0; + ss->avail = nullptr; } if (fromDestructor) { @@ -181,7 +181,7 @@ public: const bool isConnGood = shardConnectionPool.isConnectionGood(addr, conn); - if (s->avail != NULL) { + if (s->avail != nullptr) { warning() << "Detected additional sharded connection in the " << "thread local pool for " << addr; @@ -193,7 +193,7 @@ public: if (!isConnGood) { delete s->avail; - s->avail = NULL; + s->avail = nullptr; } // Let the internal pool handle the bad connection, this can also @@ -268,7 +268,7 @@ public: */ void clearPool() { for (HostMap::iterator iter = _hosts.begin(); iter != _hosts.end(); ++iter) { - if (iter->second->avail != NULL) { + if (iter->second->avail != nullptr) { delete iter->second->avail; } delete iter->second; @@ -443,7 +443,7 @@ void ShardConnection::kill() { delete _conn; } - _conn = 0; + _conn = nullptr; _finishedInit = true; } } diff --git a/src/mongo/s/client/shard_connection.h b/src/mongo/s/client/shard_connection.h index 2b28aff0e18..c23c03964bb 100644 --- a/src/mongo/s/client/shard_connection.h +++ b/src/mongo/s/client/shard_connection.h @@ -110,7 +110,7 @@ public: } bool ok() const { - return _conn != NULL; + return _conn != nullptr; } /** reports all thread local connections on this instance */ diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp index 75c235d6cc6..f1cdfbc5e6b 100644 --- a/src/mongo/s/client/version_manager.cpp +++ b/src/mongo/s/client/version_manager.cpp @@ -196,7 +196,7 @@ bool 
initShardVersionEmptyNS(OperationContext* opCtx, DBClientBase* conn_in) { "", Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString(), ChunkVersion(), - NULL, + nullptr, true, result); diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp index ec7f151302c..aabd696f845 100644 --- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp +++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp @@ -277,7 +277,7 @@ public: BSONArrayBuilder errorRawGLE; int numWCErrors = 0; - const LegacyWCResponse* lastErrResponse = NULL; + const LegacyWCResponse* lastErrResponse = nullptr; for (std::vector<LegacyWCResponse>::const_iterator it = wcResponses.begin(); it != wcResponses.end(); diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp index 6f169e55140..39ec9ddae0f 100644 --- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp +++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp @@ -70,7 +70,7 @@ const Milliseconds kNoDistLockTimeout(-1); * Generates a unique name for the temporary M/R output collection. */ std::string getTmpName(StringData coll) { - return str::stream() << "tmp.mrs." << coll << "_" << time(0) << "_" + return str::stream() << "tmp.mrs." 
<< coll << "_" << time(nullptr) << "_" << JOB_NUMBER.fetchAndAdd(1); } diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp index 79abd418faf..5efce61128d 100644 --- a/src/mongo/s/write_ops/batch_write_op.cpp +++ b/src/mongo/s/write_ops/batch_write_op.cpp @@ -363,7 +363,7 @@ Status BatchWriteOp::targetBatch(const NSTargeter& targeter, if (ordered && !batchMap.empty()) { dassert(batchMap.size() == 1u); if (isNewBatchRequiredOrdered(writes, batchMap)) { - writeOp.cancelWrites(NULL); + writeOp.cancelWrites(nullptr); break; } } @@ -591,7 +591,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch, vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin(); int index = 0; - WriteErrorDetail* lastError = NULL; + WriteErrorDetail* lastError = nullptr; for (vector<TargetedWrite *>::const_iterator it = targetedBatch.getWrites().begin(); it != targetedBatch.getWrites().end(); ++it, ++index) { @@ -601,7 +601,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch, dassert(writeOp.getWriteState() == WriteOpState_Pending); // See if we have an error for the write - WriteErrorDetail* writeError = NULL; + WriteErrorDetail* writeError = nullptr; if (itemErrorIt != itemErrors.end() && (*itemErrorIt)->getIndex() == index) { // We have an per-item error for this write op's index @@ -610,7 +610,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch, } // Finish the response (with error, if needed) - if (NULL == writeError) { + if (nullptr == writeError) { if (!ordered || !lastError) { writeOp.noteWriteComplete(*write); } else { @@ -625,7 +625,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch, } // Track errors we care about, whether batch or individual errors - if (NULL != trackedErrors) { + if (nullptr != trackedErrors) { trackErrors(targetedBatch.getEndpoint(), itemErrors, trackedErrors); } @@ -723,7 +723,7 @@ void 
BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) { // For non-verbose, it's all we need. if (!_clientRequest.isVerboseWC()) { - dassert(batchResp->isValid(NULL)); + dassert(batchResp->isValid(nullptr)); return; } @@ -802,7 +802,7 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) { batchResp->setNModified(_numModified); } - dassert(batchResp->isValid(NULL)); + dassert(batchResp->isValid(nullptr)); } int BatchWriteOp::numWriteOpsIn(WriteOpState opState) const { diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp index 85a289bf6d4..49b586fc58a 100644 --- a/src/mongo/s/write_ops/batch_write_op_test.cpp +++ b/src/mongo/s/write_ops/batch_write_op_test.cpp @@ -92,14 +92,14 @@ void buildResponse(int n, BatchedCommandResponse* response) { response->clear(); response->setStatus(Status::OK()); response->setN(n); - ASSERT(response->isValid(NULL)); + ASSERT(response->isValid(nullptr)); } void buildErrResponse(int code, const std::string& message, BatchedCommandResponse* response) { response->clear(); response->setN(0); response->setStatus({ErrorCodes::Error(code), message}); - ASSERT(response->isValid(NULL)); + ASSERT(response->isValid(nullptr)); } void addError(int code, const std::string& message, int index, BatchedCommandResponse* response) { @@ -154,7 +154,7 @@ TEST_F(BatchWriteOpTest, SingleOp) { BatchedCommandResponse response; buildResponse(1, &response); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -187,7 +187,7 @@ TEST_F(BatchWriteOpTest, SingleError) { BatchedCommandResponse response; buildErrResponse(ErrorCodes::UnknownError, "message", &response); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); 
ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -266,7 +266,7 @@ TEST_F(BatchWriteOpTest, SingleWriteConcernErrorOrdered) { addWCError(&response); // First stale response comes back, we should retry - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -301,14 +301,14 @@ TEST_F(BatchWriteOpTest, SingleStaleError) { addError(ErrorCodes::StaleShardVersion, "mock stale error", 0, &response); // First stale response comes back, we should retry - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); ASSERT_OK(batchOp.targetBatch(targeter, false, &targeted)); // Respond again with a stale response - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -317,7 +317,7 @@ TEST_F(BatchWriteOpTest, SingleStaleError) { buildResponse(1, &response); // Respond with an 'ok' response - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -359,7 +359,7 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardOrdered) { BatchedCommandResponse response; buildResponse(2, &response); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -401,7 +401,7 @@ TEST_F(BatchWriteOpTest, MultiOpSameShardUnordered) { BatchedCommandResponse response; buildResponse(2, &response); - 
batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -440,7 +440,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsOrdered) { buildResponse(1, &response); // Respond to first targeted batch - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -452,7 +452,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsOrdered) { assertEndpointsEqual(targeted.begin()->second->getEndpoint(), endpointB); // Respond to second targeted batch - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -513,7 +513,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsUnordered) { // Respond to both targeted batches for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(batchOp.isFinished()); @@ -555,7 +555,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) { // Respond to both targeted batches for first multi-delete for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(!batchOp.isFinished()); @@ -569,7 +569,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachOrdered) { // Respond to second targeted batches for second multi-delete for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + 
batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(batchOp.isFinished()); @@ -616,7 +616,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardsEachUnordered) { // Respond to both targeted batches, each containing two ops for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(batchOp.isFinished()); @@ -667,7 +667,7 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) { buildResponse(2, &response); // Respond to first targeted batch containing the two single-host deletes - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -683,7 +683,7 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) { // Respond to two targeted batches for first multi-delete for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(!batchOp.isFinished()); @@ -697,7 +697,7 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) { // Respond to two targeted batches for second multi-delete for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(!batchOp.isFinished()); @@ -713,7 +713,7 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsOrdered) { buildResponse(2, &response); // Respond to final targeted batch containing the last two single-host deletes - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); 
BatchedCommandResponse clientResponse; @@ -766,7 +766,7 @@ TEST_F(BatchWriteOpTest, MultiOpOneOrTwoShardsUnordered) { // Respond to first targeted batch containing the two single-host deletes for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(batchOp.isFinished()); @@ -812,7 +812,7 @@ TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) { auto targetedIt = targeted.begin(); // No error on first shard - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(!batchOp.isFinished()); buildResponse(0, &response); @@ -820,7 +820,7 @@ TEST_F(BatchWriteOpTest, MultiOpSingleShardErrorUnordered) { // Error on second write on second shard ++targetedIt; - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(batchOp.isFinished()); ASSERT(++targetedIt == targeted.end()); @@ -873,7 +873,7 @@ TEST_F(BatchWriteOpTest, MultiOpTwoShardErrorsUnordered) { // Error on first write on first shard and second write on second shard. 
for (auto it = targeted.begin(); it != targeted.end(); ++it) { ASSERT(!batchOp.isFinished()); - batchOp.noteBatchResponse(*it->second, response, NULL); + batchOp.noteBatchResponse(*it->second, response, nullptr); } ASSERT(batchOp.isFinished()); @@ -932,7 +932,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) { buildResponse(2, &response); // No errors on first shard - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(!batchOp.isFinished()); buildResponse(1, &response); @@ -940,7 +940,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorUnordered) { // Error on second write on second shard ++targetedIt; - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(batchOp.isFinished()); ASSERT(++targetedIt == targeted.end()); @@ -990,7 +990,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) { buildResponse(1, &response); // No errors on first shard - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(!batchOp.isFinished()); buildResponse(0, &response); @@ -998,7 +998,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) { // Error on second write on second shard ++targetedIt; - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(batchOp.isFinished()); ASSERT(++targetedIt == targeted.end()); @@ -1052,7 +1052,7 @@ TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) { addWCError(&response); // First stale response comes back, we should retry - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); // Unordered reports write concern 
error @@ -1099,7 +1099,7 @@ TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) { addWCError(&response); // First response comes back with write concern error - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(!batchOp.isFinished()); buildResponse(0, &response); @@ -1107,7 +1107,7 @@ TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) { // Second response comes back with write error ++targetedIt; - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(batchOp.isFinished()); ASSERT(++targetedIt == targeted.end()); @@ -1157,7 +1157,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetOrdered) { buildResponse(1, &response); // First response ok - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -1217,7 +1217,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedTargetUnordered) { buildResponse(2, &response); // Response is ok for first and third write - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -1254,7 +1254,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) { buildResponse(1, &response); // First shard batch is ok - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -1263,7 +1263,7 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchOrdered) { buildErrResponse(ErrorCodes::UnknownError, "mock error", &response); // Second shard batch fails - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); 
+ batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); // We should have recorded an error for the second write @@ -1310,14 +1310,14 @@ TEST_F(BatchWriteOpTest, MultiOpFailedBatchUnordered) { buildResponse(1, &response); // First shard batch is ok - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(!batchOp.isFinished()); buildErrResponse(ErrorCodes::UnknownError, "mock error", &response); // Second shard batch fails ++targetedIt; - batchOp.noteBatchResponse(*targetedIt->second, response, NULL); + batchOp.noteBatchResponse(*targetedIt->second, response, nullptr); ASSERT(batchOp.isFinished()); ASSERT(++targetedIt == targeted.end()); @@ -1359,7 +1359,7 @@ TEST_F(BatchWriteOpTest, MultiOpAbortOrdered) { buildResponse(1, &response); // First shard batch is ok - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); WriteErrorDetail abortError; @@ -1447,14 +1447,14 @@ TEST_F(BatchWriteOpTest, MultiOpTwoWCErrors) { addWCError(&response); // First shard write write concern fails. - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); ASSERT_OK(batchOp.targetBatch(targeter, true, &targeted)); // Second shard write write concern fails. 
- batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); BatchedCommandResponse clientResponse; @@ -1503,7 +1503,7 @@ TEST_F(BatchWriteOpLimitTests, OneBigDoc) { BatchedCommandResponse response; buildResponse(1, &response); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); } @@ -1535,7 +1535,7 @@ TEST_F(BatchWriteOpLimitTests, OneBigOneSmall) { BatchedCommandResponse response; buildResponse(1, &response); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(!batchOp.isFinished()); targetedOwned.clear(); @@ -1543,7 +1543,7 @@ TEST_F(BatchWriteOpLimitTests, OneBigOneSmall) { ASSERT_EQUALS(targeted.size(), 1u); ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); - batchOp.noteBatchResponse(*targeted.begin()->second, response, NULL); + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); ASSERT(batchOp.isFinished()); } diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp index 9ec01a62e61..0c2396f7499 100644 --- a/src/mongo/s/write_ops/batched_command_response.cpp +++ b/src/mongo/s/write_ops/batched_command_response.cpp @@ -66,7 +66,7 @@ BatchedCommandResponse::~BatchedCommandResponse() { bool BatchedCommandResponse::isValid(std::string* errMsg) const { std::string dummy; - if (errMsg == NULL) { + if (errMsg == nullptr) { errMsg = &dummy; } @@ -195,7 +195,7 @@ bool BatchedCommandResponse::parseBSON(const BSONObj& source, string* errMsg) { _nModified = intNModified; } - std::vector<BatchedUpsertDetail*>* tempUpsertDetails = NULL; + std::vector<BatchedUpsertDetail*>* tempUpsertDetails = nullptr; fieldState = 
FieldParser::extract(source, upsertDetails, &tempUpsertDetails, errMsg); if (fieldState == FieldParser::FIELD_INVALID) return false; @@ -223,12 +223,12 @@ bool BatchedCommandResponse::parseBSON(const BSONObj& source, string* errMsg) { return false; _isElectionIdSet = fieldState == FieldParser::FIELD_SET; - std::vector<WriteErrorDetail*>* tempErrDetails = NULL; + std::vector<WriteErrorDetail*>* tempErrDetails = nullptr; fieldState = FieldParser::extract(source, writeErrors, &tempErrDetails, errMsg); if (fieldState == FieldParser::FIELD_INVALID) return false; _writeErrorDetails.reset(tempErrDetails); - WriteConcernErrorDetail* wcError = NULL; + WriteConcernErrorDetail* wcError = nullptr; fieldState = FieldParser::extract(source, writeConcernError, &wcError, errMsg); if (fieldState == FieldParser::FIELD_INVALID) return false; @@ -347,14 +347,14 @@ void BatchedCommandResponse::setUpsertDetails( } void BatchedCommandResponse::addToUpsertDetails(BatchedUpsertDetail* upsertDetails) { - if (_upsertDetails.get() == NULL) { + if (_upsertDetails.get() == nullptr) { _upsertDetails.reset(new std::vector<BatchedUpsertDetail*>); } _upsertDetails->push_back(upsertDetails); } void BatchedCommandResponse::unsetUpsertDetails() { - if (_upsertDetails.get() != NULL) { + if (_upsertDetails.get() != nullptr) { for (std::vector<BatchedUpsertDetail*>::iterator it = _upsertDetails->begin(); it != _upsertDetails->end(); ++it) { @@ -365,7 +365,7 @@ void BatchedCommandResponse::unsetUpsertDetails() { } bool BatchedCommandResponse::isUpsertDetailsSet() const { - return _upsertDetails.get() != NULL; + return _upsertDetails.get() != nullptr; } size_t BatchedCommandResponse::sizeUpsertDetails() const { @@ -432,14 +432,14 @@ void BatchedCommandResponse::setErrDetails(const std::vector<WriteErrorDetail*>& } void BatchedCommandResponse::addToErrDetails(WriteErrorDetail* errDetails) { - if (_writeErrorDetails.get() == NULL) { + if (_writeErrorDetails.get() == nullptr) { _writeErrorDetails.reset(new 
std::vector<WriteErrorDetail*>); } _writeErrorDetails->push_back(errDetails); } void BatchedCommandResponse::unsetErrDetails() { - if (_writeErrorDetails.get() != NULL) { + if (_writeErrorDetails.get() != nullptr) { for (std::vector<WriteErrorDetail*>::iterator it = _writeErrorDetails->begin(); it != _writeErrorDetails->end(); ++it) { @@ -450,7 +450,7 @@ void BatchedCommandResponse::unsetErrDetails() { } bool BatchedCommandResponse::isErrDetailsSet() const { - return _writeErrorDetails.get() != NULL; + return _writeErrorDetails.get() != nullptr; } size_t BatchedCommandResponse::sizeErrDetails() const { diff --git a/src/mongo/s/write_ops/cluster_write.cpp b/src/mongo/s/write_ops/cluster_write.cpp index 3955cf5bce2..4e3aa4c7e3b 100644 --- a/src/mongo/s/write_ops/cluster_write.cpp +++ b/src/mongo/s/write_ops/cluster_write.cpp @@ -58,7 +58,7 @@ namespace { void toBatchError(const Status& status, BatchedCommandResponse* response) { response->clear(); response->setStatus(status); - dassert(response->isValid(NULL)); + dassert(response->isValid(nullptr)); } } // namespace diff --git a/src/mongo/s/write_ops/write_error_detail.cpp b/src/mongo/s/write_ops/write_error_detail.cpp index 5d4e5b84cfc..d51618fe25b 100644 --- a/src/mongo/s/write_ops/write_error_detail.cpp +++ b/src/mongo/s/write_ops/write_error_detail.cpp @@ -50,7 +50,7 @@ WriteErrorDetail::WriteErrorDetail() { bool WriteErrorDetail::isValid(std::string* errMsg) const { std::string dummy; - if (errMsg == NULL) { + if (errMsg == nullptr) { errMsg = &dummy; } diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp index 1d647c0ce30..97e5b1d1795 100644 --- a/src/mongo/s/write_ops/write_op.cpp +++ b/src/mongo/s/write_ops/write_op.cpp @@ -228,7 +228,7 @@ void WriteOp::noteWriteComplete(const TargetedWrite& targetedWrite) { const WriteOpRef& ref = targetedWrite.writeOpRef; auto& childOp = _childOps[ref.second]; - childOp.pendingWrite = NULL; + childOp.pendingWrite = nullptr; 
childOp.endpoint.reset(new ShardEndpoint(targetedWrite.endpoint)); childOp.state = WriteOpState_Completed; _updateOpState(); @@ -238,7 +238,7 @@ void WriteOp::noteWriteError(const TargetedWrite& targetedWrite, const WriteErro const WriteOpRef& ref = targetedWrite.writeOpRef; auto& childOp = _childOps[ref.second]; - childOp.pendingWrite = NULL; + childOp.pendingWrite = nullptr; childOp.endpoint.reset(new ShardEndpoint(targetedWrite.endpoint)); childOp.error.reset(new WriteErrorDetail); error.cloneTo(childOp.error.get()); diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp index 21499683120..3110a74c909 100644 --- a/src/mongo/s/write_ops/write_op_test.cpp +++ b/src/mongo/s/write_ops/write_op_test.cpp @@ -441,7 +441,7 @@ TEST(WriteOpTests, CancelSingle) { ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(targeted.front()->endpoint, endpoint); - writeOp.cancelWrites(NULL); + writeOp.cancelWrites(nullptr); ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Ready); } diff --git a/src/mongo/scripting/deadline_monitor_test.cpp b/src/mongo/scripting/deadline_monitor_test.cpp index a9dd1a546ce..1ad1fb70cdb 100644 --- a/src/mongo/scripting/deadline_monitor_test.cpp +++ b/src/mongo/scripting/deadline_monitor_test.cpp @@ -66,7 +66,7 @@ private: class Task { public: - Task() : _group(NULL), _killed(0) {} + Task() : _group(nullptr), _killed(0) {} explicit Task(TaskGroup* group) : _group(group), _killed(0) {} void kill() { _killed = curTimeMillis64(); diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp index 28cb06c949e..93f70408757 100644 --- a/src/mongo/scripting/engine.cpp +++ b/src/mongo/scripting/engine.cpp @@ -225,7 +225,7 @@ void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) { auto directDBClient = DBDirectClientFactory::get(opCtx).create(opCtx); unique_ptr<DBClientCursor> c = - directDBClient->query(coll, Query(), 0, 0, NULL, QueryOption_SlaveOk, 0); + 
directDBClient->query(coll, Query(), 0, 0, nullptr, QueryOption_SlaveOk, 0); massert(16669, "unable to get db client cursor from query", c.get()); set<string> thisTime; @@ -560,7 +560,7 @@ unique_ptr<Scope> ScriptEngine::getPooledScope(OperationContext* opCtx, return p; } -void (*ScriptEngine::_connectCallback)(DBClientBase&) = 0; +void (*ScriptEngine::_connectCallback)(DBClientBase&) = nullptr; ScriptEngine* getGlobalScriptEngine() { if (hasGlobalServiceContext()) diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h index f5a85d44f3e..a1f02724ee2 100644 --- a/src/mongo/scripting/engine.h +++ b/src/mongo/scripting/engine.h @@ -140,7 +140,7 @@ public: uasserted(9005, std::string("invoke failed: ") + getError()); } - virtual void injectNative(const char* field, NativeFunction func, void* data = 0) = 0; + virtual void injectNative(const char* field, NativeFunction func, void* data = nullptr) = 0; virtual bool exec(StringData code, const std::string& name, diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h index b0eb7c55c14..5c428528544 100644 --- a/src/mongo/scripting/mozjs/implscope.h +++ b/src/mongo/scripting/mozjs/implscope.h @@ -153,7 +153,7 @@ public: bool assertOnError, int timeoutMs) override; - void injectNative(const char* field, NativeFunction func, void* data = 0) override; + void injectNative(const char* field, NativeFunction func, void* data = nullptr) override; ScriptingFunction _createFunction(const char* code) override; diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp index a2ef99c93f7..e86f0582bb4 100644 --- a/src/mongo/scripting/mozjs/mongo.cpp +++ b/src/mongo/scripting/mozjs/mongo.cpp @@ -365,7 +365,7 @@ void MongoBase::Functions::find::call(JSContext* cx, JS::CallArgs args) { q, nToReturn, nToSkip, - haveFields ? &fields : NULL, + haveFields ? 
&fields : nullptr, options, batchSize)); if (!cursor.get()) { diff --git a/src/mongo/scripting/mozjs/proxyscope.h b/src/mongo/scripting/mozjs/proxyscope.h index 9d11a857923..e4948a6bc9f 100644 --- a/src/mongo/scripting/mozjs/proxyscope.h +++ b/src/mongo/scripting/mozjs/proxyscope.h @@ -170,7 +170,7 @@ public: bool assertOnError, int timeoutMs) override; - void injectNative(const char* field, NativeFunction func, void* data = 0) override; + void injectNative(const char* field, NativeFunction func, void* data = nullptr) override; ScriptingFunction _createFunction(const char* code) override; diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp index 9ee4527f86c..4282d05a525 100644 --- a/src/mongo/shell/dbshell.cpp +++ b/src/mongo/shell/dbshell.cpp @@ -184,7 +184,7 @@ void generateCompletions(const std::string& prefix, std::vector<std::string>& al try { BSONObj args = BSON("0" << prefix); shellMainScope->invokeSafe( - "function callShellAutocomplete(x) {shellAutocomplete(x)}", &args, NULL); + "function callShellAutocomplete(x) {shellAutocomplete(x)}", &args, nullptr); BSONObjBuilder b; shellMainScope->append(b, "", "__autocomplete__"); BSONObj res = b.obj(); @@ -543,7 +543,7 @@ static void edit(const std::string& whatToEdit) { GetTempPathA(sizeof tempFolder, tempFolder); sb << tempFolder << "mongo_edit" << time(0) + i << ".js"; #else - sb << "/tmp/mongo_edit" << time(0) + i << ".js"; + sb << "/tmp/mongo_edit" << time(nullptr) + i << ".js"; #endif filename = sb.str(); if (!::mongo::shell_utils::fileExists(filename)) @@ -834,7 +834,7 @@ int _main(int argc, char* argv[], char** envp) { "function() { return typeof TestData === 'object' && TestData !== null && " "TestData.hasOwnProperty('failIfUnterminatedProcesses') && " "TestData.failIfUnterminatedProcesses; }"_sd; - shellMainScope->invokeSafe(code.rawData(), 0, 0); + shellMainScope->invokeSafe(code.rawData(), nullptr, nullptr); failIfUnterminatedProcesses = shellMainScope->getBoolean("__returnValue"); 
if (failIfUnterminatedProcesses) { @@ -858,7 +858,7 @@ int _main(int argc, char* argv[], char** envp) { std::string rcLocation; if (!shellGlobalParams.norc) { #ifndef _WIN32 - if (getenv("HOME") != NULL) + if (getenv("HOME") != nullptr) rcLocation = str::stream() << getenv("HOME") << "/.mongorc.js"; #else if (getenv("HOMEDRIVE") != NULL && getenv("HOMEPATH") != NULL) diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp index bc3373def85..f4fdc340e79 100644 --- a/src/mongo/shell/linenoise.cpp +++ b/src/mongo/shell/linenoise.cpp @@ -385,12 +385,12 @@ public: } Utf32String* yank() { - return (size > 0) ? &theRing[indexToSlot[index]] : 0; + return (size > 0) ? &theRing[indexToSlot[index]] : nullptr; } Utf32String* yankPop() { if (size == 0) { - return 0; + return nullptr; } ++index; if (index == size) { @@ -467,8 +467,8 @@ static const int DELETE_KEY = 0x10E00000; static const int PAGE_UP_KEY = 0x11000000; static const int PAGE_DOWN_KEY = 0x11200000; -static const char* unsupported_term[] = {"dumb", "cons25", "emacs", NULL}; -static linenoiseCompletionCallback* completionCallback = NULL; +static const char* unsupported_term[] = {"dumb", "cons25", "emacs", nullptr}; +static linenoiseCompletionCallback* completionCallback = nullptr; #ifdef _WIN32 static HANDLE console_in, console_out; @@ -485,7 +485,7 @@ static int atexit_registered = 0; /* register atexit just 1 time */ static int historyMaxLen = LINENOISE_DEFAULT_HISTORY_MAX_LEN; static int historyLen = 0; static int historyIndex = 0; -static UChar8** history = NULL; +static UChar8** history = nullptr; // used to emulate Windows command prompt on down-arrow after a recall // we use -2 as our "not set" value because we add 1 to the previous index on down-arrow, @@ -497,7 +497,7 @@ static void linenoiseAtExit(void); static bool isUnsupportedTerm(void) { char* term = getenv("TERM"); - if (term == NULL) + if (term == nullptr) return false; for (int j = 0; unsupported_term[j]; ++j) if 
(!strcasecmp(term, unsupported_term[j])) { @@ -517,7 +517,7 @@ void linenoiseHistoryFree(void) { free(history[j]); historyLen = 0; free(history); - history = 0; + history = nullptr; } } @@ -1754,7 +1754,7 @@ int InputBuffer::incrementalHistorySearch(PromptBase& pi, int startChar) { bool keepLooping = true; bool useSearchedLine = true; bool searchAgain = false; - UChar32* activeHistoryLine = 0; + UChar32* activeHistoryLine = nullptr; while (keepLooping) { c = linenoiseReadChar(); c = cleanupCtrl(c); // convert CTRL + <char> into normal ctrl @@ -2639,12 +2639,12 @@ char* linenoise(const char* prompt) { PromptInfo pi(reinterpret_cast<const UChar8*>(prompt), getScreenColumns()); if (isUnsupportedTerm()) { if (write32(1, pi.promptText.get(), pi.promptChars) == -1) - return 0; + return nullptr; fflush(stdout); if (preloadedBufferContents.empty()) { unique_ptr<char[]> buf8(new char[LINENOISE_MAX_LINE]); - if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { - return NULL; + if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == nullptr) { + return nullptr; } size_t len = strlen(buf8.get()); while (len && (buf8[len - 1] == '\n' || buf8[len - 1] == '\r')) { @@ -2659,7 +2659,7 @@ char* linenoise(const char* prompt) { } } else { if (enableRawMode() == -1) { - return NULL; + return nullptr; } InputBuffer ib(buf32, charWidths, LINENOISE_MAX_LINE); if (!preloadedBufferContents.empty()) { @@ -2670,7 +2670,7 @@ char* linenoise(const char* prompt) { disableRawMode(); printf("\n"); if (count == -1) { - return NULL; + return nullptr; } size_t bufferSize = sizeof(UChar32) * ib.length() + 1; unique_ptr<UChar8[]> buf8(new UChar8[bufferSize]); @@ -2679,8 +2679,8 @@ char* linenoise(const char* prompt) { } } else { // input not from a terminal, we should work with piped input, i.e. 
redirected stdin unique_ptr<char[]> buf8(new char[LINENOISE_MAX_LINE]); - if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == NULL) { - return NULL; + if (fgets(buf8.get(), LINENOISE_MAX_LINE, stdin) == nullptr) { + return nullptr; } // if fgets() gave us the newline, remove it @@ -2706,9 +2706,9 @@ int linenoiseHistoryAdd(const char* line) { if (historyMaxLen == 0) { return 0; } - if (history == NULL) { + if (history == nullptr) { history = reinterpret_cast<UChar8**>(malloc(sizeof(UChar8*) * historyMaxLen)); - if (history == NULL) { + if (history == nullptr) { return 0; } memset(history, 0, (sizeof(char*) * historyMaxLen)); @@ -2746,7 +2746,7 @@ int linenoiseHistorySetMaxLen(int len) { if (history) { int tocopy = historyLen; UChar8** newHistory = reinterpret_cast<UChar8**>(malloc(sizeof(UChar8*) * len)); - if (newHistory == NULL) { + if (newHistory == nullptr) { return 0; } if (len < tocopy) { @@ -2784,7 +2784,7 @@ mongo::Status linenoiseHistorySave(const char* filename) { return linenoiseFileError(mongo::ErrorCodes::FileOpenFailed, "open()", filename, ewd); } fp = fdopen(fd, "wt"); - if (fp == NULL) { + if (fp == nullptr) { const auto ewd = mongo::errnoWithDescription(); // We've already failed, so no need to report any close() failure. (void)close(fd); @@ -2820,7 +2820,7 @@ mongo::Status linenoiseHistorySave(const char* filename) { /* Load the history from the specified file. */ mongo::Status linenoiseHistoryLoad(const char* filename) { FILE* fp = fopen(filename, "rt"); - if (fp == NULL) { + if (fp == nullptr) { if (errno == ENOENT) { // Not having a history file isn't an error condition. // For example, it's always the case when the shell is run for the first time. 
@@ -2831,7 +2831,7 @@ mongo::Status linenoiseHistoryLoad(const char* filename) { } char buf[LINENOISE_MAX_LINE]; - while (fgets(buf, LINENOISE_MAX_LINE, fp) != NULL) { + while (fgets(buf, LINENOISE_MAX_LINE, fp) != nullptr) { char* p = strchr(buf, '\r'); if (!p) { p = strchr(buf, '\n'); diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp index af46190753f..b97e6358b14 100644 --- a/src/mongo/shell/shell_utils.cpp +++ b/src/mongo/shell/shell_utils.cpp @@ -264,7 +264,7 @@ bool isBalanced(const std::string& code) { std::string dbConnect; -static const char* argv0 = 0; +static const char* argv0 = nullptr; EnterpriseShellCallback* enterpriseCallback = nullptr; void RecordMyLocation(const char* _argv0) { diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp index 51b266dc9ed..c25acf297f7 100644 --- a/src/mongo/shell/shell_utils_launcher.cpp +++ b/src/mongo/shell/shell_utils_launcher.cpp @@ -119,7 +119,7 @@ void safeClose(int fd) { } ~ScopedSignalBlocker() { - pthread_sigmask(SIG_SETMASK, &_oldMask, NULL); + pthread_sigmask(SIG_SETMASK, &_oldMask, nullptr); } private: @@ -314,7 +314,7 @@ ProgramRunner::ProgramRunner(const BSONObj& args, const BSONObj& env, bool isMon if (str == "--port") { _port = -2; } else if (_port == -2) { - _port = strtol(str.c_str(), 0, 10); + _port = strtol(str.c_str(), nullptr, 10); } else if (isMongodProgram && str == "--configsvr") { _name = "c"; } @@ -428,7 +428,7 @@ void ProgramRunner::start() { } #endif - fflush(0); + fflush(nullptr); launchProcess(pipeEnds[1]); // sets _pid @@ -692,7 +692,7 @@ void ProgramRunner::launchProcess(int child_stdout) { // returns true if process exited // If this function returns true, it will always call `registry.unregisterProgram(pid);` // If block is true, this will throw if it cannot wait for the processes to exit. 
-bool wait_for_pid(ProcessId pid, bool block = true, int* exit_code = NULL) { +bool wait_for_pid(ProcessId pid, bool block = true, int* exit_code = nullptr) { #ifdef _WIN32 verify(registry.countHandleForPid(pid)); HANDLE h = registry.getHandleForPid(pid); @@ -1100,7 +1100,7 @@ std::vector<ProcessId> getRunningMongoChildProcessIds() { } MongoProgramScope::~MongoProgramScope() { - DESTRUCTOR_GUARD(KillMongoProgramInstances(); ClearRawMongoProgramOutput(BSONObj(), 0);) + DESTRUCTOR_GUARD(KillMongoProgramInstances(); ClearRawMongoProgramOutput(BSONObj(), nullptr);) } void installShellUtilsLauncher(Scope& scope) { diff --git a/src/mongo/unittest/system_resource_canary_bm.cpp b/src/mongo/unittest/system_resource_canary_bm.cpp index beaba9aa585..f2f09bc162b 100644 --- a/src/mongo/unittest/system_resource_canary_bm.cpp +++ b/src/mongo/unittest/system_resource_canary_bm.cpp @@ -137,7 +137,7 @@ BENCHMARK_DEFINE_F(CacheLatencyTest, BM_CacheLatency)(benchmark::State& state) { size_t counter = arrLength / (kStrideBytes * 100) + 1; for (auto keepRunning : state) { - char** dummyResult = 0; // Dummy result to prevent the loop from being optimized out. + char** dummyResult = nullptr; // Dummy result to prevent the loop from being optimized out. 
char** ptrToNextLinkedListNode = reinterpret_cast<char**>(data.get()[0]); for (size_t i = 0; i < counter; ++i) { diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp index 764be0908d0..3cce9bf868d 100644 --- a/src/mongo/unittest/unittest.cpp +++ b/src/mongo/unittest/unittest.cpp @@ -138,7 +138,7 @@ public: static Result* cur; }; -Result* Result::cur = 0; +Result* Result::cur = nullptr; namespace { @@ -390,7 +390,7 @@ int Suite::run(const std::vector<std::string>& suites, const std::string& filter for (std::string name : torun) { std::shared_ptr<Suite>& s = _allSuites()[name]; - fassert(16145, s != NULL); + fassert(16145, s != nullptr); log() << "going to run suite: " << name << std::endl; results.emplace_back(s->run(filter, runsPerTest)); @@ -407,7 +407,7 @@ int Suite::run(const std::vector<std::string>& suites, const std::string& filter Result totals("TOTALS"); std::vector<std::string> failedSuites; - Result::cur = NULL; + Result::cur = nullptr; for (const auto& r : results) { log().setIsTruncatable(false) << r->toString(); if (abs(r->rc()) > abs(rc)) diff --git a/src/mongo/util/allocator.cpp b/src/mongo/util/allocator.cpp index 358689a0655..66e87518b59 100644 --- a/src/mongo/util/allocator.cpp +++ b/src/mongo/util/allocator.cpp @@ -37,7 +37,7 @@ namespace mongo { void* mongoMalloc(size_t size) { void* x = std::malloc(size); - if (x == NULL) { + if (x == nullptr) { reportOutOfMemoryErrorAndExit(); } return x; @@ -45,7 +45,7 @@ void* mongoMalloc(size_t size) { void* mongoRealloc(void* ptr, size_t size) { void* x = std::realloc(ptr, size); - if (x == NULL) { + if (x == nullptr) { reportOutOfMemoryErrorAndExit(); } return x; diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp index 2167b2849ec..dcf2b580d42 100644 --- a/src/mongo/util/assert_util.cpp +++ b/src/mongo/util/assert_util.cpp @@ -223,7 +223,7 @@ std::string demangleName(const std::type_info& typeinfo) { #else int status; - char* niceName = 
abi::__cxa_demangle(typeinfo.name(), 0, 0, &status); + char* niceName = abi::__cxa_demangle(typeinfo.name(), nullptr, nullptr, &status); if (!niceName) return typeinfo.name(); diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp index 441cdab0e53..e5b40238230 100644 --- a/src/mongo/util/background.cpp +++ b/src/mongo/util/background.cpp @@ -276,7 +276,7 @@ void PeriodicTaskRunner::remove(PeriodicTask* task) { stdx::lock_guard<stdx::mutex> lock(_mutex); for (size_t i = 0; i != _tasks.size(); i++) { if (_tasks[i] == task) { - _tasks[i] = NULL; + _tasks[i] = nullptr; break; } } diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h index 8674fe269a4..30ee48f8a30 100644 --- a/src/mongo/util/concurrency/mutex.h +++ b/src/mongo/util/concurrency/mutex.h @@ -79,7 +79,7 @@ class SimpleMutex { public: SimpleMutex() { - verify(pthread_mutex_init(&_lock, 0) == 0); + verify(pthread_mutex_init(&_lock, nullptr) == 0); } ~SimpleMutex() { diff --git a/src/mongo/util/concurrency/ticketholder.h b/src/mongo/util/concurrency/ticketholder.h index 51c232bc5d1..8ab3d4a39d9 100644 --- a/src/mongo/util/concurrency/ticketholder.h +++ b/src/mongo/util/concurrency/ticketholder.h @@ -118,7 +118,7 @@ class TicketHolderReleaser { public: TicketHolderReleaser() { - _holder = NULL; + _holder = nullptr; } explicit TicketHolderReleaser(TicketHolder* holder) { @@ -132,10 +132,10 @@ public: } bool hasTicket() const { - return _holder != NULL; + return _holder != nullptr; } - void reset(TicketHolder* holder = NULL) { + void reset(TicketHolder* holder = nullptr) { if (_holder) { _holder->release(); } diff --git a/src/mongo/util/heap_profiler.cpp b/src/mongo/util/heap_profiler.cpp index 3edb8717f79..05a8a59f03a 100644 --- a/src/mongo/util/heap_profiler.cpp +++ b/src/mongo/util/heap_profiler.cpp @@ -481,7 +481,7 @@ private: if (dladdr(stack.frames[j], &dli)) { if (dli.dli_sname) { int status; - demangled = abi::__cxa_demangle(dli.dli_sname, 0, 0, 
&status); + demangled = abi::__cxa_demangle(dli.dli_sname, nullptr, nullptr, &status); if (demangled) { // strip off function parameters as they are very verbose and not useful char* p = strchr(demangled, '('); diff --git a/src/mongo/util/log.h b/src/mongo/util/log.h index bca7c87a33a..68a16c34493 100644 --- a/src/mongo/util/log.h +++ b/src/mongo/util/log.h @@ -236,7 +236,7 @@ extern Tee* const startupWarningsLog; // Things put here get reported in MMS /** * Write the current context (backtrace), along with the optional "msg". */ -void logContext(const char* msg = NULL); +void logContext(const char* msg = nullptr); /** * Turns the global log manager into a plain console logger (no adornments). diff --git a/src/mongo/util/md5.cpp b/src/mongo/util/md5.cpp index 383b046c9ac..6a2867f0007 100644 --- a/src/mongo/util/md5.cpp +++ b/src/mongo/util/md5.cpp @@ -159,7 +159,7 @@ static void md5_process(md5_state_t* pms, const md5_byte_t* data /*[64]*/) { * On little-endian machines, we can process properly aligned * data without copying it. 
*/ - if (!((data - (const md5_byte_t*)0) & 3)) { + if (!((data - (const md5_byte_t*)nullptr) & 3)) { /* data are properly aligned */ X = (const md5_word_t*)data; } else { diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp index 6d7b2d1e8f7..bb616b4452e 100644 --- a/src/mongo/util/net/sock.cpp +++ b/src/mongo/util/net/sock.cpp @@ -190,7 +190,7 @@ Socket::Socket(int fd, const SockAddr& remote) : _fd(fd), _remote(remote), _timeout(0), - _lastValidityCheckAtSecs(time(0)), + _lastValidityCheckAtSecs(time(nullptr)), _logLevel(logger::LogSeverity::Log()) { _init(); if (fd >= 0) { @@ -201,7 +201,7 @@ Socket::Socket(int fd, const SockAddr& remote) Socket::Socket(double timeout, logger::LogSeverity ll) : _logLevel(ll) { _fd = INVALID_SOCKET; _timeout = timeout; - _lastValidityCheckAtSecs = time(0); + _lastValidityCheckAtSecs = time(nullptr); _init(); } @@ -214,7 +214,7 @@ void Socket::_init() { _bytesIn = 0; _awaitingHandshake = true; #ifdef MONGO_CONFIG_SSL - _sslManager = 0; + _sslManager = nullptr; #endif } @@ -612,7 +612,7 @@ bool Socket::isStillConnected() { if (!isPollSupported()) return true; // nothing we can do - time_t now = time(0); + time_t now = time(nullptr); time_t idleTimeSecs = now - _lastValidityCheckAtSecs; // Only check once every 5 secs diff --git a/src/mongo/util/net/sock.h b/src/mongo/util/net/sock.h index 561cd1cd9e4..a9edfc71de6 100644 --- a/src/mongo/util/net/sock.h +++ b/src/mongo/util/net/sock.h @@ -196,7 +196,7 @@ public: * * This function may throw SocketException. */ - SSLPeerInfo doSSLHandshake(const char* firstBytes = NULL, int len = 0); + SSLPeerInfo doSSLHandshake(const char* firstBytes = nullptr, int len = 0); /** * @return the time when the socket was opened. 
diff --git a/src/mongo/util/net/sock_test.cpp b/src/mongo/util/net/sock_test.cpp index 14cc5721e29..bb246175eef 100644 --- a/src/mongo/util/net/sock_test.cpp +++ b/src/mongo/util/net/sock_test.cpp @@ -210,7 +210,7 @@ public: SocketFailPointTest() : _failPoint(getGlobalFailPointRegistry()->getFailPoint(kSocketFailPointName)), _sockets(socketPair(SOCK_STREAM)) { - ASSERT_TRUE(_failPoint != NULL); + ASSERT_TRUE(_failPoint != nullptr); ASSERT_TRUE(_sockets.first); ASSERT_TRUE(_sockets.second); } diff --git a/src/mongo/util/net/sockaddr.cpp b/src/mongo/util/net/sockaddr.cpp index be88e9c206e..9efa10ca409 100644 --- a/src/mongo/util/net/sockaddr.cpp +++ b/src/mongo/util/net/sockaddr.cpp @@ -280,7 +280,7 @@ std::string SockAddr::getAddr() const { case AF_INET6: { const int buflen = 128; char buffer[buflen]; - int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST); + int ret = getnameinfo(raw(), addressSize, buffer, buflen, nullptr, 0, NI_NUMERICHOST); massert( 13082, str::stream() << "getnameinfo error " << getAddrInfoStrError(ret), ret == 0); return buffer; diff --git a/src/mongo/util/net/ssl/detail/impl/engine_openssl.ipp b/src/mongo/util/net/ssl/detail/impl/engine_openssl.ipp index d143a358253..fc0ff1128b7 100644 --- a/src/mongo/util/net/ssl/detail/impl/engine_openssl.ipp +++ b/src/mongo/util/net/ssl/detail/impl/engine_openssl.ipp @@ -42,14 +42,14 @@ engine::engine(SSL_CTX* context, const std::string& remoteHostName) ::SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(ssl_, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); - ::BIO* int_bio = 0; + ::BIO* int_bio = nullptr; ::BIO_new_bio_pair(&int_bio, 0, &ext_bio_, 0); ::SSL_set_bio(ssl_, int_bio, int_bio); } engine::~engine() { if (SSL_get_app_data(ssl_)) { - SSL_set_app_data(ssl_, 0); + SSL_set_app_data(ssl_, nullptr); } ::BIO_free(ext_bio_); @@ -63,14 +63,14 @@ SSL* engine::native_handle() { engine::want engine::handshake(stream_base::handshake_type type, asio::error_code& ec) { return 
perform((type == asio::ssl::stream_base::client) ? &engine::do_connect : &engine::do_accept, - 0, + nullptr, 0, ec, - 0); + nullptr); } engine::want engine::shutdown(asio::error_code& ec) { - return perform(&engine::do_shutdown, 0, 0, ec, 0); + return perform(&engine::do_shutdown, nullptr, 0, ec, nullptr); } engine::want engine::write(const asio::const_buffer& data, diff --git a/src/mongo/util/net/ssl/impl/context_openssl.ipp b/src/mongo/util/net/ssl/impl/context_openssl.ipp index b0b48910b4a..ef3ba9cb992 100644 --- a/src/mongo/util/net/ssl/impl/context_openssl.ipp +++ b/src/mongo/util/net/ssl/impl/context_openssl.ipp @@ -29,7 +29,7 @@ namespace asio { namespace ssl { -context::context(context::method m) : handle_(0) { +context::context(context::method m) : handle_(nullptr) { ::ERR_clear_error(); switch (m) { @@ -257,11 +257,11 @@ context::context(context::method m) : handle_(0) { #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) default: - handle_ = ::SSL_CTX_new(0); + handle_ = ::SSL_CTX_new(nullptr); break; } - if (handle_ == 0) { + if (handle_ == nullptr) { asio::error_code ec(static_cast<int>(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "context"); } @@ -270,13 +270,13 @@ context::context(context::method m) : handle_(0) { #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::context(context&& other) { handle_ = other.handle_; - other.handle_ = 0; + other.handle_ = nullptr; } context& context::operator=(context&& other) { context tmp(ASIO_MOVE_CAST(context)(*this)); handle_ = other.handle_; - other.handle_ = 0; + other.handle_ = nullptr; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) @@ -284,7 +284,7 @@ context& context::operator=(context&& other) { context::~context() { if (handle_) { if (SSL_CTX_get_app_data(handle_)) { - SSL_CTX_set_app_data(handle_, 0); + SSL_CTX_set_app_data(handle_, nullptr); } ::SSL_CTX_free(handle_); diff --git 
a/src/mongo/util/net/ssl_manager_openssl.cpp b/src/mongo/util/net/ssl_manager_openssl.cpp index 59695f00504..3c178b9f0b2 100644 --- a/src/mongo/util/net/ssl_manager_openssl.cpp +++ b/src/mongo/util/net/ssl_manager_openssl.cpp @@ -738,7 +738,7 @@ SSLConnectionOpenSSL::SSLConnectionOpenSSL(SSL_CTX* context, ssl = SSL_new(context); std::string sslErr = - NULL != getSSLManager() ? getSSLManager()->getSSLErrorMessage(ERR_get_error()) : ""; + nullptr != getSSLManager() ? getSSLManager()->getSSLErrorMessage(ERR_get_error()) : ""; massert(15861, "Error creating new SSL object " + sslErr, ssl); BIO_new_bio_pair(&internalBIO, BUFFER_SIZE, &networkBIO, BUFFER_SIZE); @@ -794,7 +794,7 @@ SSLManagerOpenSSL::SSLManagerOpenSSL(const SSLParams& params, bool isServer) if (!clientPEM.empty()) { if (!_parseAndValidateCertificate( - clientPEM, clientPassword, &_sslConfiguration.clientSubjectName, NULL)) { + clientPEM, clientPassword, &_sslConfiguration.clientSubjectName, nullptr)) { uasserted(16941, "ssl initialization problem"); } } @@ -1069,7 +1069,7 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile, SSLX509Name* subjectName, Date_t* serverCertificateExpirationDate) { BIO* inBIO = BIO_new(BIO_s_file()); - if (inBIO == NULL) { + if (inBIO == nullptr) { error() << "failed to allocate BIO object: " << getSSLErrorMessage(ERR_get_error()); return false; } @@ -1082,8 +1082,8 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile, } X509* x509 = PEM_read_bio_X509( - inBIO, NULL, &SSLManagerOpenSSL::password_cb, static_cast<void*>(&keyPassword)); - if (x509 == NULL) { + inBIO, nullptr, &SSLManagerOpenSSL::password_cb, static_cast<void*>(&keyPassword)); + if (x509 == nullptr) { error() << "cannot retrieve certificate from keyfile: " << keyFile << ' ' << getSSLErrorMessage(ERR_get_error()); return false; @@ -1091,7 +1091,7 @@ bool SSLManagerOpenSSL::_parseAndValidateCertificate(const std::string& keyFile, ON_BLOCK_EXIT([&] { 
X509_free(x509); }); *subjectName = getCertificateSubjectX509Name(x509); - if (serverCertificateExpirationDate != NULL) { + if (serverCertificateExpirationDate != nullptr) { unsigned long long notBeforeMillis = _convertASN1ToMillis(X509_get_notBefore(x509)); if (notBeforeMillis == 0) { error() << "date conversion failed"; @@ -1166,7 +1166,7 @@ bool SSLManagerOpenSSL::_setupPEM(SSL_CTX* context, Status SSLManagerOpenSSL::_setupCA(SSL_CTX* context, const std::string& caFile) { // Set the list of CAs sent to clients STACK_OF(X509_NAME)* certNames = SSL_load_client_CA_file(caFile.c_str()); - if (certNames == NULL) { + if (certNames == nullptr) { return Status(ErrorCodes::InvalidSSLConfiguration, str::stream() << "cannot read certificate authority file: " << caFile << " " << getSSLErrorMessage(ERR_get_error())); @@ -1174,7 +1174,7 @@ Status SSLManagerOpenSSL::_setupCA(SSL_CTX* context, const std::string& caFile) SSL_CTX_set_client_CA_list(context, certNames); // Load trusted CA - if (SSL_CTX_load_verify_locations(context, caFile.c_str(), NULL) != 1) { + if (SSL_CTX_load_verify_locations(context, caFile.c_str(), nullptr) != 1) { return Status(ErrorCodes::InvalidSSLConfiguration, str::stream() << "cannot read certificate authority file: " << caFile << " " << getSSLErrorMessage(ERR_get_error())); @@ -1426,8 +1426,8 @@ bool SSLManagerOpenSSL::_doneWithSSLOp(SSLConnectionOpenSSL* conn, int status) { } SSLConnectionInterface* SSLManagerOpenSSL::connect(Socket* socket) { - std::unique_ptr<SSLConnectionOpenSSL> sslConn = - std::make_unique<SSLConnectionOpenSSL>(_clientContext.get(), socket, (const char*)NULL, 0); + std::unique_ptr<SSLConnectionOpenSSL> sslConn = std::make_unique<SSLConnectionOpenSSL>( + _clientContext.get(), socket, (const char*)nullptr, 0); const auto undotted = removeFQDNRoot(socket->remoteAddr().hostOrIp()); int ret = ::SSL_set_tlsext_host_name(sslConn->ssl, undotted.c_str()); @@ -1497,7 +1497,7 @@ StatusWith<SSLPeerInfo> 
SSLManagerOpenSSL::parseAndValidatePeerCertificate( X509* peerCert = SSL_get_peer_certificate(conn); - if (NULL == peerCert) { // no certificate presented by peer + if (nullptr == peerCert) { // no certificate presented by peer if (_weakValidation) { // do not give warning if certificate warnings are suppressed if (!_suppressNoCertificateWarning) { @@ -1561,9 +1561,9 @@ StatusWith<SSLPeerInfo> SSLManagerOpenSSL::parseAndValidatePeerCertificate( StringBuilder certificateNames; STACK_OF(GENERAL_NAME)* sanNames = static_cast<STACK_OF(GENERAL_NAME)*>( - X509_get_ext_d2i(peerCert, NID_subject_alt_name, NULL, NULL)); + X509_get_ext_d2i(peerCert, NID_subject_alt_name, nullptr, nullptr)); - if (sanNames != NULL) { + if (sanNames != nullptr) { int sanNamesList = sk_GENERAL_NAME_num(sanNames); certificateNames << "SAN(s): "; for (int i = 0; i < sanNamesList; i++) { diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp index c9bd249d478..32e98f7fc1d 100644 --- a/src/mongo/util/processinfo_linux.cpp +++ b/src/mongo/util/processinfo_linux.cpp @@ -255,8 +255,8 @@ public: char fstr[1024] = {0}; f = fopen(fname, "r"); - if (f != NULL) { - if (fgets(fstr, 1023, f) != NULL) + if (f != nullptr) { + if (fgets(fstr, 1023, f) != nullptr) fstr[strlen(fstr) < 1 ? 0 : strlen(fstr) - 1] = '\0'; fclose(f); } @@ -272,10 +272,10 @@ public: procCount = 0; f = fopen("/proc/cpuinfo", "r"); - if (f == NULL) + if (f == nullptr) return; - while (fgets(fstr, 1023, f) != NULL && !feof(f)) { + while (fgets(fstr, 1023, f) != nullptr && !feof(f)) { // until the end of the file fstr[strlen(fstr) < 1 ? 
0 : strlen(fstr) - 1] = '\0'; if (strncmp(fstr, "processor ", 10) == 0 || strncmp(fstr, "processor\t:", 11) == 0) diff --git a/src/mongo/util/progress_meter.cpp b/src/mongo/util/progress_meter.cpp index 9ad977581ec..6287cbd7552 100644 --- a/src/mongo/util/progress_meter.cpp +++ b/src/mongo/util/progress_meter.cpp @@ -45,7 +45,7 @@ void ProgressMeter::reset(unsigned long long total, int secondsBetween, int chec _done = 0; _hits = 0; - _lastTime = (int)time(0); + _lastTime = (int)time(nullptr); _active = true; } @@ -62,7 +62,7 @@ bool ProgressMeter::hit(int n) { if (_hits % _checkInterval) return false; - int t = (int)time(0); + int t = (int)time(nullptr); if (t - _lastTime < _secondsBetween) return false; diff --git a/src/mongo/util/shared_buffer.h b/src/mongo/util/shared_buffer.h index 00000d0a3d5..c026f92f848 100644 --- a/src/mongo/util/shared_buffer.h +++ b/src/mongo/util/shared_buffer.h @@ -95,7 +95,7 @@ public: } char* get() const { - return _holder ? _holder->data() : NULL; + return _holder ? _holder->data() : nullptr; } explicit operator bool() const { diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp index 707a76f3233..f6f81234c11 100644 --- a/src/mongo/util/signal_handlers.cpp +++ b/src/mongo/util/signal_handlers.cpp @@ -180,7 +180,7 @@ void signalProcessingThread(LogFileStatus rotate) { switch (actualSignal) { case SIGUSR1: // log rotate signal - signalTimeSeconds = time(0); + signalTimeSeconds = time(nullptr); if (signalTimeSeconds <= lastSignalTimeSeconds) { // ignore multiple signals in the same or earlier second. break; @@ -227,7 +227,7 @@ void startSignalProcessingThread(LogFileStatus rotate) { stdx::thread(eventProcessingThread).detach(); #else // Mask signals in the current (only) thread. All new threads will inherit this mask. 
- invariant(pthread_sigmask(SIG_SETMASK, &asyncSignals, 0) == 0); + invariant(pthread_sigmask(SIG_SETMASK, &asyncSignals, nullptr) == 0); // Spawn a thread to capture the signals we just masked off. stdx::thread(signalProcessingThread, rotate).detach(); #endif diff --git a/src/mongo/util/stacktrace_posix.cpp b/src/mongo/util/stacktrace_posix.cpp index 9eaed27e06c..996cdb52b16 100644 --- a/src/mongo/util/stacktrace_posix.cpp +++ b/src/mongo/util/stacktrace_posix.cpp @@ -61,7 +61,7 @@ const int maxBackTraceFrames = 100; /// Optional string containing extra unwinding information. Should take the form of a /// JSON document. -std::string* soMapJson = NULL; +std::string* soMapJson = nullptr; /** * Returns the "basename" of a path. The returned StringData is valid until the data referenced @@ -188,9 +188,9 @@ void printStackTrace(std::ostream& os) { Dl_info& dlinfo(dlinfoForFrames[i]); if (!dladdr(addresses[i], &dlinfo)) { dlinfo.dli_fname = unknownFileName; - dlinfo.dli_fbase = NULL; - dlinfo.dli_sname = NULL; - dlinfo.dli_saddr = NULL; + dlinfo.dli_fbase = nullptr; + dlinfo.dli_sname = nullptr; + dlinfo.dli_saddr = nullptr; } os << ' ' << addresses[i]; } diff --git a/src/mongo/util/str.h b/src/mongo/util/str.h index b2ee26dbbdb..f36aa3b0f6d 100644 --- a/src/mongo/util/str.h +++ b/src/mongo/util/str.h @@ -128,7 +128,7 @@ inline bool endsWith(const char* p, const char* suffix) { /** find char x, and return rest of the string thereafter, or an empty string if not found */ inline const char* after(const char* s, char x) { const char* p = strchr(s, x); - return (p != 0) ? p + 1 : ""; + return (p != nullptr) ? 
p + 1 : ""; } inline mongo::StringData after(mongo::StringData s, char x) { auto pos = s.find(x); @@ -138,7 +138,7 @@ inline mongo::StringData after(mongo::StringData s, char x) { /** find string x, and return rest of the string thereafter, or an empty string if not found */ inline const char* after(const char* s, const char* x) { const char* p = strstr(s, x); - return (p != 0) ? p + strlen(x) : ""; + return (p != nullptr) ? p + strlen(x) : ""; } inline mongo::StringData after(mongo::StringData s, mongo::StringData x) { auto pos = s.find(x); diff --git a/src/mongo/util/text_test.cpp b/src/mongo/util/text_test.cpp index cee43fd57cc..77d6561e795 100644 --- a/src/mongo/util/text_test.cpp +++ b/src/mongo/util/text_test.cpp @@ -43,7 +43,7 @@ static std::vector<std::string> svec(const char* first, ...) { va_list ap; va_start(ap, first); const char* curr; - while (NULL != (curr = va_arg(ap, const char*))) { + while (nullptr != (curr = va_arg(ap, const char*))) { result.push_back(curr); } va_end(ap); @@ -52,7 +52,7 @@ static std::vector<std::string> svec(const char* first, ...) { } TEST(WindowsCommandLineConstruction, EmptyCommandLine) { - ASSERT_EQUALS("", constructUtf8WindowsCommandLine(svec(NULL))); + ASSERT_EQUALS("", constructUtf8WindowsCommandLine(svec(nullptr))); } TEST(WindowsCommandLineConstruction, NothingToQuote) { diff --git a/src/mongo/util/time_support.cpp b/src/mongo/util/time_support.cpp index f3c1fee7a33..b40ebcb19ee 100644 --- a/src/mongo/util/time_support.cpp +++ b/src/mongo/util/time_support.cpp @@ -143,7 +143,7 @@ std::string time_t_to_String_short(time_t t) { // colonsOk should be false when creating filenames string terseCurrentTime(bool colonsOk) { struct tm t; - time_t_to_Struct(time(0), &t); + time_t_to_Struct(time(nullptr), &t); const char* fmt = (colonsOk ? 
"%Y-%m-%dT%H:%M:%S" : "%Y-%m-%dT%H-%M-%S"); char buf[32]; @@ -921,13 +921,13 @@ unsigned long long curTimeMicros64() { #include <sys/time.h> unsigned long long curTimeMillis64() { timeval tv; - gettimeofday(&tv, NULL); + gettimeofday(&tv, nullptr); return ((unsigned long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000; } unsigned long long curTimeMicros64() { timeval tv; - gettimeofday(&tv, NULL); + gettimeofday(&tv, nullptr); return (((unsigned long long)tv.tv_sec) * 1000 * 1000) + tv.tv_usec; } #endif diff --git a/src/mongo/util/unowned_ptr_test.cpp b/src/mongo/util/unowned_ptr_test.cpp index ac1bf86067f..4a6ec0b1d34 100644 --- a/src/mongo/util/unowned_ptr_test.cpp +++ b/src/mongo/util/unowned_ptr_test.cpp @@ -46,7 +46,7 @@ TEST(UnownedPtr, Construction) { ASSERT_EQUALS(aNullPtr, unowned_ptr<int>()); ASSERT_EQUALS(aNullPtr, unowned_ptr<int>({})); ASSERT_EQUALS(aNullPtr, unowned_ptr<int>(nullptr)); - ASSERT_EQUALS(aNullPtr, unowned_ptr<int>(NULL)); + ASSERT_EQUALS(aNullPtr, unowned_ptr<int>(nullptr)); ASSERT_EQUALS(p1.get(), unowned_ptr<int>(p1.get())); ASSERT_EQUALS(p1.get(), unowned_ptr<int>(p1)); ASSERT_EQUALS(p2.get(), unowned_ptr<int>(p2)); @@ -59,7 +59,7 @@ TEST(UnownedPtr, Construction) { ASSERT_EQUALS(aNullPtr, unowned_ptr<const int>()); ASSERT_EQUALS(aNullPtr, unowned_ptr<const int>({})); ASSERT_EQUALS(aNullPtr, unowned_ptr<const int>(nullptr)); - ASSERT_EQUALS(aNullPtr, unowned_ptr<const int>(NULL)); + ASSERT_EQUALS(aNullPtr, unowned_ptr<const int>(nullptr)); ASSERT_EQUALS(p1.get(), unowned_ptr<const int>(p1.get())); ASSERT_EQUALS(cp1.get(), unowned_ptr<const int>(cp1.get())); ASSERT_EQUALS(p1.get(), unowned_ptr<const int>(p1)); @@ -86,7 +86,7 @@ TEST(UnownedPtr, Assignment) { ASSERT_EQUALS(aNullPtr, (unowned_ptr<int>() = {})); ASSERT_EQUALS(aNullPtr, (unowned_ptr<int>() = nullptr)); - ASSERT_EQUALS(aNullPtr, (unowned_ptr<int>() = NULL)); + ASSERT_EQUALS(aNullPtr, (unowned_ptr<int>() = nullptr)); ASSERT_EQUALS(p1.get(), (unowned_ptr<int>() = p1.get())); 
ASSERT_EQUALS(p1.get(), (unowned_ptr<int>() = p1)); ASSERT_EQUALS(p2.get(), (unowned_ptr<int>() = p2)); @@ -98,7 +98,7 @@ TEST(UnownedPtr, Assignment) { ASSERT_EQUALS(aNullPtr, (unowned_ptr<const int>() = {})); ASSERT_EQUALS(aNullPtr, (unowned_ptr<const int>() = nullptr)); - ASSERT_EQUALS(aNullPtr, (unowned_ptr<const int>() = NULL)); + ASSERT_EQUALS(aNullPtr, (unowned_ptr<const int>() = nullptr)); ASSERT_EQUALS(p1.get(), (unowned_ptr<const int>() = p1.get())); ASSERT_EQUALS(cp1.get(), (unowned_ptr<const int>() = cp1.get())); ASSERT_EQUALS(p1.get(), (unowned_ptr<const int>() = p1)); |