Diffstat (limited to 'src/mongo')
41 files changed, 885 insertions, 367 deletions
diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp index 890d3c4580f..5a9360605d5 100644 --- a/src/mongo/bson/bson_obj_test.cpp +++ b/src/mongo/bson/bson_obj_test.cpp @@ -608,4 +608,24 @@ TEST(BSONObj, ShareOwnershipWith) { ASSERT_BSONOBJ_EQ(obj, BSON("a" << 1)); } +TEST(BSONObj, addField) { + auto obj = BSON("a" << 1 << "b" << 2); + + // Check that replacing a field maintains the same ordering and doesn't add a field. + auto objA2 = BSON("a" << 2); + auto elemA2 = objA2.firstElement(); + auto addFieldA2 = obj.addField(elemA2); + ASSERT_EQ(addFieldA2.nFields(), 2); + ASSERT_BSONOBJ_EQ(addFieldA2, BSON("a" << 2 << "b" << 2)); + + // Check that adding a new field places it at the end. + auto objC3 = BSON("c" << 3); + auto elemC3 = objC3.firstElement(); + auto addFieldC3 = obj.addField(elemC3); + ASSERT_BSONOBJ_EQ(addFieldC3, BSON("a" << 1 << "b" << 2 << "c" << 3)); + + // Check that after all this obj is unchanged. + ASSERT_BSONOBJ_EQ(obj, BSON("a" << 1 << "b" << 2)); +} + } // unnamed namespace diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp index 2f7133a2a63..06f446c9c05 100644 --- a/src/mongo/bson/bsonobj.cpp +++ b/src/mongo/bson/bsonobj.cpp @@ -503,6 +503,26 @@ bool BSONObj::getObjectID(BSONElement& e) const { return false; } +BSONObj BSONObj::addField(const BSONElement& field) const { + if (!field.ok()) + return copy(); + BSONObjBuilder b; + StringData name = field.fieldNameStringData(); + bool added = false; + for (auto e : *this) { + if (e.fieldNameStringData() == name) { + if (!added) + b.append(field); + added = true; + } else { + b.append(e); + } + } + if (!added) + b.append(field); + return b.obj(); +} + BSONObj BSONObj::removeField(StringData name) const { BSONObjBuilder b; BSONObjIterator i(*this); diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h index 8d62bc8fd3d..15e2326c0db 100644 --- a/src/mongo/bson/bsonobj.h +++ b/src/mongo/bson/bsonobj.h @@ -245,6 +245,13 @@ public: /** note: addFields always adds _id even if not specified */ int addFields(BSONObj& from, std::set<std::string>& fields); /* returns n added */ + /** + * Add specific field to the end of the object if it did not exist, otherwise replace it + * preserving original field order. Returns newly built object. Returns copy of this for empty + * field. + */ + BSONObj addField(const BSONElement& field) const; + /** remove specified field and return a new object with the remaining fields. 
slowish as builds a full new object */ diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 83fd4b3ec40..2ce2c26873a 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -504,7 +504,7 @@ env.Library( 'commands/test_commands_enabled', 'service_context', '$BUILD_DIR/mongo/util/uuid', - '$BUILD_DIR/mongo/util/uuid_catalog', + '$BUILD_DIR/mongo/db/catalog/uuid_catalog', ], ) @@ -608,7 +608,7 @@ env.Library( '$BUILD_DIR/mongo/base', 'repl/serveronly', 'views/views_mongod', - '$BUILD_DIR/mongo/util/uuid_catalog', + '$BUILD_DIR/mongo/db/catalog/uuid_catalog', ], ) @@ -1230,6 +1230,8 @@ env.Library( ], LIBDEPS= [ '$BUILD_DIR/mongo/db/repl/optime', + '$BUILD_DIR/mongo/db/catalog/uuid_catalog', + '$BUILD_DIR/mongo/db/catalog/database_holder', ], ) diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index 96796d1ff23..8e62bb4b74b 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -149,6 +149,44 @@ env.Library( ) env.Library( + target='uuid_catalog', + source=[ + 'namespace_uuid_cache.cpp', + 'uuid_catalog.cpp', + ], + LIBDEPS=[ + 'collection', + 'database', + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/namespace_string', + '$BUILD_DIR/mongo/db/storage/storage_options', + '$BUILD_DIR/mongo/util/decorable', + '$BUILD_DIR/mongo/util/uuid', + ], +) + +env.CppUnitTest( + target='namespace_uuid_cache_test', + source=[ + 'namespace_uuid_cache_test.cpp' + ], + LIBDEPS=[ + 'uuid_catalog', + ], +) + +env.CppUnitTest( + target='uuid_catalog_test', + source=[ + 'uuid_catalog_test.cpp', + ], + LIBDEPS=[ + 'uuid_catalog', + '$BUILD_DIR/mongo/db/service_context', + ] + ) + +env.Library( target='catalog', source=[ "collection_compact.cpp", diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp index 69bac1c04ff..3f26e0c5bd6 100644 --- a/src/mongo/db/catalog/capped_utils.cpp +++ b/src/mongo/db/catalog/capped_utils.cpp @@ -262,15 +262,18 @@ mongo::Status mongo::convertToCapped(OperationContext* opCtx, } BackgroundOperation::assertNoBgOpInProgForDb(dbname); + auto opObserver = getGlobalServiceContext()->getOpObserver(); std::string shortTmpName = str::stream() << "tmp.convertToCapped." 
<< shortSource; NamespaceString longTmpName(dbname, shortTmpName); - if (db->getCollection(opCtx, longTmpName)) { + if (auto existingTmpColl = db->getCollection(opCtx, longTmpName)) { WriteUnitOfWork wunit(opCtx); Status status = db->dropCollection(opCtx, longTmpName.ns()); if (!status.isOK()) return status; + opObserver->onDropCollection(opCtx, longTmpName, existingTmpColl->uuid()); + wunit.commit(); } { @@ -283,7 +286,8 @@ mongo::Status mongo::convertToCapped(OperationContext* opCtx, } } - OptionalCollectionUUID uuid = db->getCollection(opCtx, longTmpName)->uuid(); + OptionalCollectionUUID origUUID = db->getCollection(opCtx, collectionName)->uuid(); + OptionalCollectionUUID cappedUUID = db->getCollection(opCtx, longTmpName)->uuid(); { WriteUnitOfWork wunit(opCtx); @@ -299,7 +303,7 @@ mongo::Status mongo::convertToCapped(OperationContext* opCtx, return status; getGlobalServiceContext()->getOpObserver()->onConvertToCapped( - opCtx, collectionName, uuid, size); + opCtx, collectionName, origUUID, cappedUUID, size); wunit.commit(); } diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index 8e7316c0fa8..da81803a1b0 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -45,6 +45,8 @@ #include "mongo/db/catalog/database_catalog_entry.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_create.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/d_concurrency.h" @@ -206,6 +208,17 @@ CollectionImpl::~CollectionImpl() { _recordStore->setCappedCallback(nullptr); _cappedNotifier->kill(); } + + if (_uuid) { + if (auto opCtx = cc().getOperationContext()) { + auto& uuidCatalog = UUIDCatalog::get(opCtx); + invariant(uuidCatalog.lookupCollectionByUUID(_uuid.get()) != _this); + auto& cache = NamespaceUUIDCache::get(opCtx); + // TODO(geert): cache.verifyNotCached(ns(), uuid().get()); + cache.evictNamespace(ns()); + } + LOG(2) << "destructed collection " << ns() << " with UUID " << uuid()->toString(); + } _magic = 0; } diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h index 043b98ef424..c65983d39e6 100644 --- a/src/mongo/db/catalog/collection_impl.h +++ b/src/mongo/db/catalog/collection_impl.h @@ -32,6 +32,7 @@ #include "mongo/db/catalog/collection_catalog_entry.h" namespace mongo { +class UUIDCatalog; class CollectionImpl final : virtual public Collection::Impl, virtual CappedCallback, virtual UpdateNotifier { diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp index 8ef6b79ad00..9af83620ab6 100644 --- a/src/mongo/db/catalog/create_collection.cpp +++ b/src/mongo/db/catalog/create_collection.cpp @@ -31,6 +31,9 @@ #include "mongo/db/catalog/create_collection.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/uuid_catalog.h" +#include "mongo/db/commands.h" #include "mongo/db/concurrency/write_conflict_exception.h" #include "mongo/db/curop.h" #include "mongo/db/db_raii.h" @@ -39,26 +42,28 @@ #include "mongo/db/ops/insert.h" #include "mongo/db/repl/replication_coordinator_global.h" -mongo::Status mongo::createCollection(OperationContext* opCtx, - const std::string& dbName, - const BSONObj& cmdObj, - const BSONObj& idIndex) { +namespace mongo { +namespace { +/** + * Shared part 
of the implementation of the createCollection versions for replicated and regular + * collection creation. + */ +Status createCollection(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + const BSONObj& idIndex, + CollectionOptions::ParseKind kind) { BSONObjIterator it(cmdObj); - // Extract ns from first cmdObj element. + // Skip the first cmdObj element. BSONElement firstElt = it.next(); - uassert(ErrorCodes::TypeMismatch, - str::stream() << "Expected first element to be of type String in: " << cmdObj, - firstElt.type() == BSONType::String); - uassert(15888, "must pass name of collection to create", !firstElt.valueStringData().empty()); + invariant(firstElt.fieldNameStringData() == "create"); - Status status = userAllowedCreateNS(dbName, firstElt.valueStringData()); + Status status = userAllowedCreateNS(nss.db(), nss.coll()); if (!status.isOK()) { return status; } - const NamespaceString nss(dbName, firstElt.valueStringData()); - // Build options object from remaining cmdObj elements. BSONObjBuilder optionsBuilder; while (it.more()) { @@ -74,7 +79,7 @@ mongo::Status mongo::createCollection(OperationContext* opCtx, options.hasField("$nExtents")); MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - Lock::DBLock dbXLock(opCtx, dbName, MODE_X); + Lock::DBLock dbXLock(opCtx, nss.db(), MODE_X); OldClientContext ctx(opCtx, nss.ns()); if (opCtx->writesAreReplicated() && !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(opCtx, nss)) { @@ -86,13 +91,8 @@ mongo::Status mongo::createCollection(OperationContext* opCtx, // Create collection. const bool createDefaultIndexes = true; - status = userCreateNS(opCtx, - ctx.db(), - nss.ns(), - options, - CollectionOptions::parseForCommand, - createDefaultIndexes, - idIndex); + status = + userCreateNS(opCtx, ctx.db(), nss.ns(), options, kind, createDefaultIndexes, idIndex); if (!status.isOK()) { return status; } @@ -102,3 +102,106 @@ mongo::Status mongo::createCollection(OperationContext* opCtx, MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "create", nss.ns()); return Status::OK(); } +} // namespace + +Status createCollection(OperationContext* opCtx, + const std::string& dbName, + const BSONObj& cmdObj, + const BSONObj& idIndex) { + return createCollection(opCtx, + Command::parseNsCollectionRequired(dbName, cmdObj), + cmdObj, + idIndex, + CollectionOptions::parseForCommand); +} + +Status createCollectionForApplyOps(OperationContext* opCtx, + const std::string& dbName, + const BSONElement& ui, + const BSONObj& cmdObj, + const BSONObj& idIndex) { + invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); + auto db = dbHolder().get(opCtx, dbName); + const NamespaceString newCollName(Command::parseNsCollectionRequired(dbName, cmdObj)); + auto newCmd = cmdObj; + + // If a UUID is given, see if we need to rename a collection out of the way, and whether the + // collection already exists under a different name. If so, rename it into place. As this is + // done during replay of the oplog, the operations do not need to be atomic, just idempotent. + // We need to do the renaming part in a separate transaction, as we cannot transactionally + // create a database on MMAPv1, which could result in createCollection failing if the database + // does not yet exist. + if (ui.ok()) { + MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { + WriteUnitOfWork wunit(opCtx); + // Options need the field to be named "uuid", so parse/recreate. 
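As an aside, this is where the new BSONObj::addField from the top of this change gets used: the UUID parsed from 'ui' has to be re-emitted as an element literally named "uuid" before it can be spliced into the create command. A minimal sketch of the addField contract, assuming only the BSON helpers already used in this patch (the real splice follows just below):

    // addField replaces an existing field in place, or appends a new one at the end.
    BSONObj cmd = BSON("create" << "foo" << "temp" << true);
    BSONObj patch = BSON("temp" << false);
    BSONObj replaced = cmd.addField(patch.firstElement());
    // replaced == { create: "foo", temp: false }, original field order preserved.

    UUID uuid = UUID::gen();
    BSONObj uuidObj = uuid.toBSON();                  // single element named "uuid"
    BSONObj withUuid = replaced.addField(uuidObj.firstElement());
    // "uuid" was not present, so it is appended at the end, as in the splice below.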
+ auto uuid = uassertStatusOK(UUID::parse(ui)); + uassert(ErrorCodes::InvalidUUID, + "Invalid UUID in applyOps create command: " + uuid.toString(), + uuid.isRFC4122v4()); + + auto& catalog = UUIDCatalog::get(opCtx); + auto currentName = catalog.lookupNSSByUUID(uuid); + OpObserver* opObserver = getGlobalServiceContext()->getOpObserver(); + if (currentName == newCollName) + return Status::OK(); + + // In the case of oplog replay, a future command may have created or renamed a + // collection with that same name. In that case, renaming this future collection to a + // random temporary name is correct: once all entries are replayed no temporary names + // will remain. + // On MMAPv1 the rename can result in index names that are too long. However this should + // only happen for initial sync and "resync collection" for rollback, so we can let the + // error propagate resulting in an abort and restart of the initial sync or result in + // rollback to fassert, requiring a resync of that node. + const bool stayTemp = true; + if (auto futureColl = db ? db->getCollection(opCtx, newCollName) : nullptr) { + auto tmpName = NamespaceString(newCollName.db(), "tmp" + UUID::gen().toString()); + Status status = + db->renameCollection(opCtx, newCollName.ns(), tmpName.ns(), stayTemp); + if (!status.isOK()) + return status; + opObserver->onRenameCollection(opCtx, + newCollName, + tmpName, + futureColl->uuid(), + /*dropTarget*/ false, + /*dropTargetUUID*/ {}, + /*dropSourceUUID*/ {}, + stayTemp); + } + + // If the collection with the requested UUID already exists, but with a different name, + // just rename it to 'newCollName'. + if (catalog.lookupCollectionByUUID(uuid)) { + Status status = + db->renameCollection(opCtx, currentName.ns(), newCollName.ns(), stayTemp); + if (!status.isOK()) + return status; + opObserver->onRenameCollection(opCtx, + currentName, + newCollName, + uuid, + /*dropTarget*/ false, + /*dropTargetUUID*/ {}, + /*dropSourceUUID*/ {}, + stayTemp); + + wunit.commit(); + return Status::OK(); + } + + // A new collection with the specific UUID must be created, so add the UUID to the + // creation options. Regular user collection creation commands cannot do this. + auto uuidObj = uuid.toBSON(); + newCmd = cmdObj.addField(uuidObj.firstElement()); + wunit.commit(); + } + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "createCollectionForApplyOps", newCollName.ns()); + } + + return createCollection( + opCtx, newCollName, newCmd, idIndex, CollectionOptions::parseForStorage); +} + +} // namespace mongo diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h index 73dd82bebd8..8b8316f8629 100644 --- a/src/mongo/db/catalog/create_collection.h +++ b/src/mongo/db/catalog/create_collection.h @@ -34,6 +34,7 @@ namespace mongo { class BSONObj; class OperationContext; +class BSONElement; /** * Creates a collection as described in "cmdObj" on the database "dbName". Creates the collection's @@ -44,4 +45,17 @@ Status createCollection(OperationContext* opCtx, const std::string& dbName, const BSONObj& cmdObj, const BSONObj& idIndex = BSONObj()); + +/** + * As above, but only used by replication to apply operations. This allows recreating collections + * with specific UUIDs (if ui is given), and in that case will rename any existing collections with + * the same name and a UUID to a temporary name. If ui is not given, an existing collection will + * result in an error. 
+ */ +Status createCollectionForApplyOps(OperationContext* opCtx, + const std::string& dbName, + const BSONElement& ui, + const BSONObj& cmdObj, + const BSONObj& idIndex = BSONObj()); + } // namespace mongo diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 387d96cf98b..9933ea6f36c 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -44,6 +44,8 @@ #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database_catalog_entry.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/clientcursor.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/write_conflict_exception.h" @@ -66,8 +68,6 @@ #include "mongo/db/views/view_catalog.h" #include "mongo/util/assert_util.h" #include "mongo/util/log.h" -#include "mongo/util/namespace_uuid_cache.h" -#include "mongo/util/uuid_catalog.h" namespace mongo { namespace { @@ -584,11 +584,8 @@ Collection* DatabaseImpl::getCollection(OperationContext* opCtx, const Namespace Collection* found = it->second; if (enableCollectionUUIDs) { NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); - CollectionOptions found_options = found->getCatalogEntry()->getCollectionOptions(opCtx); - if (found_options.uuid) { - CollectionUUID uuid = found_options.uuid.get(); - cache.ensureNamespaceInCache(nss, uuid); - } + if (auto uuid = found->uuid()) + cache.ensureNamespaceInCache(nss, uuid.get()); } return found; } @@ -698,11 +695,16 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx, invariant(opCtx->lockState()->isDbLockedForMode(name(), MODE_X)); invariant(!options.isView()); + CollectionOptions optionsWithUUID = options; + if (enableCollectionUUIDs && !optionsWithUUID.uuid) + optionsWithUUID.uuid.emplace(CollectionUUID::gen()); + NamespaceString nss(ns); - _checkCanCreateCollection(opCtx, nss, options); + _checkCanCreateCollection(opCtx, nss, optionsWithUUID); audit::logCreateCollection(&cc(), ns); - Status status = _dbEntry->createCollection(opCtx, ns, options, true /*allocateDefaultSpace*/); + Status status = + _dbEntry->createCollection(opCtx, ns, optionsWithUUID, true /*allocateDefaultSpace*/); massertNoTraceStatusOK(status); opCtx->recoveryUnit()->registerChange(new AddCollectionChange(opCtx, this, ns)); @@ -714,8 +716,8 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx, if (createIdIndex) { if (collection->requiresIdIndex()) { - if (options.autoIndexId == CollectionOptions::YES || - options.autoIndexId == CollectionOptions::DEFAULT) { + if (optionsWithUUID.autoIndexId == CollectionOptions::YES || + optionsWithUUID.autoIndexId == CollectionOptions::DEFAULT) { const auto featureCompatibilityVersion = serverGlobalParams.featureCompatibility.version.load(); IndexCatalog* ic = collection->getIndexCatalog(); @@ -732,7 +734,7 @@ Collection* DatabaseImpl::createCollection(OperationContext* opCtx, } getGlobalServiceContext()->getOpObserver()->onCreateCollection( - opCtx, collection, nss, options, fullIdIndexSpec); + opCtx, collection, nss, optionsWithUUID, fullIdIndexSpec); return collection; } @@ -754,8 +756,10 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) { audit::logDropDatabase(opCtx->getClient(), name); + UUIDCatalog::get(opCtx).onCloseDatabase(db); for (auto&& coll : *db) { 
Top::get(opCtx->getClient()->getServiceContext()).collectionDropped(coll->ns().ns(), true); + NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns()); } dbHolder().close(opCtx, name, "database dropped"); @@ -893,8 +897,6 @@ auto mongo::userCreateNSImpl(OperationContext* opCtx, invariant(parseKind == CollectionOptions::parseForCommand); uassertStatusOK(db->createView(opCtx, ns, collectionOptions)); } else { - if (enableCollectionUUIDs && !collectionOptions.uuid) - collectionOptions.uuid.emplace(CollectionUUID::gen()); invariant( db->createCollection(opCtx, ns, collectionOptions, createDefaultIndexes, idIndex)); } diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h index bd46f1ebe40..4c288c08b3f 100644 --- a/src/mongo/db/catalog/drop_indexes.h +++ b/src/mongo/db/catalog/drop_indexes.h @@ -35,7 +35,7 @@ class NamespaceString; class OperationContext; /** - * Drops the index from collection "ns" that matches the "idxDescriptor" and populates + * Drops the index from collection "nss" that matches the "idxDescriptor" and populates * "result" with some statistics about the dropped index. */ Status dropIndexes(OperationContext* opCtx, diff --git a/src/mongo/util/namespace_uuid_cache.cpp b/src/mongo/db/catalog/namespace_uuid_cache.cpp index 59e40b73166..8919865e4b8 100644 --- a/src/mongo/util/namespace_uuid_cache.cpp +++ b/src/mongo/db/catalog/namespace_uuid_cache.cpp @@ -26,12 +26,24 @@ * it in the license file. */ +#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kStorage + +#include "mongo/platform/basic.h" + +#include <algorithm> + #include "namespace_uuid_cache.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/server_parameters.h" #include "mongo/util/assert_util.h" +#include "mongo/util/log.h" namespace mongo { +// TODO(geert): Enable checks by default +MONGO_EXPORT_STARTUP_SERVER_PARAMETER(debugCollectionUUIDs, bool, false); + const OperationContext::Decoration<NamespaceUUIDCache> NamespaceUUIDCache::get = OperationContext::declareDecoration<NamespaceUUIDCache>(); @@ -41,23 +53,31 @@ void NamespaceUUIDCache::ensureNamespaceInCache(const NamespaceString& nss, Coll if (it == _cache.end()) { // Add ns, uuid pair to the cache if it does not yet exist. invariant(_cache.try_emplace(ns, uuid).second == true); - } else { + LOG(2) << "NamespaceUUIDCache: registered namespace " << nss.ns() << " with UUID " << uuid; + + } else if (it->second != uuid) { // If ns exists in the cache, make sure it does not correspond to another uuid. 
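For reference, the relaxed behaviour here is what the updated unit test further down exercises. A minimal sketch of the cache's contract after this change, assuming the unittest harness and the types declared in namespace_uuid_cache.h:

    NamespaceUUIDCache cache;
    NamespaceString nss("test", "some_collection");       // illustrative namespace
    CollectionUUID uuid = CollectionUUID::gen();

    cache.ensureNamespaceInCache(nss, uuid);               // registers the pair
    cache.ensureNamespaceInCache(nss, uuid);               // same pair again: no-op
    cache.evictNamespace(nss);                             // no-op if already evicted

    // After eviction (drop/rename), the name may legitimately resolve to a new UUID:
    cache.ensureNamespaceInCache(nss, CollectionUUID::gen());

    // A conflicting UUID for a name that is still cached only uasserts (code 40418)
    // when the debugCollectionUUIDs startup parameter is enabled; otherwise it is
    // logged and tolerated, as implemented just above.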
- uassert(40418, - "Cannot continue operation on namespace " + ns + ": it now resolves " + - uuid.toString() + " instead of " + it->second.toString(), - it->second == uuid); + auto msg = "Namespace " + ns + " now resolves to UUID " + uuid.toString() + + " instead of UUID " + it->second.toString(); + LOG(1) << msg; + uassert(40418, "Cannot continue operation: " + msg, !debugCollectionUUIDs); } } -void NamespaceUUIDCache::onDropCollection(const NamespaceString& nss) { - _evictNamespace(nss); -} -void NamespaceUUIDCache::onRenameCollection(const NamespaceString& nss) { - _evictNamespace(nss); +void NamespaceUUIDCache::evictNamespace(const NamespaceString& nss) { + size_t evicted = _cache.erase(nss.ns()); + if (evicted) { + LOG(2) << "NamespaceUUIDCache: evicted namespace " << nss.ns(); + } + invariant(evicted <= 1); } -void NamespaceUUIDCache::_evictNamespace(const NamespaceString& nss) { - invariant(_cache.erase(nss.ns()) <= 1); +void NamespaceUUIDCache::evictNamespacesInDatabase(StringData dbname) { + for (auto&& it = _cache.begin(); it != _cache.end();) { + auto entry = it++; + if (entry->first.empty() || nsToDatabaseSubstring(entry->first) == dbname) + _cache.erase(entry); + } } + } // namespace mongo diff --git a/src/mongo/util/namespace_uuid_cache.h b/src/mongo/db/catalog/namespace_uuid_cache.h index 10f4bd75206..6f10d3bf80a 100644 --- a/src/mongo/util/namespace_uuid_cache.h +++ b/src/mongo/db/catalog/namespace_uuid_cache.h @@ -36,11 +36,14 @@ namespace mongo { +extern bool debugCollectionUUIDs; + /** * This class comprises the NamespaceString to UUID cache that prevents given namespaces * from resolving to multiple UUIDs. */ using CollectionUUID = UUID; +class Database; class NamespaceUUIDCache { MONGO_DISALLOW_COPYING(NamespaceUUIDCache); @@ -50,36 +53,36 @@ public: NamespaceUUIDCache() = default; /** - * This function adds the pair nss.ns(), uuid to the namespace uuid cache + * This function adds the pair nss.ns(), uuid to the namespace UUID cache * if it does not yet exist. If nss.ns() already exists in the cache with - * a different uuid, a UserException is thrown, so we can guarantee that + * a different UUID, a UserException is thrown, so we can guarantee that * an operation will always resolve the same name to the same collection, * even in presence of drops and renames. */ void ensureNamespaceInCache(const NamespaceString& nss, CollectionUUID uuid); /** - * This function removes the entry for nss.ns() from the namespace uuid + * This function removes the entry for nss.ns() from the namespace UUID * cache. Does nothing if the entry doesn't exist. It is called via the - * op observer when a collection is dropped. + * op observer when a collection is dropped or renamed. */ - void onDropCollection(const NamespaceString& nss); + void evictNamespace(const NamespaceString& nss); /** - * This function removes the entry for nss.ns() from the namespace uuid - * cache. Does nothing if the entry doesn't exist. It is called via the - * op observer when a collection is renamed. + * Same as above, but for all registered namespaces in the given dbname. */ - void onRenameCollection(const NamespaceString& nss); + void evictNamespacesInDatabase(StringData dbname); -private: /** - * This function removes the entry for nss.ns() from the namespace uuid - * cache. Does nothing if the entry doesn't exist. + * For testing only: verify that 'nss' is not cached. 
*/ - void _evictNamespace(const NamespaceString& nss); + void verifyNotCached(const NamespaceString& nss, CollectionUUID uuid) { + auto it = _cache.find(nss.ns()); + invariant(it == _cache.end() || it->second != uuid); + } + +private: using CollectionUUIDMap = StringMap<CollectionUUID>; CollectionUUIDMap _cache; }; - } // namespace mongo diff --git a/src/mongo/util/namespace_uuid_cache_test.cpp b/src/mongo/db/catalog/namespace_uuid_cache_test.cpp index 7834dcb4968..22034f8ee43 100644 --- a/src/mongo/util/namespace_uuid_cache_test.cpp +++ b/src/mongo/db/catalog/namespace_uuid_cache_test.cpp @@ -26,7 +26,7 @@ * it in the license file. */ -#include "mongo/util/namespace_uuid_cache.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" #include "mongo/unittest/unittest.h" @@ -43,8 +43,11 @@ TEST(NamespaceUUIDCache, ensureNamespaceInCache) { cache.ensureNamespaceInCache(nss, uuid); // Do nothing if we query for existing nss, uuid pairing. cache.ensureNamespaceInCache(nss, uuid); - // Uassert if we query for existing nss and uuid that does not match. - ASSERT_THROWS(cache.ensureNamespaceInCache(nss, uuidConflict), UserException); + + if (debugCollectionUUIDs) { + // Uassert if we query for existing nss and uuid that does not match. + ASSERT_THROWS(cache.ensureNamespaceInCache(nss, uuidConflict), UserException); + } } TEST(NamespaceUUIDCache, onDropCollection) { @@ -53,7 +56,7 @@ TEST(NamespaceUUIDCache, onDropCollection) { CollectionUUID newUuid = CollectionUUID::gen(); NamespaceString nss("test", "test_collection_ns"); cache.ensureNamespaceInCache(nss, uuid); - cache.onDropCollection(nss); + cache.evictNamespace(nss); // Add nss to the cache with a different uuid. This should not throw since // we evicted the previous entry from the cache. cache.ensureNamespaceInCache(nss, newUuid); diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp index 8fdf3551b7b..2e71e2be1a0 100644 --- a/src/mongo/db/catalog/rename_collection.cpp +++ b/src/mongo/db/catalog/rename_collection.cpp @@ -40,6 +40,7 @@ #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_create.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/write_conflict_exception.h" #include "mongo/db/curop.h" @@ -63,6 +64,37 @@ static void dropCollection(OperationContext* opCtx, Database* db, StringData col } } // namespace +Status renameCollectionForApplyOps(OperationContext* opCtx, + const std::string& dbName, + const BSONElement& ui, + const BSONObj& cmd) { + + const auto sourceNsElt = cmd.firstElement(); + const auto targetNsElt = cmd["to"]; + uassert(ErrorCodes::TypeMismatch, + "'renameCollection' must be of type String", + sourceNsElt.type() == BSONType::String); + uassert(ErrorCodes::TypeMismatch, + "'to' must be of type String", + targetNsElt.type() == BSONType::String); + + NamespaceString sourceNss(sourceNsElt.valueStringData()); + NamespaceString targetNss(targetNsElt.valueStringData()); + if (!ui.eoo()) { + auto statusWithUUID = UUID::parse(ui); + uassertStatusOK(statusWithUUID); + auto uuid = statusWithUUID.getValue(); + Collection* source = UUIDCatalog::get(opCtx).lookupCollectionByUUID(uuid); + uassert(ErrorCodes::NamespaceNotFound, + "cannot find collection with UUID " + uuid.toString(), + source); + sourceNss = source->ns(); + } + + return renameCollection( + opCtx, sourceNss, targetNss, cmd["dropTarget"].trueValue(), cmd["stayTemp"].trueValue()); +} + Status 
renameCollection(OperationContext* opCtx, const NamespaceString& source, const NamespaceString& target, diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h index c6a3f56b380..2c59831922a 100644 --- a/src/mongo/db/catalog/rename_collection.h +++ b/src/mongo/db/catalog/rename_collection.h @@ -27,6 +27,8 @@ */ #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" namespace mongo { class NamespaceString; @@ -43,4 +45,13 @@ Status renameCollection(OperationContext* opCtx, bool dropTarget, bool stayTemp); +/** + * As above, but may only be called from applyCommand_inlock. This allows creating a collection + * with a specific UUID for cross-database renames. + */ +Status renameCollectionForApplyOps(OperationContext* opCtx, + const std::string& dbName, + const BSONElement& ui, + const BSONObj& cmd); + } // namespace mongo diff --git a/src/mongo/util/uuid_catalog.cpp b/src/mongo/db/catalog/uuid_catalog.cpp index a73b1288b7a..ef5727a4bbc 100644 --- a/src/mongo/util/uuid_catalog.cpp +++ b/src/mongo/db/catalog/uuid_catalog.cpp @@ -31,14 +31,23 @@ #include "uuid_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/util/log.h" #include "mongo/util/uuid.h" namespace mongo { - -const ServiceContext::Decoration<UUIDCatalog> UUIDCatalog::get = +namespace { +const ServiceContext::Decoration<UUIDCatalog> getCatalog = ServiceContext::declareDecoration<UUIDCatalog>(); +} // namespace + +UUIDCatalog& UUIDCatalog::get(ServiceContext* svcCtx) { + return getCatalog(svcCtx); +} +UUIDCatalog& UUIDCatalog::get(OperationContext* opCtx) { + return getCatalog(opCtx->getServiceContext()); +} void UUIDCatalog::onCreateCollection(OperationContext* opCtx, Collection* coll, @@ -47,6 +56,33 @@ void UUIDCatalog::onCreateCollection(OperationContext* opCtx, opCtx->recoveryUnit()->onRollback([this, uuid] { _removeUUIDCatalogEntry(uuid); }); } +void UUIDCatalog::onDropCollection(OperationContext* opCtx, CollectionUUID uuid) { + Collection* foundColl = _removeUUIDCatalogEntry(uuid); + opCtx->recoveryUnit()->onRollback( + [this, foundColl, uuid] { _registerUUIDCatalogEntry(uuid, foundColl); }); +} + +void UUIDCatalog::onRenameCollection(OperationContext* opCtx, + Collection* newColl, + CollectionUUID uuid) { + Collection* oldColl = _removeUUIDCatalogEntry(uuid); + _registerUUIDCatalogEntry(uuid, newColl); + opCtx->recoveryUnit()->onRollback([this, oldColl, uuid] { + _removeUUIDCatalogEntry(uuid); + _registerUUIDCatalogEntry(uuid, oldColl); + }); +} + +void UUIDCatalog::onCloseDatabase(Database* db) { + for (auto&& coll : *db) { + if (coll->uuid()) { + // While the collection does not actually get dropped, we're going to destroy the + // Collection object, so for purposes of the UUIDCatalog it looks the same. + _removeUUIDCatalogEntry(coll->uuid().get()); + } + } +} + Collection* UUIDCatalog::lookupCollectionByUUID(CollectionUUID uuid) const { stdx::lock_guard<stdx::mutex> lock(_catalogLock); auto foundIt = _catalog.find(uuid); @@ -56,28 +92,28 @@ Collection* UUIDCatalog::lookupCollectionByUUID(CollectionUUID uuid) const { NamespaceString UUIDCatalog::lookupNSSByUUID(CollectionUUID uuid) const { stdx::lock_guard<stdx::mutex> lock(_catalogLock); auto foundIt = _catalog.find(uuid); - return foundIt == _catalog.end() ? 
NamespaceString() : foundIt->second->ns(); -} - -void UUIDCatalog::onDropCollection(OperationContext* opCtx, CollectionUUID uuid) { - Collection* foundCol = _removeUUIDCatalogEntry(uuid); - opCtx->recoveryUnit()->onRollback( - [this, foundCol, uuid] { _registerUUIDCatalogEntry(uuid, foundCol); }); + Collection* coll = foundIt == _catalog.end() ? nullptr : foundIt->second; + return foundIt == _catalog.end() ? NamespaceString() : coll->ns(); } void UUIDCatalog::_registerUUIDCatalogEntry(CollectionUUID uuid, Collection* coll) { stdx::lock_guard<stdx::mutex> lock(_catalogLock); if (coll) { std::pair<CollectionUUID, Collection*> entry = std::make_pair(uuid, coll); - LOG(2) << "registering collection " << coll->ns() << " as having UUID " << uuid.toString(); - invariant(_catalog.insert(entry).second); + LOG(2) << "registering collection " << coll->ns() << " with UUID " << uuid.toString(); + invariant(_catalog.insert(entry).second == true); } } Collection* UUIDCatalog::_removeUUIDCatalogEntry(CollectionUUID uuid) { stdx::lock_guard<stdx::mutex> lock(_catalogLock); - Collection* foundCol = _catalog[uuid]; - invariant(_catalog.erase(uuid) <= 1); + auto foundIt = _catalog.find(uuid); + if (foundIt == _catalog.end()) + return nullptr; + + auto foundCol = foundIt->second; + LOG(2) << "unregistering collection " << foundCol->ns() << " with UUID " << uuid.toString(); + _catalog.erase(foundIt); return foundCol; } } // namespace mongo diff --git a/src/mongo/util/uuid_catalog.h b/src/mongo/db/catalog/uuid_catalog.h index 6f532745ba7..84ce57cb9b0 100644 --- a/src/mongo/util/uuid_catalog.h +++ b/src/mongo/db/catalog/uuid_catalog.h @@ -42,20 +42,38 @@ namespace mongo { * collection lookup by UUID. */ using CollectionUUID = UUID; +class Database; class UUIDCatalog { MONGO_DISALLOW_COPYING(UUIDCatalog); public: - static const ServiceContext::Decoration<UUIDCatalog> get; + static UUIDCatalog& get(ServiceContext* svcCtx); + static UUIDCatalog& get(OperationContext* opCtx); UUIDCatalog() = default; - /* This function inserts the entry for uuid, coll into the UUID - * Collection. It is called by the op observer when a collection - * is created. + /** + * This function inserts the entry for uuid, coll into the UUID Collection. It is called by + * the op observer when a collection is created. */ void onCreateCollection(OperationContext* opCtx, Collection* coll, CollectionUUID uuid); + /** + * This function removes the entry for uuid from the UUID catalog. It is called by the op + * observer when a collection is dropped. + */ + void onDropCollection(OperationContext* opCtx, CollectionUUID uuid); + + /** + * Combination of onDropCollection and onCreateCollection. + */ + void onRenameCollection(OperationContext* opCtx, Collection* newColl, CollectionUUID uuid); + + /** + * Implies onDropCollection for all collections in db, but is not transactional. + */ + void onCloseDatabase(Database* db); + /* This function gets the Collection* pointer that corresponds to * CollectionUUID uuid. The required locks should be obtained prior * to calling this function, or else the found Collection pointer @@ -69,11 +87,6 @@ public: */ NamespaceString lookupNSSByUUID(CollectionUUID uuid) const; - /* This function removes the entry for uuid from the UUID catalog. It - * is called by the op observer when a collection is dropped. 
- */ - void onDropCollection(OperationContext* opCtx, CollectionUUID uuid); - private: mutable mongo::stdx::mutex _catalogLock; mongo::stdx::unordered_map<CollectionUUID, Collection*, CollectionUUID::Hash> _catalog; diff --git a/src/mongo/util/uuid_catalog_test.cpp b/src/mongo/db/catalog/uuid_catalog_test.cpp index 65720343f45..cbcf9c2ec13 100644 --- a/src/mongo/util/uuid_catalog_test.cpp +++ b/src/mongo/db/catalog/uuid_catalog_test.cpp @@ -26,7 +26,7 @@ * it in the license file. */ -#include "mongo/util/uuid_catalog.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/catalog/collection_mock.h" #include "mongo/db/operation_context_noop.h" diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp index dd75b69c6be..ebf796282e6 100644 --- a/src/mongo/db/commands.cpp +++ b/src/mongo/db/commands.cpp @@ -43,6 +43,7 @@ #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/jsobj.h" @@ -50,7 +51,6 @@ #include "mongo/db/server_parameters.h" #include "mongo/rpc/write_concern_error_detail.h" #include "mongo/util/log.h" -#include "mongo/util/uuid_catalog.h" namespace mongo { @@ -108,7 +108,7 @@ NamespaceString Command::parseNsOrUUID(OperationContext* opCtx, if (first.type() == BinData && first.binDataType() == BinDataType::newUUID) { StatusWith<UUID> uuidRes = UUID::parse(first); uassertStatusOK(uuidRes); - UUIDCatalog& catalog = UUIDCatalog::get(opCtx->getServiceContext()); + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); return catalog.lookupNSSByUUID(uuidRes.getValue()); } else { // Ensure collection identifier is not a Command or specialCommand diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h index 6a7abae5879..6ad9e536b47 100644 --- a/src/mongo/db/commands.h +++ b/src/mongo/db/commands.h @@ -284,7 +284,7 @@ private: * Serves as a base for server commands. See the constructor for more details. */ class Command : public CommandInterface { -protected: +public: // The type of the first field in 'cmdObj' must be mongo::String. The first field is // interpreted as a collection name. 
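With the decoration now wrapped behind static accessors, UUID-bearing commands resolve their target roughly as Command::parseNsOrUUID does above. A sketch, assuming an OperationContext* opCtx and a cmdObj whose first element is a BinData (newUUID) value:

    auto swUuid = UUID::parse(cmdObj.firstElement());
    uassertStatusOK(swUuid);

    UUIDCatalog& catalog = UUIDCatalog::get(opCtx);        // per-ServiceContext decoration
    NamespaceString nss = catalog.lookupNSSByUUID(swUuid.getValue());
    // An empty NamespaceString means the UUID is not registered; the Collection*
    // lookup is nullptr in the same case.
    if (Collection* coll = catalog.lookupCollectionByUUID(swUuid.getValue())) {
        invariant(coll->ns() == nss);   // both lookups agree while the catalog entry exists
    }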
static std::string parseNsFullyQualified(const std::string& dbname, const BSONObj& cmdObj); @@ -297,7 +297,6 @@ protected: const std::string& dbname, const BSONObj& cmdObj); -public: typedef StringMap<Command*> CommandMap; /** diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h index 00dc3ec324b..742ada49432 100644 --- a/src/mongo/db/op_observer.h +++ b/src/mongo/db/op_observer.h @@ -192,7 +192,8 @@ public: OptionalCollectionUUID uuid) = 0; virtual void onConvertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, - OptionalCollectionUUID uuid, + OptionalCollectionUUID origUUID, + OptionalCollectionUUID cappedUUID, double size) = 0; }; diff --git a/src/mongo/db/op_observer_impl.cpp b/src/mongo/db/op_observer_impl.cpp index d7b47edeb2f..bafe2bd07f6 100644 --- a/src/mongo/db/op_observer_impl.cpp +++ b/src/mongo/db/op_observer_impl.cpp @@ -33,6 +33,10 @@ #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/authorization_manager_global.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" @@ -42,8 +46,6 @@ #include "mongo/db/server_options.h" #include "mongo/db/views/durable_view_catalog.h" #include "mongo/scripting/engine.h" -#include "mongo/util/namespace_uuid_cache.h" -#include "mongo/util/uuid_catalog.h" namespace mongo { @@ -55,7 +57,7 @@ void OpObserverImpl::onCreateIndex(OperationContext* opCtx, NamespaceString systemIndexes{nss.getSystemIndexesCollection()}; if (uuid) { BSONObjBuilder builder; - builder.append("createIndex", nss.coll()); + builder.append("createIndexes", nss.coll()); for (const auto& e : indexDoc) { if (e.fieldNameStringData() != "ns"_sd) @@ -215,8 +217,11 @@ void OpObserverImpl::onCreateCollection(OperationContext* opCtx, getGlobalAuthorizationManager()->logOp(opCtx, "c", dbName, cmdObj, nullptr); if (options.uuid) { - UUIDCatalog& catalog = UUIDCatalog::get(opCtx->getServiceContext()); + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); catalog.onCreateCollection(opCtx, coll, options.uuid.get()); + opCtx->recoveryUnit()->onRollback([opCtx, collectionName]() { + NamespaceUUIDCache::get(opCtx).evictNamespace(collectionName); + }); } } @@ -292,6 +297,8 @@ void OpObserverImpl::onDropDatabase(OperationContext* opCtx, const std::string& FeatureCompatibilityVersion::onDropCollection(); } + NamespaceUUIDCache::get(opCtx).evictNamespacesInDatabase(dbName); + getGlobalAuthorizationManager()->logOp(opCtx, "c", cmdNss, cmdObj, nullptr); } @@ -321,12 +328,11 @@ repl::OpTime OpObserverImpl::onDropCollection(OperationContext* opCtx, css->onDropCollection(opCtx, collectionName); // Evict namespace entry from the namespace/uuid cache if it exists. - NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); - cache.onDropCollection(collectionName); + NamespaceUUIDCache::get(opCtx).evictNamespace(collectionName); // Remove collection from the uuid catalog. if (uuid) { - UUIDCatalog& catalog = UUIDCatalog::get(opCtx->getServiceContext()); + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); catalog.onDropCollection(opCtx, uuid.get()); } @@ -379,7 +385,20 @@ void OpObserverImpl::onRenameCollection(OperationContext* opCtx, // Evict namespace entry from the namespace/uuid cache if it exists. 
NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); - cache.onRenameCollection(fromCollection); + cache.evictNamespace(fromCollection); + cache.evictNamespace(toCollection); + opCtx->recoveryUnit()->onRollback( + [&cache, toCollection]() { cache.evictNamespace(toCollection); }); + + + // Finally update the UUID Catalog. + if (uuid) { + auto db = dbHolder().get(opCtx, toCollection.db()); + auto newColl = db->getCollection(opCtx, toCollection); + invariant(newColl); + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + catalog.onRenameCollection(opCtx, newColl, uuid.get()); + } } void OpObserverImpl::onApplyOps(OperationContext* opCtx, @@ -393,14 +412,33 @@ void OpObserverImpl::onApplyOps(OperationContext* opCtx, void OpObserverImpl::onConvertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, - OptionalCollectionUUID uuid, + OptionalCollectionUUID origUUID, + OptionalCollectionUUID cappedUUID, double size) { const NamespaceString cmdNss = collectionName.getCommandNS(); BSONObj cmdObj = BSON("convertToCapped" << collectionName.coll() << "size" << size); if (!collectionName.isSystemDotProfile()) { // do not replicate system.profile modifications - repl::logOp(opCtx, "c", cmdNss, uuid, cmdObj, nullptr, false); + repl::logOp(opCtx, "c", cmdNss, cappedUUID, cmdObj, nullptr, false); + } + + // Evict namespace entry from the namespace/uuid cache if it exists. + NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); + cache.evictNamespace(collectionName); + opCtx->recoveryUnit()->onRollback( + [&cache, collectionName]() { cache.evictNamespace(collectionName); }); + + // Finally update the UUID Catalog. + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + if (origUUID) { + catalog.onDropCollection(opCtx, origUUID.get()); + } + if (cappedUUID) { + auto db = dbHolder().get(opCtx, collectionName.db()); + auto newColl = db->getCollection(opCtx, collectionName); + invariant(newColl); + catalog.onRenameCollection(opCtx, newColl, cappedUUID.get()); } getGlobalAuthorizationManager()->logOp(opCtx, "c", cmdNss, cmdObj, nullptr); diff --git a/src/mongo/db/op_observer_impl.h b/src/mongo/db/op_observer_impl.h index 730ccfb1f84..476711a86f5 100644 --- a/src/mongo/db/op_observer_impl.h +++ b/src/mongo/db/op_observer_impl.h @@ -96,7 +96,8 @@ public: OptionalCollectionUUID uuid); void onConvertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, - OptionalCollectionUUID uuid, + OptionalCollectionUUID origUUID, + OptionalCollectionUUID cappedUUID, double size) override; }; diff --git a/src/mongo/db/op_observer_noop.cpp b/src/mongo/db/op_observer_noop.cpp index 310df4c5424..10901aa4cd0 100644 --- a/src/mongo/db/op_observer_noop.cpp +++ b/src/mongo/db/op_observer_noop.cpp @@ -28,6 +28,11 @@ #include "mongo/platform/basic.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/op_observer_noop.h" namespace mongo { @@ -58,11 +63,19 @@ void OpObserverNoop::onDelete(OperationContext*, void OpObserverNoop::onOpMessage(OperationContext*, const BSONObj&) {} -void OpObserverNoop::onCreateCollection(OperationContext*, - Collection*, - const NamespaceString&, - const CollectionOptions&, - const BSONObj&) {} +void OpObserverNoop::onCreateCollection(OperationContext* opCtx, + Collection* coll, + const NamespaceString& collectionName, + const CollectionOptions& options, + const BSONObj& 
idIndex) { + if (options.uuid) { + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + catalog.onCreateCollection(opCtx, coll, options.uuid.get()); + NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); + opCtx->recoveryUnit()->onRollback( + [&cache, collectionName]() { cache.evictNamespace(collectionName); }); + } +} void OpObserverNoop::onCollMod(OperationContext*, const NamespaceString&, @@ -73,9 +86,19 @@ void OpObserverNoop::onCollMod(OperationContext*, void OpObserverNoop::onDropDatabase(OperationContext*, const std::string&) {} -repl::OpTime OpObserverNoop::onDropCollection(OperationContext*, - const NamespaceString&, - OptionalCollectionUUID) { +repl::OpTime OpObserverNoop::onDropCollection(OperationContext* opCtx, + const NamespaceString& collectionName, + OptionalCollectionUUID uuid) { + // Evict namespace entry from the namespace/uuid cache if it exists. + NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); + cache.evictNamespace(collectionName); + + // Remove collection from the uuid catalog. + if (uuid) { + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + catalog.onDropCollection(opCtx, uuid.get()); + } + return {}; } @@ -85,20 +108,37 @@ void OpObserverNoop::onDropIndex(OperationContext*, const std::string&, const BSONObj&) {} -void OpObserverNoop::onRenameCollection(OperationContext*, - const NamespaceString&, - const NamespaceString&, - OptionalCollectionUUID, - bool, - OptionalCollectionUUID, - OptionalCollectionUUID, - bool) {} +void OpObserverNoop::onRenameCollection(OperationContext* opCtx, + const NamespaceString& fromCollection, + const NamespaceString& toCollection, + OptionalCollectionUUID uuid, + bool dropTarget, + OptionalCollectionUUID dropTargetUUID, + OptionalCollectionUUID dropSourceUUID, + bool stayTemp) { + // Evict namespace entry from the namespace/uuid cache if it exists. + NamespaceUUIDCache& cache = NamespaceUUIDCache::get(opCtx); + cache.evictNamespace(fromCollection); + cache.evictNamespace(toCollection); + opCtx->recoveryUnit()->onRollback( + [&cache, toCollection]() { cache.evictNamespace(toCollection); }); + + // Finally update the UUID Catalog. 
+ if (uuid) { + auto db = dbHolder().get(opCtx, toCollection.db()); + auto newColl = db->getCollection(opCtx, toCollection); + invariant(newColl); + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + catalog.onRenameCollection(opCtx, newColl, uuid.get()); + } +} void OpObserverNoop::onApplyOps(OperationContext*, const std::string&, const BSONObj&) {} void OpObserverNoop::onConvertToCapped(OperationContext*, const NamespaceString&, OptionalCollectionUUID, + OptionalCollectionUUID, double) {} void OpObserverNoop::onEmptyCapped(OperationContext*, diff --git a/src/mongo/db/op_observer_noop.h b/src/mongo/db/op_observer_noop.h index d7e9e3dee24..393b707f2c9 100644 --- a/src/mongo/db/op_observer_noop.h +++ b/src/mongo/db/op_observer_noop.h @@ -96,7 +96,8 @@ public: OptionalCollectionUUID uuid); void onConvertToCapped(OperationContext* opCtx, const NamespaceString& collectionName, - OptionalCollectionUUID uuid, + OptionalCollectionUUID origUUID, + OptionalCollectionUUID cappedUUID, double size) override; }; diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp index 70405da7928..771c6278137 100644 --- a/src/mongo/db/repair_database.cpp +++ b/src/mongo/db/repair_database.cpp @@ -44,6 +44,8 @@ #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_create.h" #include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/catalog/namespace_uuid_cache.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/storage/mmap_v1/mmap_v1_engine.h" @@ -242,7 +244,15 @@ Status repairDatabase(OperationContext* opCtx, return Status(ErrorCodes::BadValue, "backupOriginalFiles not supported"); } - // Close the db to invalidate all current users and caches. + // Close the db and invalidate all current users and caches. + { + Database* db = dbHolder().get(opCtx, dbName); + if (db) { + UUIDCatalog::get(opCtx).onCloseDatabase(db); + for (auto&& coll : *db) + NamespaceUUIDCache::get(opCtx).evictNamespace(coll->ns()); + } + } dbHolder().close(opCtx, dbName, "database closed for repair"); ON_BLOCK_EXIT([&dbName, &opCtx] { try { diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index b6d7249e6dc..9baa4b211cc 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -54,6 +54,7 @@ #include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog/drop_indexes.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/write_conflict_exception.h" @@ -567,8 +568,72 @@ NamespaceString parseNs(const string& ns, const BSONObj& cmdObj) { return NamespaceString(NamespaceString(ns).db().toString(), coll); } -using OpApplyFn = stdx::function<Status( - OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime)>; +NamespaceString parseUUID(OperationContext* opCtx, const BSONElement& ui) { + auto statusWithUUID = UUID::parse(ui); + uassertStatusOK(statusWithUUID); + auto uuid = statusWithUUID.getValue(); + auto& catalog = UUIDCatalog::get(opCtx); + auto nss = catalog.lookupNSSByUUID(uuid); + uassert( + ErrorCodes::NamespaceNotFound, "No namespace with UUID " + uuid.toString(), !nss.isEmpty()); + return nss; +} + +NamespaceString parseUUIDorNs(OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd) { + return ui.ok() ? 
parseUUID(opCtx, ui) : parseNs(ns, cmd); +} + +void createIndexForApplyOps(OperationContext* opCtx, + const BSONObj& indexSpec, + const NamespaceString& indexNss, + IncrementOpsAppliedStatsFn incrementOpsAppliedStats) { + // Check if collection exists. + Database* db = dbHolder().get(opCtx, indexNss.ns()); + auto indexCollection = db ? db->getCollection(opCtx, indexNss) : nullptr; + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Failed to create index due to missing collection: " << indexNss.ns(), + indexCollection); + + OpCounters* opCounters = opCtx->writesAreReplicated() ? &globalOpCounters : &replOpCounters; + opCounters->gotInsert(); + + bool relaxIndexConstraints = + ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss); + if (indexSpec["background"].trueValue()) { + Lock::TempRelease release(opCtx->lockState()); + if (opCtx->lockState()->isLocked()) { + // If TempRelease fails, background index build will deadlock. + LOG(3) << "apply op: building background index " << indexSpec + << " in the foreground because temp release failed"; + IndexBuilder builder(indexSpec, relaxIndexConstraints); + Status status = builder.buildInForeground(opCtx, db); + uassertStatusOK(status); + } else { + IndexBuilder* builder = new IndexBuilder(indexSpec, relaxIndexConstraints); + // This spawns a new thread and returns immediately. + builder->go(); + // Wait for thread to start and register itself + IndexBuilder::waitForBgIndexStarting(); + } + opCtx->recoveryUnit()->abandonSnapshot(); + } else { + IndexBuilder builder(indexSpec, relaxIndexConstraints); + Status status = builder.buildInForeground(opCtx, db); + uassertStatusOK(status); + } + if (incrementOpsAppliedStats) { + incrementOpsAppliedStats(); + } +} + +using OpApplyFn = stdx::function<Status(OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime)>; struct ApplyOpMetadata { OpApplyFn applyFunc; @@ -586,13 +651,17 @@ struct ApplyOpMetadata { std::map<std::string, ApplyOpMetadata> opsMap = { {"create", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { const NamespaceString nss(parseNs(ns, cmd)); if (auto idIndexElem = cmd["idIndex"]) { // Remove "idIndex" field from command. auto cmdWithoutIdIndex = cmd.removeField("idIndex"); - return createCollection( - opCtx, nss.db().toString(), cmdWithoutIdIndex, idIndexElem.Obj()); + return createCollectionForApplyOps( + opCtx, nss.db().toString(), ui, cmdWithoutIdIndex, idIndexElem.Obj()); } // No _id index spec was provided, so we should build a v:1 _id index. 
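All opsMap entries now receive the oplog entry's optional 'ui' element through the widened OpApplyFn signature. As a rough template (the command name and helper below are hypothetical, purely for illustration; the real entries continue in the next hunk):

    {"someCmd",  // hypothetical command name
     {[](OperationContext* opCtx,
         const char* ns,
         const BSONElement& ui,
         BSONObj& cmd,
         const OpTime& opTime) -> Status {
          // Resolve by UUID when the entry carries one, else by the 'ns' string.
          const NamespaceString nss = parseUUIDorNs(opCtx, ns, ui, cmd);
          BSONObjBuilder resultWeDontCareAbout;
          return someCmdImpl(opCtx, nss, cmd, &resultWeDontCareAbout);  // hypothetical helper
      },
      {ErrorCodes::NamespaceNotFound}}},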
@@ -602,25 +671,59 @@ std::map<std::string, ApplyOpMetadata> opsMap = { idIndexSpecBuilder.append(IndexDescriptor::kIndexNameFieldName, "_id_"); idIndexSpecBuilder.append(IndexDescriptor::kNamespaceFieldName, nss.ns()); idIndexSpecBuilder.append(IndexDescriptor::kKeyPatternFieldName, BSON("_id" << 1)); - return createCollection(opCtx, nss.db().toString(), cmd, idIndexSpecBuilder.done()); + return createCollectionForApplyOps( + opCtx, nss.db().toString(), ui, cmd, idIndexSpecBuilder.done()); }, {ErrorCodes::NamespaceExists}}}, + {"createIndexes", + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { + const NamespaceString nss(parseUUID(opCtx, ui)); + BSONElement first = cmd.firstElement(); + invariant(first.fieldNameStringData() == "createIndexes"); + uassert(ErrorCodes::InvalidNamespace, + "createIndexes value must be a string", + first.type() == mongo::String); + BSONObj indexSpec = cmd.removeField("createIndexes"); + // The UUID determines the collection to build the index on, so create new 'ns' field. + BSONObj nsObj = BSON("ns" << nss.ns()); + indexSpec = indexSpec.addField(nsObj.firstElement()); + + createIndexForApplyOps(opCtx, indexSpec, nss, {}); + return Status::OK(); + }, + {ErrorCodes::IndexAlreadyExists}}}, {"collMod", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; - return collMod(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return collMod(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::IndexNotFound, ErrorCodes::NamespaceNotFound}}}, {"dropDatabase", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { return dropDatabase(opCtx, NamespaceString(ns).db().toString()); }, {ErrorCodes::NamespaceNotFound}}}, {"drop", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; return dropCollection(opCtx, - parseNs(ns, cmd), + parseUUIDorNs(opCtx, ns, ui, cmd), resultWeDontCareAbout, opTime, DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops); @@ -628,59 +731,79 @@ std::map<std::string, ApplyOpMetadata> opsMap = { {ErrorCodes::NamespaceNotFound}}}, // deleteIndex(es) is deprecated but still works as of April 10, 2015 {"deleteIndex", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"deleteIndexes", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder 
resultWeDontCareAbout; - return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"dropIndex", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"dropIndexes", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; - return dropIndexes(opCtx, parseNs(ns, cmd), cmd, &resultWeDontCareAbout); + return dropIndexes(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd, &resultWeDontCareAbout); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}}, {"renameCollection", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { - const auto sourceNsElt = cmd.firstElement(); - const auto targetNsElt = cmd["to"]; - uassert(ErrorCodes::TypeMismatch, - "'renameCollection' must be of type String", - sourceNsElt.type() == BSONType::String); - uassert(ErrorCodes::TypeMismatch, - "'to' must be of type String", - targetNsElt.type() == BSONType::String); - return renameCollection(opCtx, - NamespaceString(sourceNsElt.valueStringData()), - NamespaceString(targetNsElt.valueStringData()), - cmd["dropTarget"].trueValue(), - cmd["stayTemp"].trueValue()); + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { + return renameCollectionForApplyOps(opCtx, nsToDatabase(ns), ui, cmd); }, {ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}}, {"applyOps", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { BSONObjBuilder resultWeDontCareAbout; return applyOps(opCtx, nsToDatabase(ns), cmd, &resultWeDontCareAbout); }, {ErrorCodes::UnknownError}}}, {"convertToCapped", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { - return convertToCapped(opCtx, parseNs(ns, cmd), cmd["size"].number()); + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { + return convertToCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd), cmd["size"].number()); }}}, {"emptycapped", - {[](OperationContext* opCtx, const char* ns, BSONObj& cmd, const OpTime& opTime) -> Status { - return emptyCapped(opCtx, parseNs(ns, cmd)); + {[](OperationContext* opCtx, + const char* ns, + const BSONElement& ui, + BSONObj& cmd, + const OpTime& opTime) -> Status { + return emptyCapped(opCtx, parseUUIDorNs(opCtx, ns, ui, cmd)); }}}, }; @@ -736,24 +859,54 @@ Status applyOperation_inlock(OperationContext* opCtx, OpCounters* opCounters = opCtx->writesAreReplicated() ? 
&globalOpCounters : &replOpCounters; - const char* names[] = {"o", "ns", "op", "b", "o2"}; - BSONElement fields[5]; - op.getFields(5, names, fields); + std::array<StringData, 6> names = {"o", "ui", "ns", "op", "b", "o2"}; + std::array<BSONElement, 6> fields; + op.getFields(names, &fields); BSONElement& fieldO = fields[0]; - BSONElement& fieldNs = fields[1]; - BSONElement& fieldOp = fields[2]; - BSONElement& fieldB = fields[3]; - BSONElement& fieldO2 = fields[4]; + BSONElement& fieldUI = fields[1]; + BSONElement& fieldNs = fields[2]; + BSONElement& fieldOp = fields[3]; + BSONElement& fieldB = fields[4]; + BSONElement& fieldO2 = fields[5]; BSONObj o; if (fieldO.isABSONObj()) o = fieldO.embeddedObject(); - uassert(ErrorCodes::InvalidNamespace, - "'ns' must be of type String", - fieldNs.type() == BSONType::String); - const StringData ns = fieldNs.valueStringData(); - NamespaceString requestNss{ns}; + // operation type -- see logOp() comments for types + const char* opType = fieldOp.valuestrsafe(); + + NamespaceString requestNss; + Collection* collection = nullptr; + if (fieldUI) { + UUIDCatalog& catalog = UUIDCatalog::get(opCtx); + auto uuid = UUID::parse(fieldUI); + uassertStatusOK(uuid); + collection = catalog.lookupCollectionByUUID(uuid.getValue()); + if (collection) { + requestNss = collection->ns(); + dassert(opCtx->lockState()->isCollectionLockedForMode( + requestNss.ns(), supportsDocLocking() ? MODE_IX : MODE_X)); + } + } else { + uassert(ErrorCodes::InvalidNamespace, + "'ns' must be of type String", + fieldNs.type() == BSONType::String); + const StringData ns = fieldNs.valueStringData(); + requestNss = NamespaceString(ns); + if (nsIsFull(ns)) { + if (supportsDocLocking()) { + // WiredTiger, and others requires MODE_IX since the applier threads driving + // this allow writes to the same collection on any thread. + dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX)); + } else { + // mmapV1 ensures that all operations to the same collection are executed from + // the same worker thread, so it takes an exclusive lock (MODE_X) + dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X)); + } + } + collection = db->getCollection(opCtx, requestNss); + } BSONObj o2; if (fieldO2.isABSONObj()) @@ -761,27 +914,11 @@ Status applyOperation_inlock(OperationContext* opCtx, bool valueB = fieldB.booleanSafe(); - if (nsIsFull(ns)) { - if (supportsDocLocking()) { - // WiredTiger, and others requires MODE_IX since the applier threads driving - // this allow writes to the same collection on any thread. - dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_IX)); - } else { - // mmapV1 ensures that all operations to the same collection are executed from - // the same worker thread, so it takes an exclusive lock (MODE_X) - dassert(opCtx->lockState()->isCollectionLockedForMode(ns, MODE_X)); - } - } - Collection* collection = db->getCollection(opCtx, requestNss); IndexCatalog* indexCatalog = collection == nullptr ? 
nullptr : collection->getIndexCatalog(); const bool haveWrappingWriteUnitOfWork = opCtx->lockState()->inAWriteUnitOfWork(); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "applyOps not supported on view: " << ns, - collection || !db->getViewCatalog()->lookup(opCtx, ns)); - - // operation type -- see logOp() comments for types - const char* opType = fieldOp.valuestrsafe(); - invariant(*opType != 'c'); // commands are processed in applyCommand_inlock() + str::stream() << "applyOps not supported on view: " << requestNss.ns(), + collection || !db->getViewCatalog()->lookup(opCtx, requestNss.ns())); if (*opType == 'i') { if (requestNss.isSystemDotIndexes()) { @@ -789,44 +926,7 @@ Status applyOperation_inlock(OperationContext* opCtx, NamespaceString indexNss; std::tie(indexSpec, indexNss) = repl::prepForApplyOpsIndexInsert(fieldO, op, requestNss); - - // Check if collection exists. - auto indexCollection = db->getCollection(opCtx, indexNss); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Failed to create index due to missing collection: " - << op.toString(), - indexCollection); - - opCounters->gotInsert(); - - bool relaxIndexConstraints = - ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss); - if (indexSpec["background"].trueValue()) { - Lock::TempRelease release(opCtx->lockState()); - if (opCtx->lockState()->isLocked()) { - // If TempRelease fails, background index build will deadlock. - LOG(3) << "apply op: building background index " << indexSpec - << " in the foreground because temp release failed"; - IndexBuilder builder(indexSpec, relaxIndexConstraints); - Status status = builder.buildInForeground(opCtx, db); - uassertStatusOK(status); - } else { - IndexBuilder* builder = new IndexBuilder(indexSpec, relaxIndexConstraints); - // This spawns a new thread and returns immediately. - builder->go(); - // Wait for thread to start and register itself - IndexBuilder::waitForBgIndexStarting(); - } - opCtx->recoveryUnit()->abandonSnapshot(); - } else { - IndexBuilder builder(indexSpec, relaxIndexConstraints); - Status status = builder.buildInForeground(opCtx, db); - uassertStatusOK(status); - } - // Since this is an index operation we can return without falling through. 
- if (incrementOpsAppliedStats) { - incrementOpsAppliedStats(); - } + createIndexForApplyOps(opCtx, indexSpec, indexNss, incrementOpsAppliedStats); return Status::OK(); } uassert(ErrorCodes::NamespaceNotFound, @@ -1005,6 +1105,7 @@ Status applyOperation_inlock(OperationContext* opCtx, incrementOpsAppliedStats(); } } else { + invariant(*opType != 'c'); // commands are processed in applyCommand_inlock() throw MsgAssertionException( 14825, str::stream() << "error in applyOperation : unknown opType " << *opType); } @@ -1015,12 +1116,13 @@ Status applyOperation_inlock(OperationContext* opCtx, Status applyCommand_inlock(OperationContext* opCtx, const BSONObj& op, bool inSteadyStateReplication) { - const char* names[] = {"o", "ns", "op"}; - BSONElement fields[3]; - op.getFields(3, names, fields); + std::array<StringData, 4> names = {"o", "ui", "ns", "op"}; + std::array<BSONElement, 4> fields; + op.getFields(names, &fields); BSONElement& fieldO = fields[0]; - BSONElement& fieldNs = fields[1]; - BSONElement& fieldOp = fields[2]; + BSONElement& fieldUI = fields[1]; + BSONElement& fieldNs = fields[2]; + BSONElement& fieldOp = fields[3]; const char* opType = fieldOp.valuestrsafe(); invariant(*opType == 'c'); // only commands are processed here @@ -1084,7 +1186,7 @@ Status applyCommand_inlock(OperationContext* opCtx, ApplyOpMetadata curOpToApply = op->second; Status status = Status::OK(); try { - status = curOpToApply.applyFunc(opCtx, nss.ns().c_str(), o, opTime); + status = curOpToApply.applyFunc(opCtx, nss.ns().c_str(), fieldUI, o, opTime); } catch (...) { status = exceptionToStatus(); } diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp index 130543160bf..d2e9416e407 100644 --- a/src/mongo/db/repl/oplog_entry.cpp +++ b/src/mongo/db/repl/oplog_entry.cpp @@ -58,8 +58,8 @@ OplogEntry::CommandType parseCommandType(const BSONObj& objectField) { return OplogEntry::CommandType::kEmptyCapped; } else if (commandString == "convertToCapped") { return OplogEntry::CommandType::kConvertToCapped; - } else if (commandString == "createIndex") { - return OplogEntry::CommandType::kCreateIndex; + } else if (commandString == "createIndexes") { + return OplogEntry::CommandType::kCreateIndexes; } else if (commandString == "dropIndexes") { return OplogEntry::CommandType::kDropIndexes; } else if (commandString == "deleteIndexes") { diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h index 7e7d5e03603..51ae9df3d01 100644 --- a/src/mongo/db/repl/oplog_entry.h +++ b/src/mongo/db/repl/oplog_entry.h @@ -51,7 +51,7 @@ public: kDropDatabase, kEmptyCapped, kConvertToCapped, - kCreateIndex, + kCreateIndexes, kDropIndexes }; diff --git a/src/mongo/db/repl/rollback_fix_up_info_test.cpp b/src/mongo/db/repl/rollback_fix_up_info_test.cpp index 54600e51fa5..cc72e9bd4cb 100644 --- a/src/mongo/db/repl/rollback_fix_up_info_test.cpp +++ b/src/mongo/db/repl/rollback_fix_up_info_test.cpp @@ -407,7 +407,7 @@ TEST_F( RollbackFixUpInfoTest, ProcessCreateCollectionOplogEntryInsertsDocumentIntoRollbackCollectionUuidCollectionWithEmptyNamespace) { // State of oplog: - // {create: mynewcoll}, {createIndex: myindex}, {collMod: mynewcoll}, {op: 'i'}, .... + // {create: mynewcoll}, {createIndexes: myindex}, {collMod: mynewcoll}, {op: 'i'}, .... // (earliest optime) ---> (latest optime) // // Oplog entries are processed in reverse optime order. 
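
The "createIndexes" entry added to opsMap earlier in this diff resolves the target collection from the oplog entry's UUID, strips the command field with removeField, and rewrites the spec's "ns" through the new BSONObj::addField before handing the result to createIndexForApplyOps. A minimal sketch of just that BSON manipulation in isolation (the helper name and the already-resolved NamespaceString parameter are illustrative assumptions, not part of the patch):

    // Sketch only: mirrors the BSON handling of the new createIndexes applyOps
    // handler. In the patch the namespace comes from the UUID catalog lookup;
    // here it is passed in already resolved.
    #include "mongo/db/jsobj.h"
    #include "mongo/db/namespace_string.h"

    namespace mongo {
    BSONObj indexSpecForApplyOps(const BSONObj& cmd, const NamespaceString& resolvedNss) {
        // Everything except the command field itself is the index spec.
        BSONObj indexSpec = cmd.removeField("createIndexes");
        // addField replaces any "ns" already present (or appends one at the end),
        // so the spec always targets the UUID-resolved collection.
        BSONObj nsObj = BSON("ns" << resolvedNss.ns());
        return indexSpec.addField(nsObj.firstElement());
    }
    }  // namespace mongo

Given a command such as BSON("createIndexes" << "ignored" << "key" << BSON("b" << 1) << "name" << "b_1"), the result keeps the key and name fields and gains an "ns" equal to resolvedNss.ns(), which is what createIndexForApplyOps expects.
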
@@ -685,12 +685,12 @@ TEST_F(RollbackFixUpInfoTest, << "ui" << UUID::gen() << "o" - << BSON("createIndex" << 1 << "v" << 2 << "key" << BSON("b" << 1) << "name" - << "b_1" - << "ns" - << "mydb.mycoll" - << "expireAfterSeconds" - << 60)); + << BSON("createIndexes" << 1 << "v" << 2 << "key" << BSON("b" << 1) << "name" + << "b_1" + << "ns" + << "mydb.mycoll" + << "expireAfterSeconds" + << 60)); auto collectionUuid = unittest::assertGet(UUID::parse(operation["ui"])); auto indexName = operation["o"].Obj()["name"].String(); @@ -716,7 +716,7 @@ TEST_F(RollbackFixUpInfoTest, ProcessCreateIndexOplogEntryWhenExistingDocumentHasDropOpTypeRemovesExistingDocument) { // State of oplog: - // {createIndex: indexA}, ...., {dropIndexes: indexA}, .... + // {createIndexes: indexA}, ...., {dropIndexes: indexA}, .... // (earliest optime) ---> (latest optime) // // Oplog entries are processed in reverse optime order. @@ -741,7 +741,7 @@ TEST_F(RollbackFixUpInfoTest, << "infoObj" << infoObj)}); - // Next, process createIndex. This should cancel out the existing 'drop' operation and remove + // Next, process createIndexes. This should cancel out the existing 'drop' operation and remove // existing document from the collection. ASSERT_OK( rollbackFixUpInfo.processCreateIndexOplogEntry(opCtx.get(), collectionUuid, indexName)); @@ -752,7 +752,7 @@ TEST_F(RollbackFixUpInfoTest, ProcessCreateIndexOplogEntryWhenExistingDocumentHasUpdateTTLOpTypeReplacesExistingDocument) { // State of oplog: - // {createIndex: indexA}, ...., {collMod: indexA}, .... + // {createIndexes: indexA}, ...., {collMod: indexA}, .... // (earliest optime) ---> (latest optime) // // Oplog entries are processed in reverse optime order. @@ -775,7 +775,7 @@ TEST_F(RollbackFixUpInfoTest, << "infoObj" << BSON("expireAfterSeconds" << 60))}); - // Next, process createIndex. This should replace the existing 'updateTTL' operation so that + // Next, process createIndexes. This should replace the existing 'updateTTL' operation so that // we drop the index when it's time to apply the fix up info. ASSERT_OK( rollbackFixUpInfo.processCreateIndexOplogEntry(opCtx.get(), collectionUuid, indexName)); @@ -812,7 +812,7 @@ TEST_F( _assertDocumentsInCollectionEquals( opCtx.get(), RollbackFixUpInfo::kRollbackIndexNamespace, {malformedDoc}); - // Process createIndex. This should log an error when checking the operation type on the + // Process createIndexes. This should log an error when checking the operation type on the // existing document. The malformed document should be replaced. ASSERT_OK( rollbackFixUpInfo.processCreateIndexOplogEntry(opCtx.get(), collectionUuid, indexName)); @@ -1001,13 +1001,13 @@ TEST_F(RollbackFixUpInfoTest, ProcessDropIndexOplogEntryWhenExistingDocumentHasCreateOpTypeReplacesExistingDocument) { // State of oplog: - // {dropIndexes: indexA}, ...., {createIndex: indexA}, .... + // {dropIndexes: indexA}, ...., {createIndexes: indexA}, .... // (earliest optime) ---> (latest optime) // // Oplog entries are processed in reverse optime order. - // First, process createIndex. This should insert a document into the collection with a 'create' - // op type. + // First, process createIndexes. This should insert a document into the collection with a + // 'create' op type. 
auto collectionUuid = UUID::gen(); std::string indexName = "b_1"; auto infoObj = BSON("v" << 2 << "key" << BSON("b" << 1) << "name" << indexName << "ns" diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index 8f1346f5d0b..7422b8c913d 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -49,6 +49,7 @@ #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_create.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/write_conflict_exception.h" @@ -432,9 +433,15 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx, WriteUnitOfWork wunit(opCtx); const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS.ns(), toNS.ns(), stayTemp); - if (status.isOK()) { - wunit.commit(); + if (!status.isOK()) { + return status; + } + + auto newColl = autoDB.getDb()->getCollection(opCtx, toNS); + if (newColl->uuid()) { + UUIDCatalog::get(opCtx).onRenameCollection(opCtx, newColl, newColl->uuid().get()); } + wunit.commit(); return status; } MONGO_WRITE_CONFLICT_RETRY_LOOP_END( diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp index c3d5bd30efb..a44591e130d 100644 --- a/src/mongo/db/repl/sync_tail.cpp +++ b/src/mongo/db/repl/sync_tail.cpp @@ -290,38 +290,10 @@ Status SyncTail::syncApply(OperationContext* opCtx, // Count each log op application as a separate operation, for reporting purposes CurOp individualOp(opCtx); - const char* ns = op.getStringField("ns"); - verify(ns); + const NamespaceString nss(op.getStringField("ns")); const char* opType = op["op"].valuestrsafe(); - bool isCommand(opType[0] == 'c'); - bool isNoOp(opType[0] == 'n'); - - if ((*ns == '\0') || (*ns == '.')) { - // this is ugly - // this is often a no-op - // but can't be 100% sure - if (!isNoOp) { - error() << "skipping bad op in oplog: " << redact(op); - } - return Status::OK(); - } - - if (isCommand) { - MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - // a command may need a global write lock. so we will conservatively go - // ahead and grab one here. suboptimal. :-( - Lock::GlobalWrite globalWriteLock(opCtx); - - // special case apply for commands to avoid implicit database creation - Status status = applyCommandInLock(opCtx, op, inSteadyStateReplication); - incrementOpsAppliedStats(); - return status; - } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_command", ns); - } - auto applyOp = [&](Database* db) { // For non-initial-sync, we convert updates to upserts // to suppress errors when replaying oplog entries. @@ -336,14 +308,17 @@ Status SyncTail::syncApply(OperationContext* opCtx, return status; }; - if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) { + bool isNoOp = opType[0] == 'n'; + if (isNoOp || (opType[0] == 'i' && nss.isSystemDotIndexes())) { auto opStr = isNoOp ? 
"syncApply_noop" : "syncApply_indexBuild"; + if (isNoOp && nss.db() == "") + return Status::OK(); MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { - Lock::DBLock dbLock(opCtx, nsToDatabaseSubstring(ns), MODE_X); - OldClientContext ctx(opCtx, ns); + Lock::DBLock dbLock(opCtx, nss.db(), MODE_X); + OldClientContext ctx(opCtx, nss.ns()); return applyOp(ctx.db()); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, ns); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, opStr, nss.ns()); } if (isCrudOpType(opType)) { @@ -353,7 +328,6 @@ Status SyncTail::syncApply(OperationContext* opCtx, std::unique_ptr<Lock::CollectionLock> collectionLock; std::unique_ptr<OldClientContext> ctx; - NamespaceString nss(ns); auto dbName = nss.db(); auto resetLocks = [&](LockMode mode) { @@ -363,28 +337,42 @@ Status SyncTail::syncApply(OperationContext* opCtx, // the upgraded one. dbLock.reset(); dbLock.reset(new Lock::DBLock(opCtx, dbName, mode)); - collectionLock.reset(new Lock::CollectionLock(opCtx->lockState(), ns, mode)); + collectionLock.reset(new Lock::CollectionLock(opCtx->lockState(), nss.ns(), mode)); }; resetLocks(MODE_IX); if (!dbHolder().get(opCtx, dbName)) { // Need to create database, so reset lock to stronger mode. resetLocks(MODE_X); - ctx.reset(new OldClientContext(opCtx, ns)); + ctx.reset(new OldClientContext(opCtx, nss.ns())); } else { - ctx.reset(new OldClientContext(opCtx, ns)); + ctx.reset(new OldClientContext(opCtx, nss.ns())); if (!ctx->db()->getCollection(opCtx, nss)) { // Need to implicitly create collection. This occurs for 'u' opTypes, // but not for 'i' nor 'd'. ctx.reset(); resetLocks(MODE_X); - ctx.reset(new OldClientContext(opCtx, ns)); + ctx.reset(new OldClientContext(opCtx, nss.ns())); } } return applyOp(ctx->db()); } - MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_CRUD", ns); + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_CRUD", nss.ns()); + } + + if (opType[0] == 'c') { + MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN { + // a command may need a global write lock. so we will conservatively go + // ahead and grab one here. suboptimal. :-( + Lock::GlobalWrite globalWriteLock(opCtx); + + // special case apply for commands to avoid implicit database creation + Status status = applyCommandInLock(opCtx, op, inSteadyStateReplication); + incrementOpsAppliedStats(); + return status; + } + MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "syncApply_command", nss.ns()); } // unknown opType @@ -756,14 +744,17 @@ private: void SyncTail::oplogApplication(ReplicationCoordinator* replCoord) { OpQueueBatcher batcher(this); - const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); - OperationContext& opCtx = *opCtxPtr; std::unique_ptr<ApplyBatchFinalizer> finalizer{ getGlobalServiceContext()->getGlobalStorageEngine()->isDurable() ? new ApplyBatchFinalizerForJournal(replCoord) : new ApplyBatchFinalizer(replCoord)}; while (true) { // Exits on message from OpQueueBatcher. + // Use a new operation context each iteration, as otherwise we may appear to use a single + // collection name to refer to collections with different UUIDs. + const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); + OperationContext& opCtx = *opCtxPtr; + // For pausing replication in tests. while (MONGO_FAIL_POINT(rsSyncApplyStop)) { // Tests should not trigger clean shutdown while that failpoint is active. 
If we diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp index 888640f96a3..9e1e8178bd7 100644 --- a/src/mongo/db/repl/sync_tail_test.cpp +++ b/src/mongo/db/repl/sync_tail_test.cpp @@ -368,7 +368,8 @@ Status failedApplyCommand(OperationContext* opCtx, const BSONObj& theOperation, TEST_F(SyncTailTest, SyncApplyNoNamespaceBadOp) { const BSONObj op = BSON("op" << "x"); - ASSERT_OK(SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps)); + ASSERT_EQUALS(ErrorCodes::BadValue, + SyncTail::syncApply(_opCtx.get(), op, false, _applyOp, _applyCmd, _incOps)); ASSERT_EQUALS(0U, _opsApplied); } @@ -1137,6 +1138,11 @@ OplogEntry IdempotencyTest::dropIndex(const std::string& indexName) { } CollectionState IdempotencyTest::validate() { + // We check that a given operation will not resolve the same NamespaceString to different UUIDs, + // make sure to use a new operation here. + _opCtx.reset(); + _opCtx = cc().makeOperationContext(); + AutoGetCollectionForReadCommand autoColl(_opCtx.get(), nss); auto collection = autoColl.getCollection(); @@ -1178,6 +1184,7 @@ CollectionState IdempotencyTest::validate() { auto collectionCatalog = collection->getCatalogEntry(); auto collectionOptions = collectionCatalog->getCollectionOptions(_opCtx.get()); + collectionOptions.uuid.reset(); std::vector<std::string> allIndexes; BSONObjSet indexSpecs = SimpleBSONObjComparator::kInstance.makeBSONObjSet(); collectionCatalog->getAllIndexes(_opCtx.get(), &allIndexes); diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp index 175afb53298..b70d6844d39 100644 --- a/src/mongo/db/storage/mmap_v1/repair_database.cpp +++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp @@ -41,6 +41,7 @@ #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_create.h" +#include "mongo/db/catalog/uuid_catalog.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" @@ -319,8 +320,9 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, unique_ptr<Database> tempDatabase; // Must call this before MMAPV1DatabaseCatalogEntry's destructor closes the DB files - ON_BLOCK_EXIT([&dbEntry, &opCtx] { + ON_BLOCK_EXIT([&dbEntry, &opCtx, &tempDatabase] { getDur().syncDataAndTruncateJournal(opCtx); + UUIDCatalog::get(opCtx).onCloseDatabase(tempDatabase.get()); dbEntry->close(opCtx); }); @@ -362,7 +364,7 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, CollectionOptions options; if (obj["options"].isABSONObj()) { Status status = - options.parse(obj["options"].Obj(), CollectionOptions::parseForCommand); + options.parse(obj["options"].Obj(), CollectionOptions::parseForStorage); if (!status.isOK()) return status; } @@ -381,6 +383,9 @@ Status MMAPV1Engine::repairDatabase(OperationContext* opCtx, Collection* tempCollection = NULL; { WriteUnitOfWork wunit(opCtx); + if (options.uuid) { + UUIDCatalog::get(opCtx).onDropCollection(opCtx, options.uuid.get()); + } tempCollection = tempDatabase->createCollection(opCtx, ns, options, false); wunit.commit(); } diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index 9cb4b18af93..48fb0381342 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -891,20 +891,6 @@ public: } }; -class EmptyPushSparseIndex : public EmptyPush { -public: - EmptyPushSparseIndex() { - _client.insert("unittests.system.indexes", - BSON("ns" << ns() << 
"key" << BSON("a" << 1) << "name" - << "foo" - << "sparse" - << true)); - } - ~EmptyPushSparseIndex() { - _client.dropIndexes(ns()); - } -}; - class PushAll : public Base { public: void doIt() const { @@ -1465,7 +1451,6 @@ public: add<Idempotence::PushUpsert>(); add<Idempotence::MultiPush>(); add<Idempotence::EmptyPush>(); - add<Idempotence::EmptyPushSparseIndex>(); add<Idempotence::PushAll>(); add<Idempotence::PushSlice>(); add<Idempotence::PushSliceInitiallyInexistent>(); diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index 71f1a89a8ff..3cab6c741ab 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -32,8 +32,10 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/database_catalog_entry.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/head_manager.h" #include "mongo/db/catalog/index_create.h" +#include "mongo/db/catalog/rename_collection.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" @@ -80,8 +82,7 @@ Status renameCollection(OperationContext* opCtx, const NamespaceString& source, const NamespaceString& target) { ASSERT_EQ(source.db(), target.db()); - Database* db = dbHolder().get(opCtx, source.db()); - return db->renameCollection(opCtx, source.ns(), target.ns(), false); + return renameCollection(opCtx, source, target, false, false); } Status truncateCollection(OperationContext* opCtx, const NamespaceString& nss) { Collection* coll = dbHolder().get(opCtx, nss.db())->getCollection(opCtx, nss); @@ -317,7 +318,13 @@ public: { WriteUnitOfWork uow(&opCtx); - ASSERT_OK(ctx.db()->dropCollection(&opCtx, target.ns())); + BSONObjBuilder result; + ASSERT_OK( + dropCollection(&opCtx, + target, + result, + {}, + DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); ASSERT_OK(renameCollection(&opCtx, source, target)); ASSERT(!collectionExists(&ctx, source.ns())); ASSERT(collectionExists(&ctx, target.ns())); @@ -375,7 +382,13 @@ public: { WriteUnitOfWork uow(&opCtx); - ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns())); + BSONObjBuilder result; + ASSERT_OK( + dropCollection(&opCtx, + nss, + result, + {}, + DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); ASSERT(!collectionExists(&ctx, nss.ns())); ASSERT_OK(userCreateNS(&opCtx, ctx.db(), @@ -428,7 +441,13 @@ public: insertRecord(&opCtx, nss, doc); assertOnlyRecord(&opCtx, nss, doc); - ASSERT_OK(ctx.db()->dropCollection(&opCtx, nss.ns())); + BSONObjBuilder result; + ASSERT_OK( + dropCollection(&opCtx, + nss, + result, + {}, + DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); ASSERT(!collectionExists(&ctx, nss.ns())); if (!rollback) { diff --git a/src/mongo/util/SConscript b/src/mongo/util/SConscript index 2bb79104f3c..604743801c4 100644 --- a/src/mongo/util/SConscript +++ b/src/mongo/util/SConscript @@ -101,26 +101,9 @@ env.Library( target='uuid', source=[ 'uuid.cpp', - 'namespace_uuid_cache.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/catalog/collection', - '$BUILD_DIR/mongo/util/decorable', - '$BUILD_DIR/third_party/murmurhash3/murmurhash3', - ], -) - -env.Library( - target='uuid_catalog', - source=[ - 'uuid_catalog.cpp' - ], - LIBDEPS=[ - 'uuid', - '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/catalog/collection', - '$BUILD_DIR/mongo/db/namespace_string', '$BUILD_DIR/mongo/util/decorable', '$BUILD_DIR/third_party/murmurhash3/murmurhash3', 
], @@ -137,28 +120,6 @@ env.CppUnitTest( ], ) -env.CppUnitTest( - target='namespace_uuid_cache_test', - source=[ - 'namespace_uuid_cache_test.cpp' - ], - LIBDEPS=[ - 'uuid', - ], -) - -env.CppUnitTest( - target='uuid_catalog_test', - source=[ - 'uuid_catalog_test.cpp', - ], - LIBDEPS=[ - 'uuid_catalog', - 'uuid', - '$BUILD_DIR/mongo/db/service_context', - ] - ) - env.Library( target='summation', source=[ diff --git a/src/mongo/util/uuid.cpp b/src/mongo/util/uuid.cpp index 3c041e64010..fdd0798abb1 100644 --- a/src/mongo/util/uuid.cpp +++ b/src/mongo/util/uuid.cpp @@ -91,6 +91,10 @@ bool UUID::isUUIDString(const std::string& s) { return std::regex_match(s, uuidRegex); } +bool UUID::isRFC4122v4() const { + return (_uuid[6] & ~0x0f) == 0x40 && (_uuid[8] & ~0x3f) == 0x80; // See RFC 4122, section 4.4. +} + UUID UUID::gen() { int64_t randomWords[2]; diff --git a/src/mongo/util/uuid.h b/src/mongo/util/uuid.h index 9e1caa88b6c..0049fe9b665 100644 --- a/src/mongo/util/uuid.h +++ b/src/mongo/util/uuid.h @@ -64,7 +64,7 @@ public: static constexpr int kNumBytes = sizeof(UUIDStorage); /** - * Generate a new random v4 UUID per RFC 4122. + * Generates a new random v4 UUID per RFC 4122. */ static UUID gen(); @@ -81,7 +81,7 @@ public: static StatusWith<UUID> parse(BSONElement from); /** - * Parse a BSON document of the form { uuid: BinData(4, "...") }. + * Parses a BSON document of the form { uuid: BinData(4, "...") }. * * For IDL. */ @@ -107,17 +107,17 @@ public: } /** - * Append to builder as BinData(4, "...") element with the given name. + * Appends to builder as BinData(4, "...") element with the given name. */ void appendToBuilder(BSONObjBuilder* builder, StringData name) const; /** - * Return a BSON object of the form { uuid: BinData(4, "...") }. + * Returns a BSON object of the form { uuid: BinData(4, "...") }. */ BSONObj toBSON() const; /** - * Return a string representation of this UUID, in hexadecimal, + * Returns a string representation of this UUID, in hexadecimal, * as per RFC 4122: * * 4 Octets - 2 Octets - 2 Octets - 2 Octets - 6 Octets @@ -133,6 +133,11 @@ public: } /** + * Returns true only if the UUID is the RFC 4122 variant, v4 (random). + */ + bool isRFC4122v4() const; + + /** * Custom hasher so UUIDs can be used in unordered data structures. * * ex: std::unordered_set<UUID, UUID::Hash> uuidSet; |
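
The isRFC4122v4() helper added to UUID above inspects only the version and variant bits of the stored bytes. A self-contained sketch of the same test against a raw 16-byte buffer, for trying the bit arithmetic outside the mongo tree (the free-function form and std::array parameter are assumptions for illustration):

    // Byte 6's high nibble must be 0x4 (UUID version 4) and byte 8's top two
    // bits must be 10 (the RFC 4122 variant); see RFC 4122, section 4.4.
    #include <array>
    #include <cstdint>

    bool isRfc4122Version4(const std::array<std::uint8_t, 16>& bytes) {
        return (bytes[6] & ~0x0f) == 0x40 && (bytes[8] & ~0x3f) == 0x80;
    }

An all-zero buffer fails both tests, while the output of UUID::gen(), documented above as generating a random v4 UUID per RFC 4122, passes.
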