author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2022-06-20 16:29:09 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-06-20 17:21:51 +0000
commit     77ee09ff95f6f51483ed1cde01198879adaa71ad (patch)
tree       ae3206166d8dc7386803f100af3653e10321ae0f /src/mongo/db/s
parent     46d25203fcbe843eec1b2432e1d4ed1e9d598164 (diff)
download   mongo-77ee09ff95f6f51483ed1cde01198879adaa71ad.tar.gz
SERVER-66869 Get rid of the multi DB locking capability
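This change removes the `Database*` plumbing that the multi-DB locking mode required: `Helpers::findById` now resolves the collection from the namespace alone, so every sharding call site sheds its `Database*` argument. A minimal before/after sketch of the call-site pattern, based on the hunks below (variable names are illustrative):

    BSONObj localDoc;
    // Before: the caller had to acquire and pass the Database pointer:
    //     Helpers::findById(opCtx, autoColl.getDb(), nss.ns(), idDoc, localDoc);
    // After: the namespace string suffices; the helper resolves the collection itself.
    bool found = Helpers::findById(opCtx, nss.ns(), idDoc, localDoc);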
Diffstat (limited to 'src/mongo/db/s')
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp            5
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.h              2
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp   7
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp       8
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp                  15
-rw-r--r--  src/mongo/db/s/move_primary_source_manager.cpp                     8
-rw-r--r--  src/mongo/db/s/rename_collection_participant_service.cpp           9
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.cpp         9
-rw-r--r--  src/mongo/db/s/resharding/resharding_oplog_application.h           1
-rw-r--r--  src/mongo/db/s/shard_metadata_util.cpp                             6
-rw-r--r--  src/mongo/db/s/shard_metadata_util.h                              47
-rw-r--r--  src/mongo/db/s/shard_server_catalog_cache_loader.cpp              16
-rw-r--r--  src/mongo/db/s/type_shard_collection.idl                           3
13 files changed, 42 insertions(+), 94 deletions(-)
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 37e776833c8..1a76fd9a5a5 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -750,7 +750,6 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
}
Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
- Database* db,
BSONObjBuilder* builder) {
dassert(opCtx->lockState()->isCollectionLockedForMode(nss(), MODE_IS));
@@ -783,8 +782,8 @@ Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
if (deleteList.empty()) {
BSONArrayBuilder arrUpd(builder->subarrayStart("reload"));
- auto findByIdWrapper = [opCtx, db, ns](BSONObj idDoc, BSONObj* fullDoc) {
- return Helpers::findById(opCtx, db, ns, idDoc, *fullDoc);
+ auto findByIdWrapper = [opCtx, ns](BSONObj idDoc, BSONObj* fullDoc) {
+ return Helpers::findById(opCtx, ns, idDoc, *fullDoc);
};
totalDocSize = xferMods(&arrUpd, &updateList, totalDocSize, findByIdWrapper);
arrUpd.done();
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
index 80f5d33cf41..1912c947ad9 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.h
@@ -174,7 +174,7 @@ public:
*
* NOTE: Must be called with the collection lock held in at least IS mode.
*/
- Status nextModsBatch(OperationContext* opCtx, Database* db, BSONObjBuilder* builder);
+ Status nextModsBatch(OperationContext* opCtx, BSONObjBuilder* builder);
/**
* Appends to 'arrBuilder' oplog entries which wrote to the currently migrated chunk and contain
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index d4c7593370c..8be0acd90df 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -103,11 +103,6 @@ public:
_autoColl = boost::none;
}
- Database* getDb() const {
- invariant(_autoColl);
- return _autoColl->getDb();
- }
-
const CollectionPtr& getColl() const {
invariant(_autoColl);
return _autoColl->getCollection();
@@ -235,7 +230,7 @@ public:
AutoGetActiveCloner autoCloner(opCtx, migrationSessionId, true);
- uassertStatusOK(autoCloner.getCloner()->nextModsBatch(opCtx, autoCloner.getDb(), &result));
+ uassertStatusOK(autoCloner.getCloner()->nextModsBatch(opCtx, &result));
return true;
}
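Note that the locking contract is unchanged: per the header comment above, callers must still hold the collection lock in at least MODE_IS; only the `Database*` hand-off disappears. A hypothetical caller sketch mirroring the test changes below:

    AutoGetCollection autoColl(opCtx, nss, MODE_IS);  // lock still required in >= IS mode
    BSONObjBuilder builder;
    // No Database* argument anymore; the cloner fetches documents by namespace.
    uassertStatusOK(cloner->nextModsBatch(opCtx, &builder));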
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index 91e1b4a21bc..561bcbb713a 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -355,7 +355,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, CorrectDocumentsFetched) {
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(2U, modsObj["reload"].Array().size());
@@ -455,7 +455,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, RemoveDuplicateDocuments) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(1U, modsObj["reload"].Array().size());
@@ -522,7 +522,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, OneLargeDocumentTransferMods) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(1, modsObj["reload"].Array().size());
@@ -600,7 +600,7 @@ TEST_F(MigrationChunkClonerSourceLegacyTest, ManySmallDocumentsTransferMods) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
{
BSONObjBuilder modsBuilder;
- ASSERT_OK(cloner.nextModsBatch(operationContext(), autoColl.getDb(), &modsBuilder));
+ ASSERT_OK(cloner.nextModsBatch(operationContext(), &modsBuilder));
const auto modsObj = modsBuilder.obj();
ASSERT_EQ(modsObj["reload"].Array().size(), numDocuments);
}
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index c532b48ed2a..fa303126008 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -201,11 +201,10 @@ bool willOverrideLocalId(OperationContext* opCtx,
BSONObj min,
BSONObj max,
BSONObj shardKeyPattern,
- Database* db,
BSONObj remoteDoc,
BSONObj* localDoc) {
*localDoc = BSONObj();
- if (Helpers::findById(opCtx, db, nss.ns(), remoteDoc, *localDoc)) {
+ if (Helpers::findById(opCtx, nss.ns(), remoteDoc, *localDoc)) {
return !isInRange(*localDoc, min, max, shardKeyPattern);
}
@@ -1774,7 +1773,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
// Do not apply delete if doc does not belong to the chunk being migrated
BSONObj fullObj;
- if (Helpers::findById(opCtx, autoColl.getDb(), _nss.ns(), id, fullObj)) {
+ if (Helpers::findById(opCtx, _nss.ns(), id, fullObj)) {
if (!isInRange(fullObj, _min, _max, _shardKeyPattern)) {
if (MONGO_unlikely(failMigrationReceivedOutOfRangeOperation.shouldFail())) {
MONGO_UNREACHABLE;
@@ -1823,14 +1822,8 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const
}
BSONObj localDoc;
- if (willOverrideLocalId(opCtx,
- _nss,
- _min,
- _max,
- _shardKeyPattern,
- autoColl.getDb(),
- updatedDoc,
- &localDoc)) {
+ if (willOverrideLocalId(
+ opCtx, _nss, _min, _max, _shardKeyPattern, updatedDoc, &localDoc)) {
// Exception will abort migration cleanly
LOGV2_ERROR_OPTIONS(
16977,
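With the `Database*` parameter gone, the `willOverrideLocalId` call collapses onto two lines. A sketch of the resulting signature, reconstructed from the hunk above (the `nss` parameter's type is an assumption, since the hunk starts mid-signature; it is only ever used as `nss.ns()`):

    bool willOverrideLocalId(OperationContext* opCtx,
                             const NamespaceString& nss,  // assumed type
                             BSONObj min,
                             BSONObj max,
                             BSONObj shardKeyPattern,
                             BSONObj remoteDoc,
                             BSONObj* localDoc);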
diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp
index b4382f21e0f..aaca2f82bbb 100644
--- a/src/mongo/db/s/move_primary_source_manager.cpp
+++ b/src/mongo/db/s/move_primary_source_manager.cpp
@@ -27,9 +27,6 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/move_primary_source_manager.h"
#include "mongo/client/connpool.h"
@@ -50,14 +47,11 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
MONGO_FAIL_POINT_DEFINE(hangInCloneStage);
MONGO_FAIL_POINT_DEFINE(hangInCleanStaleDataStage);
-using namespace shardmetadatautil;
-
MovePrimarySourceManager::MovePrimarySourceManager(OperationContext* opCtx,
ShardMovePrimary requestArgs,
StringData dbname,
@@ -192,7 +186,7 @@ Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) {
// time inclusive of the move primary config commit update from accessing secondary data.
// Note: this write must occur after the critSec flag is set, to ensure the secondary refresh
// will stall behind the flag.
- Status signalStatus = updateShardDatabasesEntry(
+ Status signalStatus = shardmetadatautil::updateShardDatabasesEntry(
opCtx,
BSON(ShardDatabaseType::kNameFieldName << getNss().toString()),
BSONObj(),
diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp
index bf48f41b581..64419c6c5e4 100644
--- a/src/mongo/db/s/rename_collection_participant_service.cpp
+++ b/src/mongo/db/s/rename_collection_participant_service.cpp
@@ -27,8 +27,7 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
+#include "mongo/db/s/rename_collection_participant_service.h"
#include "mongo/base/checked_cast.h"
#include "mongo/db/catalog/collection_catalog.h"
@@ -40,8 +39,6 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/range_deletion_util.h"
#include "mongo/db/s/recoverable_critical_section_service.h"
-#include "mongo/db/s/rename_collection_participant_service.h"
-#include "mongo/db/s/shard_metadata_util.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/grid.h"
@@ -49,9 +46,7 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
-
namespace {
const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max());
@@ -76,7 +71,6 @@ void dropCollectionLocally(OperationContext* opCtx, const NamespaceString& nss)
"collectionExisted"_attr = knownNss);
}
-/* Clear the CollectionShardingRuntime entry for the specified namespace */
void clearFilteringMetadata(OperationContext* opCtx, const NamespaceString& nss) {
UninterruptibleLockGuard noInterrupt(opCtx->lockState());
Lock::DBLock dbLock(opCtx, nss.db(), MODE_IX);
@@ -135,6 +129,7 @@ void renameOrDropTarget(OperationContext* opCtx,
deleteRangeDeletionTasksForRename(opCtx, fromNss, toNss);
}
}
+
} // namespace
RenameCollectionParticipantService* RenameCollectionParticipantService::getService(
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
index 1478a3ec30c..9a643ef819e 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp
@@ -252,7 +252,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and run a replacement update on the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -348,7 +348,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and update the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto request = UpdateRequest();
request.setNamespaceString(_myStashNss);
@@ -430,7 +430,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(OperationContext* opCt
// First, query the conflict stash collection using [op _id] as the query. If a doc exists,
// apply rule #1 and delete the doc from the stash collection.
- auto stashCollDoc = _queryStashCollById(opCtx, db, stashColl, idQuery);
+ auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
if (!stashCollDoc.isEmpty()) {
auto nDeleted = deleteObjects(opCtx, stashColl, _myStashNss, idQuery, true /* justOne */);
invariant(nDeleted != 0);
@@ -543,7 +543,6 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock(OperationContext* opCt
}
BSONObj ReshardingOplogApplicationRules::_queryStashCollById(OperationContext* opCtx,
- Database* db,
const CollectionPtr& coll,
const BSONObj& idQuery) const {
const IndexCatalog* indexCatalog = coll->getIndexCatalog();
@@ -552,7 +551,7 @@ BSONObj ReshardingOplogApplicationRules::_queryStashCollById(OperationContext* o
indexCatalog->haveIdIndex(opCtx));
BSONObj result;
- Helpers::findById(opCtx, db, _myStashNss.ns(), idQuery, result);
+ Helpers::findById(opCtx, _myStashNss.ns(), idQuery, result);
return result;
}
} // namespace mongo
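All three conflict-stash lookups (the insert, update, and delete rules) now funnel through the two-argument `_queryStashCollById`. A hypothetical illustration of the "[op _id]" query the rules pass in; the exact construction lives outside these hunks:

    // Hypothetical: an _id-only query built from the incoming op's document key.
    BSONObj idQuery = BSON("_id" << opDocument["_id"]);
    auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery);
    if (!stashCollDoc.isEmpty()) {
        // Rule #1: a conflicting doc is stashed; operate on the stash collection.
    }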
diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h
index b8bd3942b40..4e00a62a269 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_application.h
+++ b/src/mongo/db/s/resharding/resharding_oplog_application.h
@@ -96,7 +96,6 @@ private:
// Queries '_stashNss' using 'idQuery'.
BSONObj _queryStashCollById(OperationContext* opCtx,
- Database* db,
const CollectionPtr& coll,
const BSONObj& idQuery) const;
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 69d0a905642..d7d8386d10b 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -27,13 +27,8 @@
* it in the license file.
*/
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/shard_metadata_util.h"
-#include <memory>
-
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/ops/write_ops.h"
#include "mongo/db/s/type_shard_collection.h"
@@ -49,7 +44,6 @@
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
namespace mongo {
namespace shardmetadatautil {
namespace {
diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h
index 52f043a0b9a..a23efa4b577 100644
--- a/src/mongo/db/s/shard_metadata_util.h
+++ b/src/mongo/db/s/shard_metadata_util.h
@@ -32,7 +32,7 @@
#include <string>
#include <vector>
-#include "mongo/base/status.h"
+#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/oid.h"
#include "mongo/s/chunk_version.h"
@@ -40,17 +40,11 @@
namespace mongo {
class ChunkType;
-class CollectionMetadata;
class NamespaceString;
class OperationContext;
class ShardCollectionType;
class ShardDatabaseType;
-template <typename T>
-class StatusWith;
-/**
- * Function helpers to locally, using a DBDirectClient, read and write sharding metadata on a shard.
- */
namespace shardmetadatautil {
/**
@@ -62,25 +56,6 @@ struct QueryAndSort {
};
/**
- * Subset of the shard's collections collection document that relates to refresh state.
- */
-struct RefreshState {
- bool operator==(const RefreshState& other) const;
-
- std::string toString() const;
-
- // The current generation of the collection.
- CollectionGeneration generation;
-
- // Whether a refresh is currently in progress.
- bool refreshing;
-
- // The collection version after the last complete refresh. Indicates change if refreshing has
- // started and finished since last loaded.
- ChunkVersion lastRefreshedCollectionVersion;
-};
-
-/**
* Returns the query needed to find incremental changes to the chunks collection on a shard server.
*
* The query has to find all the chunks $gte the current max version. Currently, any splits, merges
@@ -115,6 +90,26 @@ Status unsetPersistedRefreshFlags(OperationContext* opCtx,
const ChunkVersion& refreshedVersion);
/**
+ * Represents a subset of a collection's config.cache.collections entry that relates to refresh
+ * state.
+ */
+struct RefreshState {
+ bool operator==(const RefreshState& other) const;
+
+ std::string toString() const;
+
+ // The current generation of the collection.
+ CollectionGeneration generation;
+
+ // Whether a refresh is currently in progress.
+ bool refreshing;
+
+ // The collection version after the last complete refresh. Indicates change if refreshing has
+ // started and finished since last loaded.
+ ChunkVersion lastRefreshedCollectionVersion;
+};
+
+/**
* Reads the persisted refresh signal for 'nss' and returns those settings.
*/
StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
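Moving `RefreshState` next to the reader that returns it also explains the include swap above: with the forward declaration of `StatusWith` gone, the header pulls in `status_with.h` directly. A consumer sketch, assuming `getPersistedRefreshFlags` takes the namespace as its second parameter (its declaration is cut off above):

    auto swRefreshState = shardmetadatautil::getPersistedRefreshFlags(opCtx, nss);
    if (!swRefreshState.isOK()) {
        return swRefreshState.getStatus();  // e.g. no persisted entry to read yet
    }
    const auto& state = swRefreshState.getValue();
    if (state.refreshing) {
        // A refresh started but has not completed; lastRefreshedCollectionVersion
        // still reflects the last complete refresh.
    }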
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 274ab8fd133..283001c3b3e 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -27,17 +27,8 @@
* it in the license file.
*/
-
-#define LOGV2_FOR_CATALOG_REFRESH(ID, DLEVEL, MESSAGE, ...) \
- LOGV2_DEBUG_OPTIONS( \
- ID, DLEVEL, {logv2::LogComponent::kShardingCatalogRefresh}, MESSAGE, ##__VA_ARGS__)
-
-#include "mongo/platform/basic.h"
-
#include "mongo/db/s/shard_server_catalog_cache_loader.h"
-#include <memory>
-
#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
@@ -65,7 +56,6 @@ using CollectionAndChangedChunks = CatalogCacheLoader::CollectionAndChangedChunk
namespace {
-MONGO_FAIL_POINT_DEFINE(hangPersistCollectionAndChangedChunksAfterDropChunks);
MONGO_FAIL_POINT_DEFINE(hangCollectionFlush);
AtomicWord<unsigned long long> taskIdGenerator{0};
@@ -84,11 +74,6 @@ void dropChunksIfEpochChanged(OperationContext* opCtx,
// Drop the 'config.cache.chunks.<ns>' collection
dropChunks(opCtx, nss);
- if (MONGO_unlikely(hangPersistCollectionAndChangedChunksAfterDropChunks.shouldFail())) {
- LOGV2(22093, "Hit hangPersistCollectionAndChangedChunksAfterDropChunks failpoint");
- hangPersistCollectionAndChangedChunksAfterDropChunks.pauseWhileSet(opCtx);
- }
-
LOGV2(5990400,
"Dropped persisted chunk metadata due to epoch change",
"namespace"_attr = nss,
@@ -130,7 +115,6 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx,
return status;
}
- // Update the chunk metadata.
try {
dropChunksIfEpochChanged(opCtx, maxLoaderVersion, collAndChunks.epoch, nss);
} catch (const DBException& ex) {
diff --git a/src/mongo/db/s/type_shard_collection.idl b/src/mongo/db/s/type_shard_collection.idl
index 11ef4a8daa7..d56b231e302 100644
--- a/src/mongo/db/s/type_shard_collection.idl
+++ b/src/mongo/db/s/type_shard_collection.idl
@@ -93,11 +93,12 @@ structs:
optional: false
epoch:
type: objectid
+ optional: false
description: "Uniquely identifies this instance of the collection, in case of
drop/create or shard key refine."
- optional: false
timestamp:
type: timestamp
+ optional: false
description: "Uniquely identifies this incarnation of the collection. Only changes
in case of drop and create, or shard key refine.
This field will store the ClusterTime of the Config Server when the