author     Paolo Polato <paolo.polato@mongodb.com>            2022-11-28 13:45:33 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-11-28 14:15:49 +0000
commit     e4e1b807a5e079dc9fff098294271b63966930e3 (patch)
tree       797da1c89cf8fa1d3e14cca7b29c04cb052d08d2
parent     1bb999c4e26f989ac69bab0badd187ff5a3f6e6d (diff)
SERVER-68930 Add placement history entry when a collection is removed from config.collections
-rw-r--r--  jstests/sharding/store_historical_placement_data.js                      121
-rw-r--r--  src/mongo/db/s/SConscript                                                   1
-rw-r--r--  src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp     18
-rw-r--r--  src/mongo/db/s/sharding_ddl_util.cpp                                        94
-rw-r--r--  src/mongo/db/s/sharding_ddl_util_test.cpp                                    7
5 files changed, 219 insertions, 22 deletions
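
The net effect of the change: dropping a sharded collection now records a final entry in config.placementHistory with an empty shard list, so the point in time at which a namespace stopped being placed anywhere remains queryable. A minimal sketch of such an entry and of how to fetch it; field names are taken from the assertions in the test below, and all concrete values are purely illustrative:

// Approximate shape of the document appended to config.placementHistory when a sharded
// collection is dropped (values are made up; the uuid matches the entry written when the
// collection was originally sharded).
const examplePlacementEntry = {
    nss: "dropCollectionTestDB.shardedCollName",          // namespace of the dropped collection
    uuid: UUID("a2f4c7de-0000-4c1a-9f1e-000000000000"),   // unchanged across the drop
    timestamp: Timestamp(1669642533, 4),                  // cluster time of the removal
    shards: []                                            // empty set of shards marks the removal
};

// Latest placement entry for a namespace, queried the same way the test helper does
// ('st' is a ShardingTest instance, as used throughout the test file).
const configDB = st.s.getDB("config");
const latest = configDB.placementHistory.find({nss: examplePlacementEntry.nss})
                   .sort({timestamp: -1})
                   .limit(1)
                   .toArray()[0];
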
diff --git a/jstests/sharding/store_historical_placement_data.js b/jstests/sharding/store_historical_placement_data.js
index 0716cce1ef4..6198c6900ee 100644
--- a/jstests/sharding/store_historical_placement_data.js
+++ b/jstests/sharding/store_historical_placement_data.js
@@ -14,6 +14,7 @@ function getInfoFromConfigDatabases(dbName) {
if (configDBsQueryResults.length === 0) {
return null;
}
+
assert.eq(1, configDBsQueryResults.length);
return configDBsQueryResults[0];
}
@@ -23,6 +24,7 @@ function getInfoFromConfigCollections(fullCollName) {
if (configCollsQueryResults.length === 0) {
return null;
}
+
assert.eq(configCollsQueryResults.length, 1);
return configCollsQueryResults[0];
}
@@ -30,6 +32,10 @@ function getInfoFromConfigCollections(fullCollName) {
function getLatestPlacementInfoFor(namespace) {
const placementQueryResults =
configDB.placementHistory.find({nss: namespace}).sort({timestamp: -1}).limit(1).toArray();
+ if (placementQueryResults.length === 0) {
+ return null;
+ }
+
assert.eq(placementQueryResults.length, 1);
return placementQueryResults[0];
}
@@ -178,18 +184,120 @@ function testMovePrimary(dbName, fromPrimaryShardName, toPrimaryShardName) {
assert.sameMembers(newDbInfo.shards, [toPrimaryShardName]);
}
+function testDropCollection() {
+ const dbName = 'dropCollectionTestDB';
+ const collName = 'shardedCollName';
+ const nss = dbName + '.' + collName;
+ const db = st.s.getDB(dbName);
+
+ testShardCollection(dbName, collName);
+ const initialPlacementInfo = getLatestPlacementInfoFor(nss);
+ const numHistoryEntriesBeforeFirstDrop = configDB.placementHistory.count({nss: nss});
+
+ // Drop the collection
+ assert.commandWorked(db.runCommand({drop: collName}));
+
+ // Verify that a single entry gets added with the expected content.
+ const numHistoryEntriesAfterFirstDrop = configDB.placementHistory.count({nss: nss});
+ assert.eq(numHistoryEntriesBeforeFirstDrop + 1, numHistoryEntriesAfterFirstDrop);
+ const collPlacementInfo = getLatestPlacementInfoFor(nss);
+ assert.eq(0, collPlacementInfo.shards.length);
+ assert.eq(initialPlacementInfo.uuid, collPlacementInfo.uuid);
+ assert(timestampCmp(initialPlacementInfo.timestamp, collPlacementInfo.timestamp) < 0);
+
+ // Verify that no placement entry gets added if dropCollection is repeated
+ assert.commandWorked(db.runCommand({drop: collName}));
+ assert.eq(numHistoryEntriesAfterFirstDrop, configDB.placementHistory.count({nss: nss}));
+
+ // Verify that no records get added in case an unsharded collection gets dropped
+ const unshardedCollName = 'unshardedColl';
+ assert.commandWorked(db.createCollection(unshardedCollName));
+
+ assert.commandWorked(db.runCommand({drop: unshardedCollName}));
+
+ assert.eq(0, configDB.placementHistory.count({nss: dbName + '.' + unshardedCollName}));
+}
+
+function testRenameCollection() {
+ const dbName = 'renameCollectionTestDB';
+ const db = st.s.getDB(dbName);
+ const oldCollName = 'old';
+ const oldNss = dbName + '.' + oldCollName;
+
+ const targetCollName = 'target';
+ const targetNss = dbName + '.' + targetCollName;
+
+ jsTest.log(
+ 'Testing that placement entries are added by rename() for each sharded collection involved in the DDL');
+ testShardCollection(dbName, oldCollName);
+ const initialPlacementForOldColl = getLatestPlacementInfoFor(oldNss);
+
+ assert.commandWorked(st.s.adminCommand({shardCollection: targetNss, key: {x: 1}}));
+ assert.commandWorked(st.s.adminCommand({split: targetNss, middle: {x: 0}}));
+ assert.commandWorked(st.s.adminCommand({moveChunk: targetNss, find: {x: -1}, to: shard1}));
+ const initialPlacementForTargetColl = getLatestPlacementInfoFor(targetNss);
+
+ assert.commandWorked(db[oldCollName].renameCollection(targetCollName, true /*dropTarget*/));
+ const finalPlacementForOldColl = getLatestPlacementInfoFor(oldNss);
+ const finalPlacementForTargetColl = getLatestPlacementInfoFor(targetNss);
+
+ // TODO SERVER-70682 modify the following assertions to distinguish between placement of dropped
+ // target VS placement of renamed target.
+ assert.eq(initialPlacementForOldColl.uuid, finalPlacementForOldColl.uuid);
+ assert.sameMembers([], finalPlacementForOldColl.shards);
+
+ assert.eq(initialPlacementForTargetColl.uuid, finalPlacementForTargetColl.uuid);
+ assert.sameMembers([], finalPlacementForTargetColl.shards);
+
+ jsTest.log(
+ 'Testing that no placement entries are added by rename() for unsharded collections involved in the DDL');
+ const unshardedOldCollName = 'unshardedOld';
+ const unshardedTargetCollName = 'unshardedTarget';
+ assert.commandWorked(db.createCollection(unshardedOldCollName));
+ assert.commandWorked(db.createCollection(unshardedTargetCollName));
+
+ assert.commandWorked(
+ db[unshardedOldCollName].renameCollection(unshardedTargetCollName, true /*dropTarget*/));
+
+ assert.eq(0, configDB.placementHistory.count({nss: dbName + '.' + unshardedOldCollName}));
+ assert.eq(0, configDB.placementHistory.count({nss: dbName + '.' + unshardedTargetCollName}));
+}
+
function testDropDatabase(dbName, primaryShardName) {
// Create the database
testEnableSharding(dbName, primaryShardName);
+ const db = st.s.getDB(dbName);
+
+ // Create an unsharded collection
+ const unshardedCollName = 'unshardedColl';
+ const unshardedCollNss = dbName + '.' + unshardedCollName;
+ assert.commandWorked(db.createCollection(unshardedCollName));
+
+ // Create a sharded collection
+ const shardedCollName = 'shardedColl';
+ const shardedCollNss = dbName + '.' + shardedCollName;
+ testShardCollection(dbName, shardedCollName);
+ const initialShardedCollPlacementInfo = getLatestPlacementInfoFor(shardedCollNss);
// Drop the database
- let db = st.s.getDB(dbName);
assert.commandWorked(db.dropDatabase());
- // Verify the database is no longer present in the placement history
+ // Verify that a new entry with an empty set of shards has been inserted for both dbName and
+ // shardedCollName...
const dbPlacementInfo = getLatestPlacementInfoFor(dbName);
- assert.neq(dbPlacementInfo, null);
- assert.eq(dbPlacementInfo.shards.length, 0);
+ assert.neq(null, dbPlacementInfo);
+ assert.eq(0, dbPlacementInfo.shards.length);
+ assert.eq(undefined, dbPlacementInfo.uuid);
+
+ const finalShardedCollPlacementInfo = getLatestPlacementInfoFor(shardedCollNss);
+ assert.neq(null, finalShardedCollPlacementInfo);
+ assert.eq(0, finalShardedCollPlacementInfo.shards.length);
+ assert.eq(initialShardedCollPlacementInfo.uuid, finalShardedCollPlacementInfo.uuid);
+ assert(timestampCmp(initialShardedCollPlacementInfo.timestamp,
+ finalShardedCollPlacementInfo.timestamp) < 0);
+
+ // ...And that unshardedCollName stays untracked.
+ assert.eq(null, getLatestPlacementInfoFor(unshardedCollNss));
}
// TODO SERVER-69106 remove the logic to skip the test execution
@@ -208,6 +316,9 @@ jsTest.log(
'Testing placement entries added by shardCollection() over an existing sharding-enabled DB');
testShardCollection('explicitlyCreatedDB', 'coll1');
+jsTest.log('Testing placement entries added by dropCollection()');
+testDropCollection();
+
jsTest.log('Testing placement entries added by shardCollection() over a non-existing db (& coll)');
testShardCollection('implicitlyCreatedDB', 'coll1');
@@ -221,6 +332,8 @@ jsTest.log(
'Testing placement entries added by movePrimary() over a new sharding-enabled DB with no data');
testMovePrimary('movePrimaryDB', st.shard0.shardName, st.shard1.shardName);
+testRenameCollection();
+
jsTest.log(
'Testing placement entries added by dropDatabase() over a new sharding-enabled DB with data');
testDropDatabase('dropDatabaseDB', st.shard0.shardName);
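
For a manual check outside the test suite, the same flow can be driven from a mongo shell. A rough sketch, assuming the feature flag's server parameter follows the usual featureFlag<Name> naming for gHistoricalPlacementShardingCatalog; database and collection names are made up:

// Spin up a small cluster with the (assumed) feature flag parameter enabled on every node.
const st = new ShardingTest({
    shards: 2,
    other: {
        configOptions: {setParameter: {featureFlagHistoricalPlacementShardingCatalog: true}},
        rsOptions: {setParameter: {featureFlagHistoricalPlacementShardingCatalog: true}}
    }
});

const dbName = "manualPlacementCheckDB";
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({shardCollection: dbName + ".coll", key: {x: 1}}));
assert.commandWorked(st.s.getDB(dbName).runCommand({drop: "coll"}));

// The newest placement entry for the namespace should now report an empty shard list.
const entry = st.s.getDB("config")
                  .placementHistory.find({nss: dbName + ".coll"})
                  .sort({timestamp: -1})
                  .limit(1)
                  .toArray()[0];
assert.eq(0, entry.shards.length);
st.stop();
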
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index a97c5dabee0..3b5152f1aad 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -366,6 +366,7 @@ env.Library(
'$BUILD_DIR/mongo/db/audit',
'$BUILD_DIR/mongo/db/catalog/catalog_helpers',
'$BUILD_DIR/mongo/db/catalog/collection_options',
+ '$BUILD_DIR/mongo/db/cluster_transaction_api',
'$BUILD_DIR/mongo/db/commands/cluster_server_parameter_commands_invocation',
'$BUILD_DIR/mongo/db/commands/mongod_fcv',
'$BUILD_DIR/mongo/db/commands/set_feature_compatibility_version_idl',
diff --git a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
index cfd4f40a3bf..d92628c12dd 100644
--- a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
+++ b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp
@@ -93,19 +93,15 @@ public:
repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
auto txnParticipant = TransactionParticipant::get(opCtx);
- if (!txnParticipant) {
- // old binaries will not send the txnNumber
- ShardingCatalogManager::get(opCtx)->renameShardedMetadata(
- opCtx,
- ns(),
- req.getTo(),
- ShardingCatalogClient::kMajorityWriteConcern,
- req.getOptFromCollection());
- return;
- }
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << Request::kCommandName
+ << " expected to be called within a transaction",
+ txnParticipant);
{
auto newClient = opCtx->getServiceContext()->makeClient("RenameCollectionMetadata");
+ AuthorizationSession::get(newClient.get())
+ ->grantInternalAuthorization(newClient.get());
{
stdx::lock_guard<Client> lk(*newClient.get());
newClient->setSystemOperationKillableByStepdown(lk);
@@ -127,6 +123,8 @@ public:
// Since no write happened on this txnNumber, we need to make a dummy write so that
// secondaries can be aware of this txn.
+ // Such a write will also guarantee that the lastOpTime of opCtx will be inclusive of any
+ // write executed under the AlternativeClientRegion.
DBDirectClient client(opCtx);
client.update(NamespaceString::kServerConfigurationNamespace.ns(),
BSON("_id"
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 383264d9922..c38a084a005 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -31,6 +31,7 @@
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/catalog/collection_catalog.h"
+#include "mongo/db/cluster_transaction_api.h"
#include "mongo/db/commands/feature_compatibility_version.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/db_raii.h"
@@ -42,6 +43,7 @@
#include "mongo/db/s/shard_filtering_metadata_refresh.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/db/s/sharding_util.h"
+#include "mongo/db/transaction/transaction_api.h"
#include "mongo/db/vector_clock.h"
#include "mongo/db/write_block_bypass.h"
#include "mongo/logv2/log.h"
@@ -49,10 +51,13 @@
#include "mongo/s/analyze_shard_key_documents_gen.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
+#include "mongo/s/catalog/type_namespace_placement_gen.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/set_allow_migrations_gen.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
#include "mongo/s/write_ops/batch_write_exec.h"
+#include "mongo/util/uuid.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -121,15 +126,88 @@ void deleteCollection(OperationContext* opCtx,
const NamespaceString& nss,
const UUID& uuid,
const WriteConcernOptions& writeConcern) {
- const auto catalogClient = Grid::get(opCtx)->catalogClient();
+ /* Perform a transaction to delete the collection and append a new placement entry.
+ * NOTE: deleteCollectionFn may be run on a separate thread than the one serving
+ * deleteCollection(). For this reason, all the referenced parameters have to
+ * be captured by value.
+ * TODO SERVER-66261 replace capture list with a single '&'.
+ */
+ auto transactionChain = [nss, uuid](const txn_api::TransactionClient& txnClient,
+ ExecutorPtr txnExec) {
+ // Remove config.collection entry. Query by 'ns' AND 'uuid' so that the remove can be
+ // resolved with an IXSCAN (thanks to the index on '_id') and is idempotent (thanks to the
+ // 'uuid')
+ const auto deleteCollectionQuery = BSON(
+ CollectionType::kNssFieldName << nss.ns() << CollectionType::kUuidFieldName << uuid);
+
+ write_ops::DeleteCommandRequest deleteOp(CollectionType::ConfigNS);
+ deleteOp.setDeletes({[&]() {
+ write_ops::DeleteOpEntry entry;
+ entry.setMulti(false);
+ entry.setQ(deleteCollectionQuery);
+ return entry;
+ }()});
- // Remove config.collection entry. Query by 'ns' AND 'uuid' so that the remove can be resolved
- // with an IXSCAN (thanks to the index on '_id') and is idempotent (thanks to the 'uuid')
- uassertStatusOK(catalogClient->removeConfigDocuments(
- opCtx,
- CollectionType::ConfigNS,
- BSON(CollectionType::kNssFieldName << nss.ns() << CollectionType::kUuidFieldName << uuid),
- writeConcern));
+ return txnClient.runCRUDOp(deleteOp, {} /*stmtIds*/)
+ .thenRunOn(txnExec)
+ .then([&](const BatchedCommandResponse& deleteCollResponse) {
+ uassertStatusOK(deleteCollResponse.toStatus());
+ const auto writePlacementEntry =
+ feature_flags::gHistoricalPlacementShardingCatalog.isEnabled(
+ serverGlobalParams.featureCompatibility);
+
+ // Also skip the insertion of the placement entry if the previous statement didn't
+ // remove any document - we can deduce that the whole transaction was already
+ // committed in a previous attempt.
+ if (!writePlacementEntry || deleteCollResponse.getN() == 0) {
+ BatchedCommandResponse noOpResponse;
+ noOpResponse.setStatus(Status::OK());
+ noOpResponse.setN(0);
+ return SemiFuture<BatchedCommandResponse>(std::move(noOpResponse));
+ }
+
+ auto now = VectorClock::get(getGlobalServiceContext())->getTime();
+ const auto clusterTime = now.clusterTime().asTimestamp();
+ NamespacePlacementType placementInfo(
+ NamespaceString(nss), clusterTime, {} /*shards*/);
+ placementInfo.setUuid(uuid);
+ write_ops::InsertCommandRequest insertPlacementEntry(
+ NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementInfo.toBSON()});
+
+ return txnClient.runCRUDOp(insertPlacementEntry, {} /*stmtIds*/);
+ })
+ .thenRunOn(txnExec)
+ .then([&](const BatchedCommandResponse& insertPlacementEntryResponse) {
+ uassertStatusOK(insertPlacementEntryResponse.toStatus());
+ })
+ .semi();
+ };
+
+ // The Internal Transactions API receives the write concern option through the passed Operation
+ // context.
+ WriteConcernOptions originalWC = opCtx->getWriteConcern();
+ opCtx->setWriteConcern(writeConcern);
+ ScopeGuard guard([opCtx, originalWC] { opCtx->setWriteConcern(originalWC); });
+ auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
+
+ // Instantiate the right custom TXN client to ensure that the queries to the config DB will be
+ // routed to the CSRS.
+ auto customTxnClient = [&]() -> std::unique_ptr<txn_api::TransactionClient> {
+ if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
+ return std::make_unique<txn_api::details::SEPTransactionClient>(
+ opCtx,
+ executor,
+ std::make_unique<txn_api::details::ClusterSEPTransactionClientBehaviors>(
+ opCtx->getServiceContext()));
+ }
+
+ invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+ return nullptr;
+ }();
+
+ txn_api::SyncTransactionWithRetries txn(
+ opCtx, executor, nullptr /*resourceYielder*/, std::move(customTxnClient));
+ txn.run(opCtx, transactionChain);
}
write_ops::UpdateCommandRequest buildNoopWriteRequestCommand() {
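
The transaction chain above boils down to two statements that must commit atomically: delete the config.collections document (matched by namespace and uuid so retries stay idempotent) and, unless the feature flag is disabled or the delete matched nothing, insert the empty-shards placement document. A purely didactic shell sketch of that control flow follows; it is not the real code path (the server runs these statements inside a single internal transaction on the CSRS), and the config.collections field names are assumptions derived from CollectionType:

// nss/uuid of the collection being dropped; 'st.s' is a mongos connection as in the earlier sketch.
const nss = "dropCollectionTestDB.shardedCollName";
const configDB = st.s.getDB("config");
const collDoc = configDB.collections.findOne({_id: nss});   // '_id' assumed to hold the namespace

// Statement 1: remove the collection entry, matching both namespace and uuid.
const res = configDB.collections.remove({_id: nss, uuid: collDoc.uuid}, {justOne: true});

// Idempotency guard: if nothing was deleted, a previous attempt already committed the whole
// transaction, so no additional placement entry is written.
if (res.nRemoved > 0) {
    // Stand-in for the VectorClock cluster time used by the server-side code.
    const clusterTime = st.s.adminCommand({ping: 1}).$clusterTime.clusterTime;
    // Statement 2: record that the namespace is no longer placed on any shard.
    configDB.placementHistory.insert(
        {nss: nss, uuid: collDoc.uuid, timestamp: clusterTime, shards: []});
}
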
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index 137434dfb59..93748a09309 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -29,6 +29,7 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
@@ -51,9 +52,15 @@ class ShardingDDLUtilTest : public ConfigServerTestFixture {
protected:
ShardType shard0;
+private:
+ ReadWriteConcernDefaultsLookupMock _lookupMock;
+
void setUp() override {
setUpAndInitializeConfigDb();
+ // Manually instantiate the ReadWriteConcernDefaults decoration on the service
+ ReadWriteConcernDefaults::create(getServiceContext(), _lookupMock.getFetchDefaultsFn());
+
// Create config.transactions collection
auto opCtx = operationContext();
DBDirectClient client(opCtx);