summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorTommaso Tocci <tommaso.tocci@mongodb.com>2022-02-28 13:44:53 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-02-28 14:17:00 +0000
commit891c15831552cea487545c0b16944a7a195d6871 (patch)
treec10d83decef127c4e5dcaf3b004d162775c58372 /src
parent0544506460a4ff21be5a1d4708e38ecbf8338f88 (diff)
downloadmongo-891c15831552cea487545c0b16944a7a195d6871.tar.gz
SERVER-60926 Make enableSharding command optional
Diffstat (limited to 'src')
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp74
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp39
-rw-r--r--src/mongo/s/cluster_ddl.cpp4
-rw-r--r--src/mongo/s/cluster_ddl.h2
-rw-r--r--src/mongo/s/commands/cluster_shard_collection_cmd.cpp4
-rw-r--r--src/mongo/s/sharding_feature_flags.idl6
-rw-r--r--src/mongo/shell/shardingtest.js33
-rw-r--r--src/mongo/shell/utils_sh.js140
8 files changed, 143 insertions, 159 deletions
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 4d6ee0347e2..212e598bb67 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -48,6 +48,7 @@
#include "mongo/s/client/shard.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
namespace mongo {
namespace {
@@ -93,7 +94,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
}
uassert(ErrorCodes::InvalidOptions,
- str::stream() << "Cannot manually create or shard database '" << dbName << "'",
+            str::stream() << "Cannot manually create database '" << dbName << "'",
dbName != NamespaceString::kAdminDb && dbName != NamespaceString::kLocalDb);
uassert(ErrorCodes::InvalidNamespace,
@@ -103,39 +104,55 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
// Make sure to force update of any stale metadata
ON_BLOCK_EXIT([&] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName); });
+ auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
+
DBDirectClient client(opCtx);
boost::optional<DistLockManager::ScopedLock> dbLock;
- // First perform an optimistic attempt to write the 'sharded' field to the database entry, in
- // case this is the only thing, which is missing. If that doesn't succeed, go through the
+ const auto enableShardingOptional =
+ feature_flags::gEnableShardingOptional.isEnabled(serverGlobalParams.featureCompatibility);
+
+ const auto dbMatchFilter = [&] {
+ BSONObjBuilder filterBuilder;
+ filterBuilder.append(DatabaseType::kNameFieldName, dbName);
+ if (optPrimaryShard) {
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "invalid shard name: " << *optPrimaryShard,
+ optPrimaryShard->isValid());
+ filterBuilder.append(DatabaseType::kPrimaryFieldName, optPrimaryShard->toString());
+ }
+ return filterBuilder.obj();
+ }();
+
+
+ // First perform an optimistic attempt to write the 'sharded' field to the database entry,
+ // in case this is the only thing, which is missing. If that doesn't succeed, go through the
// expensive createDatabase flow.
while (true) {
- auto response = client.findAndModify([&] {
- write_ops::FindAndModifyCommandRequest findAndModify(
- NamespaceString::kConfigDatabasesNamespace);
- findAndModify.setQuery([&] {
- BSONObjBuilder queryFilterBuilder;
- queryFilterBuilder.append(DatabaseType::kNameFieldName, dbName);
- if (optPrimaryShard) {
- uassert(ErrorCodes::BadValue,
- str::stream() << "invalid shard name: " << *optPrimaryShard,
- optPrimaryShard->isValid());
- queryFilterBuilder.append(DatabaseType::kPrimaryFieldName,
- optPrimaryShard->toString());
- }
- return queryFilterBuilder.obj();
+ if (!enableShardingOptional) {
+ auto response = client.findAndModify([&] {
+ write_ops::FindAndModifyCommandRequest findAndModify(
+ NamespaceString::kConfigDatabasesNamespace);
+ findAndModify.setQuery(dbMatchFilter);
+ findAndModify.setUpdate(write_ops::UpdateModification::parseFromClassicUpdate(
+ BSON("$set" << BSON(DatabaseType::kShardedFieldName << enableSharding))));
+ findAndModify.setUpsert(false);
+ findAndModify.setNew(true);
+ return findAndModify;
}());
- findAndModify.setUpdate(write_ops::UpdateModification::parseFromClassicUpdate(
- BSON("$set" << BSON(DatabaseType::kShardedFieldName << enableSharding))));
- findAndModify.setUpsert(false);
- findAndModify.setNew(true);
- return findAndModify;
- }());
-
- if (response.getLastErrorObject().getNumDocs()) {
- uassert(528120, "Missing value in the response", response.getValue());
- return DatabaseType::parse(IDLParserErrorContext("DatabaseType"), *response.getValue());
+
+ if (response.getLastErrorObject().getNumDocs()) {
+ uassert(528120, "Missing value in the response", response.getValue());
+ return DatabaseType::parse(IDLParserErrorContext("DatabaseType"),
+ *response.getValue());
+ }
+ } else {
+ auto dbObj = client.findOne(NamespaceString::kConfigDatabasesNamespace, dbMatchFilter);
+ if (!dbObj.isEmpty()) {
+ replClient.setLastOpToSystemLastOpTime(opCtx);
+ return DatabaseType::parse(IDLParserErrorContext("DatabaseType"), std::move(dbObj));
+ }
}
if (dbLock) {
@@ -151,7 +168,6 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
// Expensive createDatabase code path
const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
// Check if a database already exists with the same name (case sensitive), and if so, return the
// existing entry.
@@ -200,7 +216,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
// Pick a primary shard for the new database.
DatabaseType db(dbName.toString(),
shardPtr->getId(),
- enableSharding,
+ enableShardingOptional ? false : enableSharding,
DatabaseVersion(UUID::gen(), clusterTime));
LOGV2(21938,
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 4016982d6c1..72766e99945 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -57,6 +57,7 @@
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/cluster_write.h"
#include "mongo/s/grid.h"
+#include "mongo/s/sharding_feature_flags_gen.h"
namespace mongo {
namespace {
@@ -630,25 +631,29 @@ ExecutorFuture<void> CreateCollectionCoordinator::_runImpl(
void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx) {
LOGV2_DEBUG(5277902, 2, "Create collection _checkCommandArguments", "namespace"_attr = nss());
- const auto dbEnabledForSharding = [&, this] {
- // The modification of the 'sharded' flag for the db does not imply a database version
- // change so we can't use the DatabaseShardingState to look it up. Instead we will do a
- // first attempt through the catalog cache and if it is unset we will attempt another time
- // after a forced catalog cache refresh.
- auto catalogCache = Grid::get(opCtx)->catalogCache();
-
- auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss().db()));
- if (!dbInfo.shardingEnabled()) {
- sharding_ddl_util::linearizeCSRSReads(opCtx);
- dbInfo = uassertStatusOK(catalogCache->getDatabaseWithRefresh(opCtx, nss().db()));
- }
+ if (!feature_flags::gEnableShardingOptional.isEnabled(
+ serverGlobalParams.featureCompatibility)) {
- return dbInfo.shardingEnabled();
- }();
+ const auto dbEnabledForSharding = [&, this] {
+ // The modification of the 'sharded' flag for the db does not imply a database version
+ // change so we can't use the DatabaseShardingState to look it up. Instead we will do a
+ // first attempt through the catalog cache and if it is unset we will attempt another
+ // time after a forced catalog cache refresh.
+ auto catalogCache = Grid::get(opCtx)->catalogCache();
- uassert(ErrorCodes::IllegalOperation,
- str::stream() << "sharding not enabled for db " << nss().db(),
- dbEnabledForSharding);
+ auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss().db()));
+ if (!dbInfo.shardingEnabled()) {
+ sharding_ddl_util::linearizeCSRSReads(opCtx);
+ dbInfo = uassertStatusOK(catalogCache->getDatabaseWithRefresh(opCtx, nss().db()));
+ }
+
+ return dbInfo.shardingEnabled();
+ }();
+
+ uassert(ErrorCodes::IllegalOperation,
+ str::stream() << "sharding not enabled for db " << nss().db(),
+ dbEnabledForSharding);
+ }
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Namespace too long. Namespace: " << nss()
diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp
index 6facb1a7a52..58fa97a9192 100644
--- a/src/mongo/s/cluster_ddl.cpp
+++ b/src/mongo/s/cluster_ddl.cpp
@@ -119,8 +119,7 @@ CachedDatabaseInfo createDatabase(OperationContext* opCtx,
void createCollection(OperationContext* opCtx, const ShardsvrCreateCollection& request) {
const auto& nss = request.getNamespace();
- auto catalogCache = Grid::get(opCtx)->catalogCache();
- const auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss.db()));
+ const auto dbInfo = createDatabase(opCtx, nss.db());
auto cmdResponse = executeCommandAgainstDatabasePrimaryOrFirstShard(
opCtx,
@@ -136,6 +135,7 @@ void createCollection(OperationContext* opCtx, const ShardsvrCreateCollection& r
auto createCollResp = CreateCollectionResponse::parse(IDLParserErrorContext("createCollection"),
remoteResponse.data);
+ auto catalogCache = Grid::get(opCtx)->catalogCache();
catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection(
nss, createCollResp.getCollectionVersion(), dbInfo.primaryId());
}
diff --git a/src/mongo/s/cluster_ddl.h b/src/mongo/s/cluster_ddl.h
index 1f2dbe8b4ce..0aa3cc24503 100644
--- a/src/mongo/s/cluster_ddl.h
+++ b/src/mongo/s/cluster_ddl.h
@@ -37,7 +37,7 @@ namespace cluster {
/**
* Creates (or ensures that it is created) a database `dbName`, with `suggestedPrimaryId` as the
- * primary node and the `shardingEnabled` field set to true.
+ * primary node.
*/
CachedDatabaseInfo createDatabase(OperationContext* opCtx,
StringData dbName,
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 218afb2f558..68ec95bb05c 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -61,9 +61,7 @@ public:
}
std::string help() const override {
- return "Shard a collection. Requires key. Optional unique."
- " Sharding must already be enabled for the database.\n"
- " { enablesharding : \"<dbname>\" }\n";
+ return "Shard a collection. Requires key. Optional unique.";
}
Status checkAuthForCommand(Client* client,
diff --git a/src/mongo/s/sharding_feature_flags.idl b/src/mongo/s/sharding_feature_flags.idl
index 850b059b272..520f233d4f8 100644
--- a/src/mongo/s/sharding_feature_flags.idl
+++ b/src/mongo/s/sharding_feature_flags.idl
@@ -40,3 +40,9 @@ feature_flags:
description: Feature flag for enabling the new metrics for global indexes and resharding.
cpp_varname: feature_flags::gFeatureFlagShardingDataTransformMetrics
default: false
+ # TODO SERVER-63983: remove the following flag once 6.0 becomes lastLTS
+ featureFlagEnableShardingOptional:
+ description: "Feature flag for making enableSharding optional"
+ cpp_varname: feature_flags::gEnableShardingOptional
+ default: true
+ version: 6.0
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 35cf6ecec4d..193dd8f9485 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -158,35 +158,6 @@ var ShardingTest = function(params) {
}
/**
- * Checks whether the specified collection is sharded by consulting the config metadata.
- */
- function _isSharded(collName) {
- var collName = "" + collName;
- var dbName;
-
- if (typeof collName.getCollectionNames == 'function') {
- dbName = "" + collName;
- collName = undefined;
- }
-
- if (dbName) {
- var x = self.config.databases.findOne({_id: dbname});
- if (x)
- return x.partitioned;
- else
- return false;
- }
-
- if (collName) {
- var x = self.config.collections.findOne({_id: collName});
- if (x)
- return true;
- else
- return false;
- }
- }
-
- /**
* Extends the ShardingTest class with the methods exposed by the sh utility class.
*/
function _extendWithShMethods() {
@@ -709,9 +680,7 @@ var ShardingTest = function(params) {
c = "" + collName;
}
- if (!_isSharded(dbName)) {
- assert.commandWorked(this.s.adminCommand({enableSharding: dbName}));
- }
+ assert.commandWorked(this.s.adminCommand({enableSharding: dbName}));
var result = assert.commandWorked(this.s.adminCommand({shardcollection: c, key: key}));
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index e28d99e7ee0..b4bd7175096 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -750,63 +750,59 @@ function printShardingStatus(configDB, verbose) {
output(2, tojsononeline(db, "", true));
- if (db.partitioned) {
- configDB.collections.find({_id: new RegExp("^" + RegExp.escape(db._id) + "\\.")})
- .sort({_id: 1})
- .forEach(function(coll) {
- // Checking for '!dropped' to ensure mongo shell compatibility with earlier
- // versions of the server
- if (!coll.dropped) {
- output(3, coll._id);
- output(4, "shard key: " + tojson(coll.key));
- output(4,
- "unique: " + truthy(coll.unique) +
- nonBooleanNote("unique", coll.unique));
+ configDB.collections.find({_id: new RegExp("^" + RegExp.escape(db._id) + "\\.")})
+ .sort({_id: 1})
+ .forEach(function(coll) {
+ // Checking for '!dropped' to ensure mongo shell compatibility with earlier
+ // versions of the server
+ if (!coll.dropped) {
+ output(3, coll._id);
+ output(4, "shard key: " + tojson(coll.key));
+ output(
+ 4,
+ "unique: " + truthy(coll.unique) + nonBooleanNote("unique", coll.unique));
+ output(4,
+ "balancing: " + !truthy(coll.noBalance) +
+ nonBooleanNote("noBalance", coll.noBalance));
+ output(4, "chunks:");
+
+ const chunksMatchPredicate =
+ coll.hasOwnProperty("timestamp") ? {uuid: coll.uuid} : {ns: coll._id};
+
+ res = configDB.chunks
+ .aggregate({$match: chunksMatchPredicate},
+ {$group: {_id: "$shard", cnt: {$sum: 1}}},
+ {$project: {_id: 0, shard: "$_id", nChunks: "$cnt"}},
+ {$sort: {shard: 1}})
+ .toArray();
+
+ var totalChunks = 0;
+ res.forEach(function(z) {
+ totalChunks += z.nChunks;
+ output(5, z.shard + "\t" + z.nChunks);
+ });
+
+ if (totalChunks < 20 || verbose) {
+ configDB.chunks.find(chunksMatchPredicate)
+ .sort({min: 1})
+ .forEach(function(chunk) {
+ output(4,
+ tojson(chunk.min) + " -->> " + tojson(chunk.max) +
+ " on : " + chunk.shard + " " + tojson(chunk.lastmod) +
+ " " + (chunk.jumbo ? "jumbo " : ""));
+ });
+ } else {
output(4,
- "balancing: " + !truthy(coll.noBalance) +
- nonBooleanNote("noBalance", coll.noBalance));
- output(4, "chunks:");
-
- const chunksMatchPredicate =
- coll.hasOwnProperty("timestamp") ? {uuid: coll.uuid} : {ns: coll._id};
-
- res = configDB.chunks
- .aggregate({$match: chunksMatchPredicate},
- {$group: {_id: "$shard", cnt: {$sum: 1}}},
- {$project: {_id: 0, shard: "$_id", nChunks: "$cnt"}},
- {$sort: {shard: 1}})
- .toArray();
-
- var totalChunks = 0;
- res.forEach(function(z) {
- totalChunks += z.nChunks;
- output(5, z.shard + "\t" + z.nChunks);
- });
-
- if (totalChunks < 20 || verbose) {
- configDB.chunks.find(chunksMatchPredicate)
- .sort({min: 1})
- .forEach(function(chunk) {
- output(4,
- tojson(chunk.min) + " -->> " + tojson(chunk.max) +
- " on : " + chunk.shard + " " +
- tojson(chunk.lastmod) + " " +
- (chunk.jumbo ? "jumbo " : ""));
- });
- } else {
- output(
- 4,
- "too many chunks to print, use verbose if you want to force print");
- }
-
- configDB.tags.find({ns: coll._id}).sort({min: 1}).forEach(function(tag) {
- output(4,
- " tag: " + tag.tag + " " + tojson(tag.min) + " -->> " +
- tojson(tag.max));
- });
+ "too many chunks to print, use verbose if you want to force print");
}
- });
- }
+
+ configDB.tags.find({ns: coll._id}).sort({min: 1}).forEach(function(tag) {
+ output(4,
+ " tag: " + tag.tag + " " + tojson(tag.min) + " -->> " +
+ tojson(tag.max));
+ });
+ }
+ });
});
print(raw);
@@ -841,27 +837,21 @@ function printShardingSizes(configDB) {
configDB.databases.find().sort({name: 1}).forEach(function(db) {
output(2, tojson(db, "", true));
- if (db.partitioned) {
- configDB.collections.find({_id: new RegExp("^" + RegExp.escape(db._id) + "\.")})
- .sort({_id: 1})
- .forEach(function(coll) {
- output(3, coll._id + " chunks:");
- configDB.chunks.find({"ns": coll._id}).sort({min: 1}).forEach(function(chunk) {
- var out = saveDB.adminCommand({
- dataSize: coll._id,
- keyPattern: coll.key,
- min: chunk.min,
- max: chunk.max
- });
- delete out.millis;
- delete out.ok;
-
- output(4,
- tojson(chunk.min) + " -->> " + tojson(chunk.max) +
- " on : " + chunk.shard + " " + tojson(out));
- });
+        configDB.collections.find({_id: new RegExp("^" + RegExp.escape(db._id) + "\\.")})
+ .sort({_id: 1})
+ .forEach(function(coll) {
+ output(3, coll._id + " chunks:");
+ configDB.chunks.find({"ns": coll._id}).sort({min: 1}).forEach(function(chunk) {
+ var out = saveDB.adminCommand(
+ {dataSize: coll._id, keyPattern: coll.key, min: chunk.min, max: chunk.max});
+ delete out.millis;
+ delete out.ok;
+
+ output(4,
+ tojson(chunk.min) + " -->> " + tojson(chunk.max) +
+ " on : " + chunk.shard + " " + tojson(out));
});
- }
+ });
});
print(raw);