author    Silvia Surroca <silvia.surroca@mongodb.com>        2022-06-03 11:58:55 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2022-06-03 12:36:42 +0000
commit    cc541819baf3d798f8f0da1edf6f9beda10961a7 (patch)
tree      95ec3ce26711e0749796795d0dfd78cf279fd108
parent    9aafbc13112e03e9aa6889b0fb754adaac8d1e84 (diff)
download  mongo-cc541819baf3d798f8f0da1edf6f9beda10961a7.tar.gz
SERVER-66431 Replace all usages of ShardType::ConfigNS by kConfigsvrShardsNamespace
-rw-r--r--  src/mongo/db/namespace_string.cpp | 1
-rw-r--r--  src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp | 4
-rw-r--r--  src/mongo/db/pipeline/sharded_agg_helpers.cpp | 4
-rw-r--r--  src/mongo/db/process_health/config_server_health_observer.cpp | 21
-rw-r--r--  src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp | 32
-rw-r--r--  src/mongo/db/s/config/config_server_test_fixture.cpp | 10
-rw-r--r--  src/mongo/db/s/config/index_on_config_test.cpp | 15
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp | 8
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp | 3
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp | 4
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp | 21
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp | 8
-rw-r--r--  src/mongo/db/s/config_server_op_observer.cpp | 7
-rw-r--r--  src/mongo/db/vector_clock_mongod.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp | 2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_test.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_shard.cpp | 2
-rw-r--r--  src/mongo/s/catalog/type_shard.h | 3
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp | 3
-rw-r--r--  src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp | 3
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp | 2
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp | 31
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp | 4
30 files changed, 119 insertions(+), 91 deletions(-)
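
A minimal standalone sketch of the pattern this commit applies (not MongoDB source: the struct below is a simplified stand-in for the real NamespaceString, and the helper function is hypothetical). The actual change deletes the duplicate ShardType::ConfigNS constant and points every caller at the existing NamespaceString::kConfigsvrShardsNamespace constant for config.shards, as the diff below shows file by file.

// Illustrative sketch only: models a single shared namespace constant that all
// callers use, instead of a second ShardType::ConfigNS definition for the same
// "config.shards" collection.
#include <iostream>
#include <string>

struct NamespaceString {
    std::string db;
    std::string coll;
    std::string ns() const { return db + "." + coll; }

    // Analogous to NamespaceString::kConfigsvrShardsNamespace ("config.shards")
    // defined once in namespace_string.cpp.
    static const NamespaceString kConfigsvrShardsNamespace;
};

const NamespaceString NamespaceString::kConfigsvrShardsNamespace{"config", "shards"};

// Hypothetical stand-in for call sites such as FindCommandRequest or
// exhaustiveFindOnConfig, which now receive the shared constant.
void findOnShardsCollection(const NamespaceString& nss) {
    std::cout << "find on " << nss.ns() << "\n";
}

int main() {
    findOnShardsCollection(NamespaceString::kConfigsvrShardsNamespace);
    return 0;
}

Defining the namespace once keeps the "config.shards" string literal in a single place, so the two constants cannot drift apart.
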
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index d6073760048..3d74f7a507a 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -167,7 +167,6 @@ const NamespaceString NamespaceString::kCompactStructuredEncryptionCoordinatorNa
const NamespaceString NamespaceString::kClusterParametersNamespace(NamespaceString::kConfigDb,
"clusterParameters");
-// TODO (SERVER-66431): replace all usages of ShardType::ConfigNS by kConfigsvrShardsNamespace
const NamespaceString NamespaceString::kConfigsvrShardsNamespace(NamespaceString::kConfigDb,
"shards");
diff --git a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
index 56cd64d653c..e09c35179f4 100644
--- a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
+++ b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp
@@ -82,9 +82,9 @@ bool isShardConfigEvent(const Document& eventDoc) {
// Check whether this event occurred on the config.shards collection.
auto nsObj = eventDoc[DocumentSourceChangeStream::kNamespaceField];
const bool isConfigDotShardsEvent = nsObj["db"_sd].getType() == BSONType::String &&
- nsObj["db"_sd].getStringData() == ShardType::ConfigNS.db() &&
+ nsObj["db"_sd].getStringData() == NamespaceString::kConfigsvrShardsNamespace.db() &&
nsObj["coll"_sd].getType() == BSONType::String &&
- nsObj["coll"_sd].getStringData() == ShardType::ConfigNS.coll();
+ nsObj["coll"_sd].getStringData() == NamespaceString::kConfigsvrShardsNamespace.coll();
// If it isn't from config.shards, treat it as a normal user event.
if (!isConfigDotShardsEvent) {
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index 7f41aeca2a2..0e14cc5e9ba 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -109,7 +109,7 @@ RemoteCursor openChangeStreamNewShardMonitor(const boost::intrusive_ptr<Expressi
const auto& configShard = Grid::get(expCtx->opCtx)->shardRegistry()->getConfigShard();
// Pipeline: {$changeStream: {startAtOperationTime: [now], allowToRunOnConfigDB: true}}
AggregateCommandRequest aggReq(
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
{BSON(DocumentSourceChangeStream::kStageName
<< BSON(DocumentSourceChangeStreamSpec::kStartAtOperationTimeFieldName
<< startMonitoringAtTime
@@ -1196,7 +1196,7 @@ DispatchShardPipelineResults dispatchShardPipeline(
// For $changeStream, we must open an extra cursor on the 'config.shards' collection, so
// that we can monitor for the addition of new shards inline with real events.
- if (hasChangeStream && expCtx->ns.db() != ShardType::ConfigNS.db()) {
+ if (hasChangeStream && expCtx->ns.db() != NamespaceString::kConfigsvrShardsNamespace.db()) {
cursors.emplace_back(openChangeStreamNewShardMonitor(expCtx, shardRegistryReloadTime));
}
}
diff --git a/src/mongo/db/process_health/config_server_health_observer.cpp b/src/mongo/db/process_health/config_server_health_observer.cpp
index 290387d4de5..5e8a9ada390 100644
--- a/src/mongo/db/process_health/config_server_health_observer.cpp
+++ b/src/mongo/db/process_health/config_server_health_observer.cpp
@@ -209,7 +209,7 @@ void ConfigServerHealthObserver::_runSmokeReadShardsCommand(std::shared_ptr<Chec
}();
BSONObjBuilder findCmdBuilder;
- FindCommandRequest findCommand(ShardType::ConfigNS);
+ FindCommandRequest findCommand(NamespaceString::kConfigsvrShardsNamespace);
findCommand.setReadConcern(readConcernObj);
findCommand.setLimit(1);
findCommand.setSingleBatch(true);
@@ -221,15 +221,16 @@ void ConfigServerHealthObserver::_runSmokeReadShardsCommand(std::shared_ptr<Chec
StatusWith<Shard::CommandResponse> findOneShardResponse{ErrorCodes::HostUnreachable,
"Config server read was not run"};
try {
- findOneShardResponse = Grid::get(ctx->opCtx.get())
- ->shardRegistry()
- ->getConfigShard()
- ->runCommand(ctx->opCtx.get(),
- readPref,
- ShardType::ConfigNS.db().toString(),
- findCmdBuilder.done(),
- kServerRequestTimeout,
- Shard::RetryPolicy::kNoRetry);
+ findOneShardResponse =
+ Grid::get(ctx->opCtx.get())
+ ->shardRegistry()
+ ->getConfigShard()
+ ->runCommand(ctx->opCtx.get(),
+ readPref,
+ NamespaceString::kConfigsvrShardsNamespace.db().toString(),
+ findCmdBuilder.done(),
+ kServerRequestTimeout,
+ Shard::RetryPolicy::kNoRetry);
} catch (const DBException& exc) {
findOneShardResponse = StatusWith<Shard::CommandResponse>(exc.toStatus());
}
diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
index 8e19a6087e6..bf22d67619e 100644
--- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
+++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp
@@ -122,10 +122,14 @@ protected:
TEST_F(BalancerChunkSelectionTest, TagRangesOverlap) {
// Set up two shards in the metadata.
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ kShard0,
+ kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ kShard1,
+ kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
@@ -178,11 +182,11 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
"featureFlagBalanceAccordingToDataSize", false};
// Set up two shards in the metadata.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
appendTags(kShard0, {"A"}),
kMajorityWriteConcern));
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
appendTags(kShard1, {"A"}),
kMajorityWriteConcern));
@@ -237,11 +241,11 @@ TEST_F(BalancerChunkSelectionTest, TagRangeMaxNotAlignedWithChunkMax) {
TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeAutoSplitted) {
// Set up two shards in the metadata, each one with its own tag
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
appendTags(kShard0, {"A"}),
kMajorityWriteConcern));
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
appendTags(kShard1, {"B"}),
kMajorityWriteConcern));
@@ -287,10 +291,14 @@ TEST_F(BalancerChunkSelectionTest, ShardedTimeseriesCollectionsCanBeBalanced) {
RAIIServerParameterControllerForTest featureFlagBalanceAccordingToDataSize{
"featureFlagBalanceAccordingToDataSize", false};
// Set up two shards in the metadata.
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ShardType::ConfigNS, kShard0, kMajorityWriteConcern));
- ASSERT_OK(catalogClient()->insertConfigDocument(
- operationContext(), ShardType::ConfigNS, kShard1, kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ kShard0,
+ kMajorityWriteConcern));
+ ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ kShard1,
+ kMajorityWriteConcern));
// Set up a database and a sharded collection in the metadata.
const auto collUUID = UUID::gen();
diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp
index 05cb045f5ae..198371cf17a 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.cpp
+++ b/src/mongo/db/s/config/config_server_test_fixture.cpp
@@ -292,7 +292,7 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation
}
void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards) {
- const NamespaceString shardNS(ShardType::ConfigNS);
+ const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
for (const auto& shard : shards) {
ASSERT_OK(insertToConfigCollection(operationContext(), shardNS, shard.toBSON()));
}
@@ -300,8 +300,8 @@ void ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards)
StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
const std::string& shardId) {
- auto doc =
- findOneOnConfigCollection(opCtx, ShardType::ConfigNS, BSON(ShardType::name(shardId)));
+ auto doc = findOneOnConfigCollection(
+ opCtx, NamespaceString::kConfigsvrShardsNamespace, BSON(ShardType::name(shardId)));
if (!doc.isOK()) {
if (doc.getStatus() == ErrorCodes::NoMatchingDocument) {
return {ErrorCodes::ShardNotFound,
@@ -323,8 +323,8 @@ void ConfigServerTestFixture::setupCollection(const NamespaceString& nss,
if (!dbDoc.isOK()) {
// If the database is not setup, choose the first available shard as primary to implicitly
// create the db
- auto swShardDoc =
- findOneOnConfigCollection(operationContext(), ShardType::ConfigNS, BSONObj());
+ auto swShardDoc = findOneOnConfigCollection(
+ operationContext(), NamespaceString::kConfigsvrShardsNamespace, BSONObj());
invariant(swShardDoc.isOK(),
"At least one shard should be setup when initializing a collection");
auto shard = uassertStatusOK(ShardType::fromBSON(swShardDoc.getValue()));
diff --git a/src/mongo/db/s/config/index_on_config_test.cpp b/src/mongo/db/s/config/index_on_config_test.cpp
index d6e3b5936b1..38f36d98fc2 100644
--- a/src/mongo/db/s/config/index_on_config_test.cpp
+++ b/src/mongo/db/s/config/index_on_config_test.cpp
@@ -63,8 +63,10 @@ protected:
};
TEST_F(ConfigIndexTest, CompatibleIndexAlreadyExists) {
- createIndexOnConfigCollection(
- operationContext(), ShardType::ConfigNS, BSON("host" << 1), /*unique*/ true)
+ createIndexOnConfigCollection(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ BSON("host" << 1),
+ /*unique*/ true)
.transitional_ignore();
ASSERT_OK(ShardingCatalogManager::get(operationContext())
@@ -77,15 +79,18 @@ TEST_F(ConfigIndexTest, CompatibleIndexAlreadyExists) {
<< "host_1")};
- auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
+ auto foundShardsIndexes =
+ assertGet(getIndexes(operationContext(), NamespaceString::kConfigsvrShardsNamespace));
assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
}
TEST_F(ConfigIndexTest, IncompatibleIndexAlreadyExists) {
// Make the index non-unique even though its supposed to be unique, make sure initialization
// fails
- createIndexOnConfigCollection(
- operationContext(), ShardType::ConfigNS, BSON("host" << 1), /*unique*/ false)
+ createIndexOnConfigCollection(operationContext(),
+ NamespaceString::kConfigsvrShardsNamespace,
+ BSON("host" << 1),
+ /*unique*/ false)
.transitional_ignore();
ASSERT_EQUALS(ErrorCodes::IndexKeySpecsConflict,
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 6847ade89ef..1be2dd486fb 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -138,7 +138,7 @@ StringMap<std::vector<ShardId>> buildTagsToShardIdsMap(OperationContext* opCtx,
configServer->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting(ReadPreference::Nearest),
repl::ReadConcernLevel::kMajorityReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSONObj(),
BSONObj(),
boost::none));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index baeda1f643f..86b55b03739 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -436,7 +436,7 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) {
}
result = createIndexOnConfigCollection(
- opCtx, ShardType::ConfigNS, BSON(ShardType::host() << 1), unique);
+ opCtx, NamespaceString::kConfigsvrShardsNamespace, BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
return result.withContext("couldn't create host_1 index on config db");
}
@@ -548,7 +548,7 @@ StatusWith<bool> ShardingCatalogManager::_isShardRequiredByZoneStillInUse(
configShard->exhaustiveFindOnConfig(opCtx,
readPref,
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::tags() << zoneName),
BSONObj(),
2);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
index 8d187d683f5..0a45a9d3a6d 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp
@@ -190,7 +190,7 @@ protected:
operationContext(),
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSONObj(),
BSONObj(),
boost::none));
@@ -543,7 +543,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
// Add a pre-existing shard so when generating a name for the new shard it will have to go
// higher than the existing one.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
existingShard.toBSON(),
ShardingCatalogClient::kMajorityWriteConcern));
assertShardExists(existingShard);
@@ -1209,7 +1209,7 @@ TEST_F(AddShardTest, AddExistingShardStandalone) {
// Make sure the shard already exists.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
existingShard.toBSON(),
ShardingCatalogClient::kMajorityWriteConcern));
assertShardExists(existingShard);
@@ -1318,7 +1318,7 @@ TEST_F(AddShardTest, AddExistingShardReplicaSet) {
// Make sure the shard already exists.
ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
existingShard.toBSON(),
ShardingCatalogClient::kMajorityWriteConcern));
assertShardExists(existingShard);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
index 4093d840d87..297a4aa04d8 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp
@@ -596,7 +596,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture,
shard.setHost("b:1234");
shard.setTags({"y"});
- ASSERT_OK(insertToConfigCollection(operationContext(), ShardType::ConfigNS, shard.toBSON()));
+ ASSERT_OK(insertToConfigCollection(
+ operationContext(), NamespaceString::kConfigsvrShardsNamespace, shard.toBSON()));
ASSERT_THROWS_CODE(
ShardingCatalogManager::get(operationContext())
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
index 067ce787d2a..0cf64cc5288 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp
@@ -995,7 +995,7 @@ StatusWith<BSONObj> ShardingCatalogManager::commitChunkMigration(
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name(toShard.toString())),
{},
boost::none));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
index a480f42dec5..c7c844aa33b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp
@@ -317,7 +317,8 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
auto foundLocksIndexes = assertGet(getIndexes(operationContext(), LocksType::ConfigNS));
assertBSONObjsSame(expectedLocksIndexes, foundLocksIndexes);
- auto foundShardsIndexes = assertGet(getIndexes(operationContext(), ShardType::ConfigNS));
+ auto foundShardsIndexes =
+ assertGet(getIndexes(operationContext(), NamespaceString::kConfigsvrShardsNamespace));
assertBSONObjsSame(expectedShardsIndexes, foundShardsIndexes);
auto foundTagsIndexes = assertGet(getIndexes(operationContext(), TagsType::ConfigNS));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
index 40641dfa4d8..60327f62be5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp
@@ -249,7 +249,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldErrorIfShardDocIsMalfor
<< "z"));
insertToConfigCollection(
- operationContext(), ShardType::ConfigNS, invalidShardDoc);
+ operationContext(), NamespaceString::kConfigsvrShardsNamespace, invalidShardDoc);
auto status =
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
index 5e2c7b656df..32544cacc7b 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp
@@ -118,7 +118,7 @@ protected:
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << shardName),
BSONObj(),
1));
@@ -367,7 +367,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << shard1.getName()),
BSONObj(),
1));
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index e2bd69e8cea..7de1d4c3efe 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -120,7 +120,7 @@ StatusWith<std::string> generateNewShardName(OperationContext* opCtx) {
opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
shardNameRegex.obj(),
BSON(ShardType::name() << -1),
1);
@@ -748,7 +748,7 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
shardType.toBSON(),
ShardingCatalogClient::kLocalWriteConcern);
if (!result.isOK()) {
@@ -821,7 +821,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
configShard->exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << name),
BSONObj(),
1));
@@ -833,7 +833,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
// Find how many *other* shards exist, which are *not* currently draining
const auto countOtherNotDrainingShards = uassertStatusOK(_runCountCommandOnConfig(
opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << NE << name << ShardType::draining.ne(true))));
uassert(ErrorCodes::IllegalOperation,
"Operation not allowed because it would remove the last shard",
@@ -854,7 +854,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
const bool isShardCurrentlyDraining =
uassertStatusOK(_runCountCommandOnConfig(
opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << name << ShardType::draining(true)))) > 0;
auto* const catalogClient = Grid::get(opCtx)->catalogClient();
@@ -875,7 +875,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
uassertStatusOKWithContext(
catalogClient->updateConfigDocument(opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name() << name),
BSON("$set" << BSON(ShardType::draining(true))),
false,
@@ -930,7 +930,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
configShard->exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name.ne(name)),
{},
1);
@@ -1219,7 +1219,7 @@ void ShardingCatalogManager::_standardizeClusterParameters(OperationContext* opC
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSONObj(),
BSONObj(),
boost::none));
@@ -1240,7 +1240,7 @@ void ShardingCatalogManager::_removeShardInTransaction(OperationContext* opCtx,
const Timestamp& newTopologyTime) {
auto removeShardFn = [removedShardName, controlShardName, newTopologyTime](
const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) {
- write_ops::DeleteCommandRequest deleteOp(ShardType::ConfigNS);
+ write_ops::DeleteCommandRequest deleteOp(NamespaceString::kConfigsvrShardsNamespace);
deleteOp.setDeletes({[&]() {
write_ops::DeleteOpEntry entry;
entry.setMulti(false);
@@ -1253,7 +1253,8 @@ void ShardingCatalogManager::_removeShardInTransaction(OperationContext* opCtx,
auto deleteResponse) {
uassertStatusOK(deleteResponse.toStatus());
- write_ops::UpdateCommandRequest updateOp(ShardType::ConfigNS);
+ write_ops::UpdateCommandRequest updateOp(
+ NamespaceString::kConfigsvrShardsNamespace);
updateOp.setUpdates({[&]() {
write_ops::UpdateOpEntry entry;
entry.setUpsert(false);
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
index 6bdfb5d7655..e8739c4ec62 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp
@@ -217,7 +217,7 @@ Status ShardingCatalogManager::addShardToZone(OperationContext* opCtx,
auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name(shardName)),
BSON("$addToSet" << BSON(ShardType::tags() << zoneName)),
false,
@@ -241,7 +241,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
Lock::ExclusiveLock lk(opCtx, opCtx->lockState(), _kZoneOpLock);
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- const NamespaceString shardNS(ShardType::ConfigNS);
+ const NamespaceString shardNS(NamespaceString::kConfigsvrShardsNamespace);
//
// Check whether the shard even exist in the first place.
@@ -287,7 +287,7 @@ Status ShardingCatalogManager::removeShardFromZone(OperationContext* opCtx,
auto updateStatus = Grid::get(opCtx)->catalogClient()->updateConfigDocument(
opCtx,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name(shardName)),
BSON("$pull" << BSON(ShardType::tags() << zoneName)),
false,
@@ -322,7 +322,7 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx,
opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::tags() << zoneName),
BSONObj(),
1))
diff --git a/src/mongo/db/s/config_server_op_observer.cpp b/src/mongo/db/s/config_server_op_observer.cpp
index dff7d0d6022..c0081d68a41 100644
--- a/src/mongo/db/s/config_server_op_observer.cpp
+++ b/src/mongo/db/s/config_server_op_observer.cpp
@@ -96,7 +96,8 @@ void ConfigServerOpObserver::_onReplicationRollback(OperationContext* opCtx,
ClusterIdentityLoader::get(opCtx)->discardCachedClusterId();
}
- if (rbInfo.rollbackNamespaces.find(ShardType::ConfigNS) != rbInfo.rollbackNamespaces.end()) {
+ if (rbInfo.rollbackNamespaces.find(NamespaceString::kConfigsvrShardsNamespace) !=
+ rbInfo.rollbackNamespaces.end()) {
// If some entries were rollbacked from config.shards we might need to discard some tick
// points from the TopologyTimeTicker
const auto lastApplied = repl::ReplicationCoordinator::get(opCtx)->getMyLastAppliedOpTime();
@@ -110,7 +111,7 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx,
std::vector<InsertStatement>::const_iterator begin,
std::vector<InsertStatement>::const_iterator end,
bool fromMigrate) {
- if (nss != ShardType::ConfigNS) {
+ if (nss != NamespaceString::kConfigsvrShardsNamespace) {
return;
}
@@ -137,7 +138,7 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx,
}
void ConfigServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) {
- if (args.nss != ShardType::ConfigNS) {
+ if (args.nss != NamespaceString::kConfigsvrShardsNamespace) {
return;
}
diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp
index 8e020d66840..c46e39a63ca 100644
--- a/src/mongo/db/vector_clock_mongod.cpp
+++ b/src/mongo/db/vector_clock_mongod.cpp
@@ -201,7 +201,7 @@ void VectorClockMongoD::onInitialDataAvailable(OperationContext* opCtx,
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
const auto maxTopologyTime{[&opCtx]() -> boost::optional<Timestamp> {
DBDirectClient client{opCtx};
- FindCommandRequest findRequest{ShardType::ConfigNS};
+ FindCommandRequest findRequest{NamespaceString::kConfigsvrShardsNamespace};
findRequest.setSort(BSON(ShardType::topologyTime << -1));
findRequest.setLimit(1);
auto cursor{client.find(std::move(findRequest))};
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 42c22572dc7..2fceaeb3d4a 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -762,7 +762,7 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSONObj(), // no query filter
BSONObj(), // no sort
boost::none); // no limit
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 2974b6232df..effe9ca0a10 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -318,7 +318,7 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body);
ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()),
- ShardType::ConfigNS);
+ NamespaceString::kConfigsvrShardsNamespace);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
diff --git a/src/mongo/s/catalog/type_shard.cpp b/src/mongo/s/catalog/type_shard.cpp
index 36e8e931536..4456729939c 100644
--- a/src/mongo/s/catalog/type_shard.cpp
+++ b/src/mongo/s/catalog/type_shard.cpp
@@ -40,8 +40,6 @@
namespace mongo {
-const NamespaceString ShardType::ConfigNS("config.shards");
-
const BSONField<std::string> ShardType::name("_id");
const BSONField<std::string> ShardType::host("host");
const BSONField<bool> ShardType::draining("draining");
diff --git a/src/mongo/s/catalog/type_shard.h b/src/mongo/s/catalog/type_shard.h
index fa73e1271a4..3038655f168 100644
--- a/src/mongo/s/catalog/type_shard.h
+++ b/src/mongo/s/catalog/type_shard.h
@@ -57,9 +57,6 @@ public:
kShardAware,
};
- // Name of the shards collection in the config server.
- static const NamespaceString ConfigNS;
-
// Field names and types in the shards collection type.
static const BSONField<std::string> name;
static const BSONField<std::string> host;
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 1f66ae84c0c..5bfacde260e 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -457,7 +457,7 @@ void ShardRegistry::updateReplicaSetOnConfigServer(ServiceContext* serviceContex
auto swWasUpdated = grid->catalogClient()->updateConfigDocument(
opCtx.get(),
- ShardType::ConfigNS,
+ NamespaceString::kConfigsvrShardsNamespace,
BSON(ShardType::name(s->getId().toString())),
BSON("$set" << BSON(ShardType::host(connStr.toString()))),
false,
diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
index 5cdfd09fb33..764b154bbd0 100644
--- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
@@ -92,7 +92,8 @@ public:
// Fallback on permissions to directly modify the shard config.
if (!as->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::update)) {
+ ResourcePattern::forExactNamespace(NamespaceString::kConfigsvrShardsNamespace),
+ ActionType::update)) {
return {ErrorCodes::Unauthorized, "Unauthorized"};
}
diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
index 67e8b387eb8..74f4fadaea1 100644
--- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
@@ -99,7 +99,8 @@ public:
// Fallback on permissions to directly modify the shard config.
if (!as->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::update)) {
+ ResourcePattern::forExactNamespace(NamespaceString::kConfigsvrShardsNamespace),
+ ActionType::update)) {
return {ErrorCodes::Unauthorized, "Unauthorized"};
}
diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
index 6db8dac1aa4..cdf546f7c70 100644
--- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
+++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
@@ -102,7 +102,8 @@ public:
// Fallback on permissions to directly modify the shard config.
if (!as->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(ShardType::ConfigNS), ActionType::find)) {
+ ResourcePattern::forExactNamespace(NamespaceString::kConfigsvrShardsNamespace),
+ ActionType::find)) {
return {ErrorCodes::Unauthorized, "Unauthorized"};
}
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 8aa7afd2b83..63f6de4771d 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -643,7 +643,7 @@ bool AsyncResultsMerger::_checkHighWaterMarkEligibility(WithLock,
const CursorResponse& response) {
// If the cursor is not on the "config.shards" namespace, then it is a normal shard cursor.
// These cursors are always eligible to provide a high water mark resume token.
- if (remote.cursorNss != ShardType::ConfigNS) {
+ if (remote.cursorNss != NamespaceString::kConfigsvrShardsNamespace) {
return true;
}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 80600272e63..4ba891f55f5 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -1687,7 +1687,8 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorDoesNotAdvanceHighWaterMarkFo
cursors.push_back(makeRemoteCursor(
kTestShardIds[2],
kTestShardHosts[2],
- CursorResponse(ShardType::ConfigNS, 789, {}, boost::none, pbrtConfigCursor)));
+ CursorResponse(
+ NamespaceString::kConfigsvrShardsNamespace, 789, {}, boost::none, pbrtConfigCursor)));
params.setRemotes(std::move(cursors));
params.setTailableMode(TailableModeEnum::kTailableAndAwaitData);
params.setSort(change_stream_constants::kSortSpec);
@@ -1715,8 +1716,11 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorDoesNotAdvanceHighWaterMarkFo
pbrtConfigCursor = makePostBatchResumeToken(Timestamp(1, 2));
scheduleNetworkResponse({kTestNss, CursorId(123), {}, boost::none, pbrtFirstCursor});
scheduleNetworkResponse({kTestNss, CursorId(456), {}, boost::none, pbrtSecondCursor});
- scheduleNetworkResponse(
- {ShardType::ConfigNS, CursorId(789), {}, boost::none, pbrtConfigCursor});
+ scheduleNetworkResponse({NamespaceString::kConfigsvrShardsNamespace,
+ CursorId(789),
+ {},
+ boost::none,
+ pbrtConfigCursor});
// The high water mark has not advanced from its previous value.
ASSERT_BSONOBJ_EQ(arm->getHighWaterMark(), initialHighWaterMark);
@@ -1734,8 +1738,11 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorDoesNotAdvanceHighWaterMarkFo
configEvent.addField(BSON("$sortKey" << BSON_ARRAY(pbrtConfigCursor)).firstElement());
scheduleNetworkResponse({kTestNss, CursorId(123), {}, boost::none, pbrtFirstCursor});
scheduleNetworkResponse({kTestNss, CursorId(456), {}, boost::none, pbrtSecondCursor});
- scheduleNetworkResponse(
- {ShardType::ConfigNS, CursorId(789), {configEvent}, boost::none, pbrtConfigCursor});
+ scheduleNetworkResponse({NamespaceString::kConfigsvrShardsNamespace,
+ CursorId(789),
+ {configEvent},
+ boost::none,
+ pbrtConfigCursor});
// The config cursor has a lower sort key than the other shards, so we can retrieve the event.
ASSERT_TRUE(arm->ready());
@@ -1750,8 +1757,11 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorDoesNotAdvanceHighWaterMarkFo
// event, it does not advance the ARM's high water mark sort key.
scheduleNetworkResponse({kTestNss, CursorId(123), {}, boost::none, pbrtFirstCursor});
scheduleNetworkResponse({kTestNss, CursorId(456), {}, boost::none, pbrtSecondCursor});
- scheduleNetworkResponse(
- {ShardType::ConfigNS, CursorId(789), {}, boost::none, pbrtConfigCursor});
+ scheduleNetworkResponse({NamespaceString::kConfigsvrShardsNamespace,
+ CursorId(789),
+ {},
+ boost::none,
+ pbrtConfigCursor});
ASSERT_BSONOBJ_EQ(arm->getHighWaterMark(), initialHighWaterMark);
ASSERT_FALSE(arm->ready());
@@ -1762,8 +1772,11 @@ TEST_F(AsyncResultsMergerTest, SortedTailableCursorDoesNotAdvanceHighWaterMarkFo
pbrtConfigCursor = makePostBatchResumeToken(Timestamp(1, 12));
scheduleNetworkResponse({kTestNss, CursorId(123), {}, boost::none, pbrtFirstCursor});
scheduleNetworkResponse({kTestNss, CursorId(456), {}, boost::none, pbrtSecondCursor});
- scheduleNetworkResponse(
- {ShardType::ConfigNS, CursorId(789), {}, boost::none, pbrtConfigCursor});
+ scheduleNetworkResponse({NamespaceString::kConfigsvrShardsNamespace,
+ CursorId(789),
+ {},
+ boost::none,
+ pbrtConfigCursor});
ASSERT_BSONOBJ_GT(arm->getHighWaterMark(), initialHighWaterMark);
ASSERT_BSONOBJ_EQ(arm->getHighWaterMark(), pbrtConfigCursor);
ASSERT_FALSE(arm->ready());
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index 4e9779773ac..1f517f5bee3 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -252,12 +252,12 @@ void ShardingTestFixture::setupShards(const std::vector<ShardType>& shards) {
void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards) {
onFindCommand([this, &shards](const RemoteCommandRequest& request) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, ShardType::ConfigNS);
+ ASSERT_EQ(nss, NamespaceString::kConfigsvrShardsNamespace);
// If there is no '$db', append it.
auto cmd = OpMsgRequest::fromDBAndBody(nss.db(), request.cmdObj).body;
auto query = query_request_helper::makeFromFindCommandForTests(cmd, nss);
- ASSERT_EQ(*query->getNamespaceOrUUID().nss(), ShardType::ConfigNS);
+ ASSERT_EQ(*query->getNamespaceOrUUID().nss(), NamespaceString::kConfigsvrShardsNamespace);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());