author      Dianna Hohensee <dianna.hohensee@10gen.com>    2017-02-01 14:18:12 -0500
committer   Dianna Hohensee <dianna.hohensee@10gen.com>    2017-02-10 15:17:10 -0500
commit      489cd07d2a3711286debae56f28416d7ba290648 (patch)
tree        4a8dd44484c35b33dc71348635f9a2c7c97b35e8 /src/mongo
parent      50e9769099bb49220783f95c9045dd1259a18eb4 (diff)
download    mongo-r3.5.3.tar.gz
SERVER-27860 remove invariants that prevented ShardLocal from running on shards, and remove "-OnConfig" function name suffixes (r3.5.3)
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/s/balancer/migration_manager.cpp                            2
-rw-r--r--  src/mongo/db/s/balancer/migration_manager_test.cpp                      36
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request.cpp                     2
-rw-r--r--  src/mongo/db/s/balancer/scoped_migration_request_test.cpp                2
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl.cpp                          16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp                 21
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp  14
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp                    22
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp      4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp  40
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_impl.cpp                   61
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp   2
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp   87
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp              70
-rw-r--r--  src/mongo/s/client/shard.cpp                                            19
-rw-r--r--  src/mongo/s/client/shard.h                                              45
-rw-r--r--  src/mongo/s/client/shard_local.cpp                                      16
-rw-r--r--  src/mongo/s/client/shard_local.h                                        29
-rw-r--r--  src/mongo/s/client/shard_local_test.cpp                                 23
-rw-r--r--  src/mongo/s/client/shard_remote.cpp                                     12
-rw-r--r--  src/mongo/s/client/shard_remote.h                                       23
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp                               6
22 files changed, 264 insertions, 288 deletions
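
The heart of the change shows up in src/mongo/s/client/shard.h and shard.cpp below: the isConfig() invariants (and the config-server-only invariant in the ShardLocal constructor) are dropped, the "-OnConfig" suffix is removed from exhaustiveFindOnConfig, runBatchWriteCommandOnConfig, and createIndexOnConfig, and kDefaultConfigCommandTimeout becomes kDefaultCommandTimeout, so these entry points no longer assert that they are running against the config shard. The sketch below is a toy, self-contained model of that caller-visible effect; it is not the real mongo::Shard API, and OperationContext, BSON, and Status types are deliberately omitted.

    // Toy model of the rename in this commit; the real Shard class lives in
    // src/mongo/s/client/shard.h and has a much richer interface.
    #include <iostream>
    #include <string>
    #include <vector>

    class Shard {
    public:
        Shard(std::string id, bool isConfig) : _id(std::move(id)), _isConfig(isConfig) {}

        // Before this commit the method was named exhaustiveFindOnConfig() and began
        // with invariant(isConfig()); afterwards both the suffix and the assertion are gone.
        std::vector<std::string> exhaustiveFind(const std::string& query) const {
            return {_id + ": document matching " + query};  // fake result for illustration
        }

        bool isConfig() const {
            return _isConfig;
        }

    private:
        std::string _id;
        bool _isConfig;
    };

    int main() {
        Shard configShard("config", true);
        Shard dataShard("shard0000", false);

        // Both calls are now legal; before the change the second one would have
        // tripped the config-only invariant.
        for (const auto& doc : configShard.exhaustiveFind("{}"))
            std::cout << doc << "\n";
        for (const auto& doc : dataShard.exhaustiveFind("{}"))
            std::cout << doc << "\n";
    }
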
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index 381f54b0669..31f557380aa 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -219,7 +219,7 @@ void MigrationManager::startRecoveryAndAcquireDistLocks(OperationContext* txn) {
// Load the active migrations from the config.migrations collection.
auto statusWithMigrationsQueryResponse =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
diff --git a/src/mongo/db/s/balancer/migration_manager_test.cpp b/src/mongo/db/s/balancer/migration_manager_test.cpp
index aa09c329844..2f2012b1ee9 100644
--- a/src/mongo/db/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/db/s/balancer/migration_manager_test.cpp
@@ -231,20 +231,19 @@ void MigrationManagerTest::setUpMigration(const ChunkType& chunk, const ShardId&
}
void MigrationManagerTest::checkMigrationsCollectionIsEmptyAndLocksAreUnlocked() {
- auto statusWithMigrationsQueryResponse =
- shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
- BSONObj(),
- BSONObj(),
- boost::none);
+ auto statusWithMigrationsQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFind(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ NamespaceString(MigrationType::ConfigNS),
+ BSONObj(),
+ BSONObj(),
+ boost::none);
Shard::QueryResponse migrationsQueryResponse =
uassertStatusOK(statusWithMigrationsQueryResponse);
ASSERT_EQUALS(0U, migrationsQueryResponse.docs.size());
- auto statusWithLocksQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto statusWithLocksQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFind(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
@@ -553,15 +552,14 @@ TEST_F(MigrationManagerTest, InterruptMigration) {
// Check that the migration that was active when the migration manager was interrupted can be
// found in config.migrations (and thus would be recovered if a migration manager were to start
// up again).
- auto statusWithMigrationsQueryResponse =
- shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(MigrationType::ConfigNS),
- BSON(MigrationType::name(chunk.getName())),
- BSONObj(),
- boost::none);
+ auto statusWithMigrationsQueryResponse = shardRegistry()->getConfigShard()->exhaustiveFind(
+ operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ NamespaceString(MigrationType::ConfigNS),
+ BSON(MigrationType::name(chunk.getName())),
+ BSONObj(),
+ boost::none);
Shard::QueryResponse migrationsQueryResponse =
uassertStatusOK(statusWithMigrationsQueryResponse);
ASSERT_EQUALS(1U, migrationsQueryResponse.docs.size());
diff --git a/src/mongo/db/s/balancer/scoped_migration_request.cpp b/src/mongo/db/s/balancer/scoped_migration_request.cpp
index af737fd0640..c422551892b 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request.cpp
@@ -106,7 +106,7 @@ StatusWith<ScopedMigrationRequest> ScopedMigrationRequest::writeMigration(
// for the request because this migration request will join the active one once
// scheduled.
auto statusWithMigrationQueryResult =
- grid.shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ grid.shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
diff --git a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
index 10fb8da0e22..d636b739ad8 100644
--- a/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
+++ b/src/mongo/db/s/balancer/scoped_migration_request_test.cpp
@@ -67,7 +67,7 @@ public:
void ScopedMigrationRequestTest::checkMigrationsCollectionForDocument(
std::string chunkName, const unsigned long expectedNumberOfDocuments) {
- auto response = shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto response = shardRegistry()->getConfigShard()->exhaustiveFind(
operationContext(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 754db72ae6f..d84ced179db 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -204,7 +204,7 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
@@ -241,7 +241,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNoRetry); // Dist lock manager is handling own retries
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
@@ -298,7 +298,7 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
@@ -343,7 +343,7 @@ Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRe
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
@@ -378,7 +378,7 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
cmdObj,
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
@@ -409,7 +409,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(Opera
kReadPref,
"admin",
BSON("serverStatus" << 1),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (!resultStatus.isOK()) {
@@ -501,7 +501,7 @@ Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNotIdempotent);
auto findAndModifyStatus = extractFindAndModifyNewObj(std::move(resultStatus));
@@ -515,7 +515,7 @@ StatusWith<vector<BSONObj>> DistLockCatalogImpl::_findOnConfig(
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto result = _client->getConfigShard()->exhaustiveFindOnConfig(
+ auto result = _client->getConfigShard()->exhaustiveFind(
txn, readPref, repl::ReadConcernLevel::kMajorityReadConcern, nss, query, sort, limit);
if (!result.isOK()) {
return result.getStatus();
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
index df94c520318..d65c8ef1590 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_test.cpp
@@ -293,17 +293,16 @@ protected:
*/
void assertChangeWasLogged(const ShardType& addedShard) {
auto response = assertGet(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{
- ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString("config.changelog"),
- BSON("what"
- << "addShard"
- << "details.name"
- << addedShard.getName()),
- BSONObj(),
- 1));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString("config.changelog"),
+ BSON("what"
+ << "addShard"
+ << "details.name"
+ << addedShard.getName()),
+ BSONObj(),
+ 1));
ASSERT_EQ(1U, response.docs.size());
auto logEntryBSON = response.docs.front();
auto logEntry = assertGet(ChangeLogType::fromBSON(logEntryBSON));
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 104a97a3f6a..2f79b9e6f12 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -87,13 +87,13 @@ public:
const ChunkRange& range,
const string& zoneName) {
auto findStatus =
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- kReadPref,
- repl::ReadConcernLevel::kMajorityReadConcern,
- NamespaceString(TagsType::ConfigNS),
- BSONObj(),
- BSONObj(),
- 1);
+ getConfigShard()->exhaustiveFind(operationContext(),
+ kReadPref,
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ NamespaceString(TagsType::ConfigNS),
+ BSONObj(),
+ BSONObj(),
+ 1);
ASSERT_OK(findStatus.getStatus());
auto findResult = findStatus.getValue();
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 0138dd0fb37..bdfb6a4dc57 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -992,7 +992,7 @@ StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContex
StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
OperationContext* txn, repl::ReadConcernLevel readConcern) {
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
kConfigReadSelector,
readConcern,
@@ -1241,7 +1241,7 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
cmdToRun,
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNotIdempotent);
if (!response.isOK()) {
@@ -1287,7 +1287,7 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* t
kConfigPrimaryPreferredSelector,
dbname,
cmdObj,
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
result->appendElements(resultStatus.getValue().response);
@@ -1397,8 +1397,8 @@ void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
}
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- *batchResponse = configShard->runBatchWriteCommandOnConfig(
- txn, batchRequest, Shard::RetryPolicy::kNotIdempotent);
+ *batchResponse =
+ configShard->runBatchWriteCommand(txn, batchRequest, Shard::RetryPolicy::kNotIdempotent);
}
Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
@@ -1421,7 +1421,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kNoRetry);
+ configShard->runBatchWriteCommand(txn, request, Shard::RetryPolicy::kNoRetry);
Status status = response.toStatus();
@@ -1504,7 +1504,7 @@ StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommand(txn, request, Shard::RetryPolicy::kIdempotent);
Status status = response.toStatus();
if (!status.isOK()) {
@@ -1536,7 +1536,7 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommand(txn, request, Shard::RetryPolicy::kIdempotent);
return response.toStatus();
}
@@ -1603,7 +1603,7 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
createCmd,
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (!result.isOK()) {
@@ -1639,7 +1639,7 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
kConfigReadSelector,
ns.db().toString(),
countBuilder.done(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (!resultStatus.isOK()) {
return resultStatus.getStatus();
@@ -1667,7 +1667,7 @@ StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaus
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn, readPref, readConcern, nss, query, sort, limit);
if (!response.isOK()) {
return response.getStatus();
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 99fdcf0ecab..512ddf2e838 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -349,7 +349,7 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
}
TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
- getConfigShard()->createIndexOnConfig(
+ getConfigShard()->createIndex(
operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true);
ASSERT_OK(catalogManager()->initializeConfigDatabaseIfNeeded(operationContext()));
@@ -373,7 +373,7 @@ TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
TEST_F(ConfigInitializationTest, IncompatibleIndexAlreadyExists) {
// Make the index non-unique even though its supposed to be unique, make sure initialization
// fails
- getConfigShard()->createIndexOnConfig(
+ getConfigShard()->createIndex(
operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false);
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index fe3e427f228..09ac97590d5 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -143,15 +143,14 @@ Status checkCollectionVersionEpoch(OperationContext* txn,
const NamespaceString& nss,
const ChunkType& aChunk,
const OID& collectionEpoch) {
- auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << nss.ns()),
- BSONObj(),
- 1);
+ auto findResponseWith = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << nss.ns()),
+ BSONObj(),
+ 1);
if (!findResponseWith.isOK()) {
return findResponseWith.getStatus();
}
@@ -203,15 +202,14 @@ Status checkChunkIsOnShard(OperationContext* txn,
<< shard);
// Must use local read concern because we're going to perform subsequent writes.
- auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- chunkQuery,
- BSONObj(),
- 1);
+ auto findResponseWith = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
+ txn,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ chunkQuery,
+ BSONObj(),
+ 1);
if (!findResponseWith.isOK()) {
return findResponseWith.getStatus();
}
@@ -301,7 +299,7 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
// Get the chunk with highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
@@ -491,7 +489,7 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
// Get the chunk with the highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
@@ -628,7 +626,7 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
}
// Must use local read concern because we will perform subsequent writes.
- auto findResponse = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto findResponse = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
index 6c3e9ac749c..e87c9b1e492 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
@@ -172,18 +172,17 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
const bool unique = true;
auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
- Status result =
- configShard->createIndexOnConfig(txn,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
- unique);
+ Status result = configShard->createIndex(txn,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
+ unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create ns_1_min_1 index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
+ result = configShard->createIndex(
txn,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
@@ -194,29 +193,28 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
- txn,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << 1 << ChunkType::DEPRECATED_lastmod() << 1),
- unique);
+ result =
+ configShard->createIndex(txn,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << 1 << ChunkType::DEPRECATED_lastmod() << 1),
+ unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create ns_1_lastmod_1 index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
- txn,
- NamespaceString(MigrationType::ConfigNS),
- BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
- unique);
+ result = configShard->createIndex(txn,
+ NamespaceString(MigrationType::ConfigNS),
+ BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
+ unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create ns_1_min_1 index on config.migrations"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
+ result = configShard->createIndex(
txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
return Status(result.code(),
@@ -224,7 +222,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
+ result = configShard->createIndex(
txn, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
@@ -232,18 +230,17 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result =
- configShard->createIndexOnConfig(txn,
- NamespaceString(LocksType::ConfigNS),
- BSON(LocksType::state() << 1 << LocksType::process() << 1),
- !unique);
+ result = configShard->createIndex(txn,
+ NamespaceString(LocksType::ConfigNS),
+ BSON(LocksType::state() << 1 << LocksType::process() << 1),
+ !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create state and process id index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(
+ result = configShard->createIndex(
txn, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
@@ -251,20 +248,20 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns() << 1 << TagsType::min() << 1),
- unique);
+ result = configShard->createIndex(txn,
+ NamespaceString(TagsType::ConfigNS),
+ BSON(TagsType::ns() << 1 << TagsType::min() << 1),
+ unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create ns_1_min_1 index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
- !unique);
+ result = configShard->createIndex(txn,
+ NamespaceString(TagsType::ConfigNS),
+ BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
+ !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create ns_1_tag_1 index on config db"
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index b42ed6bb69e..138da223f76 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -89,7 +89,7 @@ StatusWith<std::string> generateNewShardName(OperationContext* txn) {
BSONObjBuilder shardNameRegex;
shardNameRegex.appendRegex(ShardType::name(), "^shard");
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFind(
txn,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
index 6af584d3c30..6c94eaad829 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
@@ -66,13 +66,13 @@ Status checkForOveralappedZonedKeyRange(OperationContext* txn,
const KeyPattern& shardKeyPattern) {
DistributionStatus chunkDist(ns, ShardToChunksMap{});
- auto tagStatus = configServer->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::ns(ns.ns())),
- BSONObj(),
- 0);
+ auto tagStatus = configServer->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(TagsType::ConfigNS),
+ BSON(TagsType::ns(ns.ns())),
+ BSONObj(),
+ 0);
if (!tagStatus.isOK()) {
return tagStatus.getStatus();
}
@@ -117,14 +117,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* txn,
const NamespaceString& ns,
const ChunkRange& range,
KeyPattern* shardKeyPatternOut) {
- auto findCollStatus =
- configServer->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(CollectionType::ConfigNS),
- BSON(CollectionType::fullNs(ns.ns())),
- BSONObj(),
- 1);
+ auto findCollStatus = configServer->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(CollectionType::ConfigNS),
+ BSON(CollectionType::fullNs(ns.ns())),
+ BSONObj(),
+ 1);
if (!findCollStatus.isOK()) {
return findCollStatus.getStatus();
@@ -210,13 +209,13 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
//
auto findShardExistsStatus =
- configShard->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- shardNS,
- BSON(ShardType::name() << shardName),
- BSONObj(),
- 1);
+ configShard->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ shardNS,
+ BSON(ShardType::name() << shardName),
+ BSONObj(),
+ 1);
if (!findShardExistsStatus.isOK()) {
return findShardExistsStatus.getStatus();
@@ -231,14 +230,13 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
// Check how many shards belongs to this zone.
//
- auto findShardStatus =
- configShard->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- shardNS,
- BSON(ShardType::tags() << zoneName),
- BSONObj(),
- 2);
+ auto findShardStatus = configShard->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ shardNS,
+ BSON(ShardType::tags() << zoneName),
+ BSONObj(),
+ 2);
if (!findShardStatus.isOK()) {
return findShardStatus.getStatus();
@@ -265,13 +263,13 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
}
auto findChunkRangeStatus =
- configShard->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(TagsType::ConfigNS),
- BSON(TagsType::tag() << zoneName),
- BSONObj(),
- 1);
+ configShard->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(TagsType::ConfigNS),
+ BSON(TagsType::tag() << zoneName),
+ BSONObj(),
+ 1);
if (!findChunkRangeStatus.isOK()) {
return findChunkRangeStatus.getStatus();
@@ -326,14 +324,13 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
const auto& fullShardKeyRange = fullShardKeyStatus.getValue();
- auto zoneExistStatus =
- configServer->exhaustiveFindOnConfig(txn,
- kConfigPrimarySelector,
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ShardType::ConfigNS),
- BSON(ShardType::tags() << zoneName),
- BSONObj(),
- 1);
+ auto zoneExistStatus = configServer->exhaustiveFind(txn,
+ kConfigPrimarySelector,
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ShardType::ConfigNS),
+ BSON(ShardType::tags() << zoneName),
+ BSONObj(),
+ 1);
if (!zoneExistStatus.isOK()) {
return zoneExistStatus.getStatus();
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index cdb71cfa7cf..52bfa2df31f 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -72,13 +72,13 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
"shard0000"));
auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::DEPRECATED_lastmod << -1),
- boost::none));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::DEPRECATED_lastmod << -1),
+ boost::none));
const auto& chunksVector = findResponse.docs;
@@ -135,13 +135,13 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
"shard0000"));
auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::DEPRECATED_lastmod << -1),
- boost::none));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::DEPRECATED_lastmod << -1),
+ boost::none));
const auto& chunksVector = findResponse.docs;
@@ -202,13 +202,13 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
"shard0000"));
auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::DEPRECATED_lastmod << -1),
- boost::none));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::DEPRECATED_lastmod << -1),
+ boost::none));
const auto& chunksVector = findResponse.docs;
@@ -265,13 +265,13 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
"shard0000"));
auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::DEPRECATED_lastmod << -1),
- boost::none));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::DEPRECATED_lastmod << -1),
+ boost::none));
const auto& chunksVector = findResponse.docs;
@@ -401,13 +401,13 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
// Verify that no change to config.chunks happened.
auto findResponse = uassertStatusOK(
- getConfigShard()->exhaustiveFindOnConfig(operationContext(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- NamespaceString(ChunkType::ConfigNS),
- BSON(ChunkType::ns() << "TestDB.TestColl"),
- BSON(ChunkType::DEPRECATED_lastmod << -1),
- boost::none));
+ getConfigShard()->exhaustiveFind(operationContext(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kLocalReadConcern,
+ NamespaceString(ChunkType::ConfigNS),
+ BSON(ChunkType::ns() << "TestDB.TestColl"),
+ BSON(ChunkType::DEPRECATED_lastmod << -1),
+ boost::none));
const auto& chunksVector = findResponse.docs;
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index be61c500604..3aeae9d1386 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -91,7 +91,7 @@ Status Shard::CommandResponse::processBatchWriteResponse(
return status;
}
-const Milliseconds Shard::kDefaultConfigCommandTimeout = Seconds{30};
+const Milliseconds Shard::kDefaultCommandTimeout = Seconds{30};
bool Shard::shouldErrorBePropagated(ErrorCodes::Error code) {
return std::find(RemoteCommandRetryScheduler::kAllRetriableErrors.begin(),
@@ -185,10 +185,9 @@ StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
MONGO_UNREACHABLE;
}
-BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
- OperationContext* txn, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
- invariant(isConfig());
-
+BatchedCommandResponse Shard::runBatchWriteCommand(OperationContext* txn,
+ const BatchedCommandRequest& batchRequest,
+ RetryPolicy retryPolicy) {
const std::string dbname = batchRequest.getNS().db().toString();
invariant(batchRequest.sizeWriteOps() == 1);
@@ -198,7 +197,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
auto response = _runCommand(txn,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
- kDefaultConfigCommandTimeout,
+ kDefaultCommandTimeout,
cmdObj);
BatchedCommandResponse batchResponse;
@@ -220,7 +219,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
MONGO_UNREACHABLE;
}
-StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
+StatusWith<Shard::QueryResponse> Shard::exhaustiveFind(
OperationContext* txn,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
@@ -228,12 +227,8 @@ StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
const BSONObj& query,
const BSONObj& sort,
const boost::optional<long long> limit) {
- // Do not allow exhaustive finds to be run against regular shards.
- invariant(isConfig());
-
for (int retry = 1; retry <= kOnErrorNumRetries; retry++) {
- auto result =
- _exhaustiveFindOnConfig(txn, readPref, readConcernLevel, nss, query, sort, limit);
+ auto result = _exhaustiveFind(txn, readPref, readConcernLevel, nss, query, sort, limit);
if (retry < kOnErrorNumRetries &&
isRetriableError(result.getStatus().code(), RetryPolicy::kIdempotent)) {
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 26413b61c6d..7ca10aa358d 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -149,7 +149,7 @@ public:
RetryPolicy retryPolicy);
/**
- * Same as the other variant of runCommand, but allows the operation timeout to be overriden.
+ * Same as the other variant of runCommand, but allows the operation timeout to be overridden.
* Runs for the lesser of the remaining time on the operation context or the specified maxTimeMS
* override.
*/
@@ -186,12 +186,12 @@ public:
RetryPolicy retryPolicy);
/**
- * Expects a single-entry batch wrtie command and runs it on the config server's primary using
+ * Expects a single-entry batch write command and runs it with PrimaryOnly read preference using
* the specified retry policy.
*/
- BatchedCommandResponse runBatchWriteCommandOnConfig(OperationContext* txn,
- const BatchedCommandRequest& batchRequest,
- RetryPolicy retryPolicy);
+ BatchedCommandResponse runBatchWriteCommand(OperationContext* txn,
+ const BatchedCommandRequest& batchRequest,
+ RetryPolicy retryPolicy);
/**
* Warning: This method exhausts the cursor and pulls all data into memory.
@@ -201,33 +201,32 @@ public:
* ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
* ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
*/
- StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* txn,
- const ReadPreferenceSetting& readPref,
- const repl::ReadConcernLevel& readConcernLevel,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& sort,
- const boost::optional<long long> limit);
+ StatusWith<QueryResponse> exhaustiveFind(OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const repl::ReadConcernLevel& readConcernLevel,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ const boost::optional<long long> limit);
/**
- * Builds an index on a config server collection.
+ * Builds an index on a collection.
* Creates the collection if it doesn't yet exist. Does not error if the index already exists,
* so long as the options are the same.
* NOTE: Currently only supported for LocalShard.
*/
- virtual Status createIndexOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& keys,
- bool unique) = 0;
+ virtual Status createIndex(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) = 0;
- // This timeout will be used by default in operations against the config server, unless
- // explicitly overridden
- static const Milliseconds kDefaultConfigCommandTimeout;
+ // This timeout will be used by default in remote operations unless explicitly overridden.
+ static const Milliseconds kDefaultCommandTimeout;
/**
* Returns false if the error is a retriable error and/or causes a replset monitor update. These
* errors, if from a remote call, should not be further propagated back to another server
- * because that server will interpret them as orignating on this server rather than the one this
- * server called.
+ * because that server will interpret them as originating on this server rather than the one
+ * this server called.
*/
static bool shouldErrorBePropagated(ErrorCodes::Error code);
@@ -258,7 +257,7 @@ private:
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) = 0;
- virtual StatusWith<QueryResponse> _exhaustiveFindOnConfig(
+ virtual StatusWith<QueryResponse> _exhaustiveFind(
OperationContext* txn,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index 9a3d2114588..270fa03a25d 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -49,11 +49,7 @@
namespace mongo {
-ShardLocal::ShardLocal(const ShardId& id) : Shard(id) {
- // Currently ShardLocal only works for config servers. If we ever start using ShardLocal on
- // shards we'll need to consider how to handle shards.
- invariant(serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
-}
+ShardLocal::ShardLocal(const ShardId& id) : Shard(id) {}
const ConnectionString ShardLocal::getConnString() const {
auto replCoord = repl::getGlobalReplicationCoordinator();
@@ -147,7 +143,7 @@ Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
}
}
-StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
+StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFind(
OperationContext* txn,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
@@ -208,10 +204,10 @@ StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
}
}
-Status ShardLocal::createIndexOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& keys,
- bool unique) {
+Status ShardLocal::createIndex(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) {
invariant(ns.db() == "config" || ns.db() == "admin");
try {
diff --git a/src/mongo/s/client/shard_local.h b/src/mongo/s/client/shard_local.h
index 75e97ed0d29..7a4169655b1 100644
--- a/src/mongo/s/client/shard_local.h
+++ b/src/mongo/s/client/shard_local.h
@@ -58,10 +58,10 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& keys,
- bool unique) override;
+ Status createIndex(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) override;
private:
Shard::HostWithResponse _runCommand(OperationContext* txn,
@@ -70,14 +70,13 @@ private:
Milliseconds maxTimeMSOverrideUnused,
const BSONObj& cmdObj) final;
- StatusWith<Shard::QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
- const ReadPreferenceSetting& readPref,
- const repl::ReadConcernLevel& readConcernLevel,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<long long> limit) final;
+ StatusWith<Shard::QueryResponse> _exhaustiveFind(OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const repl::ReadConcernLevel& readConcernLevel,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<long long> limit) final;
/**
* Checks if an OpTime was set on the current Client (ie if the current operation performed a
@@ -99,9 +98,9 @@ private:
stdx::mutex _mutex;
// Stores the optime that was generated by the last operation to perform a write that was run
- // through _runCommand. Used in _exhaustiveFindOnConfig for waiting for that optime to be
- // committed so that readConcern majority reads will read the writes that were performed without
- // a w:majority write concern.
+ // through _runCommand. Used in _exhaustiveFind for waiting for that optime to be committed so
+ // that readConcern majority reads will read the writes that were performed without a w:majority
+ // write concern.
repl::OpTime _lastOpTime{};
};
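
The retained comment at the end of shard_local.h explains why ShardLocal tracks _lastOpTime: a subsequent majority read issued through _exhaustiveFind waits for that optime to become majority-committed, so the read observes writes that were made without a w:majority write concern. A rough standalone sketch of that bookkeeping pattern follows, with a plain counter standing in for repl::OpTime and no replication coordinator modeled.

    #include <iostream>
    #include <mutex>

    // Stand-in for repl::OpTime: just a monotonically increasing timestamp.
    struct OpTime {
        long long ts = 0;
    };

    class LastOpTimeTracker {
    public:
        // Called after a write performed through _runCommand.
        void advanceTo(OpTime opTime) {
            std::lock_guard<std::mutex> lk(_mutex);
            if (opTime.ts > _lastOpTime.ts) {
                _lastOpTime = opTime;
            }
        }

        // Consulted before a majority read; the real code blocks until the
        // replication coordinator reports this optime as majority-committed.
        OpTime opTimeToWaitFor() {
            std::lock_guard<std::mutex> lk(_mutex);
            return _lastOpTime;
        }

    private:
        std::mutex _mutex;
        OpTime _lastOpTime{};
    };

    int main() {
        LastOpTimeTracker tracker;
        tracker.advanceTo(OpTime{42});  // a local write happened at ts 42
        std::cout << "wait for ts " << tracker.opTimeToWaitFor().ts
                  << " before serving a majority read\n";
    }
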
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index c667be3ccd0..42d2d7f5401 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -83,7 +83,7 @@ void ShardLocalTest::setUp() {
Client::initThreadIfNotAlready();
_txn = getGlobalServiceContext()->makeOperationContext(&cc());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
- _shardLocal = stdx::make_unique<ShardLocal>(ShardId("config"));
+ _shardLocal = stdx::make_unique<ShardLocal>(ShardId("shardId"));
const repl::ReplSettings replSettings = {};
repl::setGlobalReplicationCoordinator(
new repl::ReplicationCoordinatorMock(_txn->getServiceContext(), replSettings));
@@ -148,13 +148,13 @@ StatusWith<Shard::QueryResponse> ShardLocalTest::runFindQuery(NamespaceString ns
BSONObj query,
BSONObj sort,
boost::optional<long long> limit) {
- return _shardLocal->exhaustiveFindOnConfig(_txn.get(),
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kMajorityReadConcern,
- nss,
- query,
- sort,
- limit);
+ return _shardLocal->exhaustiveFind(_txn.get(),
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ repl::ReadConcernLevel::kMajorityReadConcern,
+ nss,
+ query,
+ sort,
+ limit);
}
TEST_F(ShardLocalTest, RunCommand) {
@@ -246,8 +246,7 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getIndexes(nss).getStatus());
- Status status =
- _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ Status status = _shardLocal->createIndex(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
// Creating the index should implicitly create the collection
ASSERT_OK(status);
@@ -256,13 +255,13 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQ(2U, indexes.size());
// Making an identical index should be a no-op.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ status = _shardLocal->createIndex(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
ASSERT_OK(status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
// Trying to make the same index as non-unique should fail.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), false);
+ status = _shardLocal->createIndex(_txn.get(), nss, BSON("a" << 1 << "b" << 1), false);
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict, status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 85e0bc7678b..1351035133e 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -240,7 +240,7 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
std::move(writeConcernStatus)));
}
-StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
+StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFind(
OperationContext* txn,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
@@ -316,7 +316,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
}
const Milliseconds maxTimeMS =
- std::min(txn->getRemainingMaxTimeMillis(), kDefaultConfigCommandTimeout);
+ std::min(txn->getRemainingMaxTimeMillis(), kDefaultCommandTimeout);
BSONObjBuilder findCmdBuilder;
@@ -360,10 +360,10 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
return response;
}
-Status ShardRemote::createIndexOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& keys,
- bool unique) {
+Status ShardRemote::createIndex(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) {
MONGO_UNREACHABLE;
}
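
Note how the hunk above in shard_remote.cpp keeps the timeout clamp and only renames the constant: the renamed Shard::kDefaultCommandTimeout (still 30 seconds) acts as a ceiling on the operation's remaining maxTimeMS rather than replacing it. A minimal standalone sketch of that clamping, using std::chrono in place of mongo's Milliseconds type:

    #include <algorithm>
    #include <chrono>
    #include <iostream>

    using Milliseconds = std::chrono::milliseconds;

    // Stand-in for Shard::kDefaultCommandTimeout (30 seconds in the real code).
    constexpr Milliseconds kDefaultCommandTimeout{30 * 1000};

    // Mirrors the clamp in ShardRemote::_exhaustiveFind: never wait longer than the
    // operation's remaining maxTimeMS, and never longer than the default timeout.
    Milliseconds effectiveTimeout(Milliseconds remainingMaxTimeMillis) {
        return std::min(remainingMaxTimeMillis, kDefaultCommandTimeout);
    }

    int main() {
        std::cout << effectiveTimeout(Milliseconds{5000}).count() << "\n";    // 5000
        std::cout << effectiveTimeout(Milliseconds{120000}).count() << "\n";  // 30000
    }
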
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 908ebf33eee..48557d47925 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -70,10 +70,10 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& keys,
- bool unique) override;
+ Status createIndex(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& keys,
+ bool unique) override;
private:
/**
@@ -88,14 +88,13 @@ private:
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) final;
- StatusWith<QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
- const ReadPreferenceSetting& readPref,
- const repl::ReadConcernLevel& readConcernLevel,
- const NamespaceString& nss,
- const BSONObj& query,
- const BSONObj& sort,
- boost::optional<long long> limit) final;
+ StatusWith<QueryResponse> _exhaustiveFind(OperationContext* txn,
+ const ReadPreferenceSetting& readPref,
+ const repl::ReadConcernLevel& readConcernLevel,
+ const NamespaceString& nss,
+ const BSONObj& query,
+ const BSONObj& sort,
+ boost::optional<long long> limit) final;
/**
* Connection string for the shard at the creation time.
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 85a2b6411be..fc6a813ec5c 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -192,7 +192,7 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
kReadPref,
ns.db().toString(),
request.toBSON(),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kNoRetry);
BatchedCommandResponse batchResponse;
@@ -206,7 +206,7 @@ StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(Operation
auto config = getConfigShard();
invariant(config);
- auto findStatus = config->exhaustiveFindOnConfig(
+ auto findStatus = config->exhaustiveFind(
txn, kReadPref, repl::ReadConcernLevel::kMajorityReadConcern, ns, filter, BSONObj(), 1);
if (!findStatus.isOK()) {
return findStatus.getStatus();
@@ -278,7 +278,7 @@ StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationCo
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
BSON("listIndexes" << ns.coll().toString()),
- Shard::kDefaultConfigCommandTimeout,
+ Shard::kDefaultCommandTimeout,
Shard::RetryPolicy::kIdempotent);
if (!response.isOK()) {
return response.getStatus();