author     Misha Tyulenev <misha@mongodb.com>  2016-06-15 18:09:55 -0400
committer  Misha Tyulenev <misha@mongodb.com>  2016-06-15 18:10:20 -0400
commit     8c8379b493eb9b71e34d6586c27280b75267e7c7 (patch)
tree       a17f7e982deed23f30aef3f0b7c39c77da2d73cd  /src/mongo
parent     9b00106b56966b334c878f36cca14deb71f6d8c7 (diff)
download   mongo-8c8379b493eb9b71e34d6586c27280b75267e7c7.tar.gz
SERVER-23891 implement ShardId class
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/client/parallel.cpp | 4
-rw-r--r--  src/mongo/db/SConscript | 1
-rw-r--r--  src/mongo/db/repl/SConscript | 1
-rw-r--r--  src/mongo/db/s/active_migrations_registry_test.cpp | 4
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp | 2
-rw-r--r--  src/mongo/db/s/merge_chunks_command.cpp | 2
-rw-r--r--  src/mongo/db/s/metadata_loader.cpp | 8
-rw-r--r--  src/mongo/db/s/metadata_loader_test.cpp | 20
-rw-r--r--  src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 3
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp | 2
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 4
-rw-r--r--  src/mongo/db/s/move_chunk_command.cpp | 2
-rw-r--r--  src/mongo/db/s/move_timing_helper.cpp | 12
-rw-r--r--  src/mongo/db/s/move_timing_helper.h | 9
-rw-r--r--  src/mongo/db/s/start_chunk_clone_request.cpp | 5
-rw-r--r--  src/mongo/db/s/start_chunk_clone_request.h | 4
-rw-r--r--  src/mongo/db/s/start_chunk_clone_request_test.cpp | 3
-rw-r--r--  src/mongo/s/SConscript | 22
-rw-r--r--  src/mongo/s/balancer/balancer.cpp | 2
-rw-r--r--  src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp | 2
-rw-r--r--  src/mongo/s/balancer/balancer_policy.cpp | 18
-rw-r--r--  src/mongo/s/balancer/balancer_policy.h | 4
-rw-r--r--  src/mongo/s/balancer/balancer_policy_tests.cpp | 148
-rw-r--r--  src/mongo/s/balancer/cluster_statistics.cpp | 2
-rw-r--r--  src/mongo/s/balancer/cluster_statistics_test.cpp | 9
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp | 14
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp | 11
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_client_impl.h | 4
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_test.cpp | 48
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h | 4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp | 4
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h | 4
-rw-r--r--  src/mongo/s/catalog/type_chunk.cpp | 8
-rw-r--r--  src/mongo/s/catalog/type_chunk.h | 7
-rw-r--r--  src/mongo/s/catalog/type_database.cpp | 8
-rw-r--r--  src/mongo/s/catalog/type_database.h | 7
-rw-r--r--  src/mongo/s/chunk_diff.h | 2
-rw-r--r--  src/mongo/s/chunk_diff_test.cpp | 4
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 8
-rw-r--r--  src/mongo/s/chunk_manager.h | 6
-rw-r--r--  src/mongo/s/chunk_manager_targeter.cpp | 14
-rw-r--r--  src/mongo/s/chunk_manager_targeter.h | 2
-rw-r--r--  src/mongo/s/chunk_manager_tests.cpp | 2
-rw-r--r--  src/mongo/s/client/SConscript | 1
-rw-r--r--  src/mongo/s/client/shard.h | 3
-rw-r--r--  src/mongo/s/client/shard_local.cpp | 2
-rw-r--r--  src/mongo/s/client/shard_local_test.cpp | 2
-rw-r--r--  src/mongo/s/client/shard_registry.cpp | 14
-rw-r--r--  src/mongo/s/client/shard_registry.h | 2
-rw-r--r--  src/mongo/s/client/shard_registry_data_test.cpp | 2
-rw-r--r--  src/mongo/s/client/shard_remote.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_fsync_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_index_filter_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_plan_cache_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp | 19
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 12
-rw-r--r--  src/mongo/s/commands/run_on_all_shards_cmd.cpp | 6
-rw-r--r--  src/mongo/s/commands/sharded_command_processing.cpp | 5
-rw-r--r--  src/mongo/s/commands/sharded_command_processing.h | 4
-rw-r--r--  src/mongo/s/config.cpp | 10
-rw-r--r--  src/mongo/s/move_chunk_request.cpp | 16
-rw-r--r--  src/mongo/s/move_chunk_request.h | 12
-rw-r--r--  src/mongo/s/move_chunk_request_test.cpp | 20
-rw-r--r--  src/mongo/s/ns_targeter.h | 5
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp | 5
-rw-r--r--  src/mongo/s/request_types/balance_chunk_request_test.cpp | 4
-rw-r--r--  src/mongo/s/request_types/balance_chunk_request_type.cpp | 2
-rw-r--r--  src/mongo/s/set_shard_version_request.cpp | 19
-rw-r--r--  src/mongo/s/set_shard_version_request.h | 17
-rw-r--r--  src/mongo/s/set_shard_version_request_test.cpp | 10
-rw-r--r--  src/mongo/s/shard_id.cpp | 88
-rw-r--r--  src/mongo/s/shard_id.h | 105
-rw-r--r--  src/mongo/s/shard_id_test.cpp | 107
-rw-r--r--  src/mongo/s/shard_util.cpp | 2
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp | 92
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp | 20
81 files changed, 719 insertions, 363 deletions
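
The hunks for the new files src/mongo/s/shard_id.{h,cpp} and shard_id_test.cpp are counted in the diffstat but not reproduced in this portion of the diff. The following is a minimal illustrative sketch, not the actual src/mongo/s/shard_id.h: the exact signatures and the _id member are assumptions inferred only from the call sites in the hunks below (construction from a string, toString(), isValid(), equality, and ordering so a ShardId can key a std::map).

// Minimal sketch (assumed interface, not the real shard_id.h): just enough of
// the API that the call sites in this commit rely on.
#include <iostream>
#include <map>
#include <string>
#include <utility>

class ShardId {
public:
    ShardId() = default;
    explicit ShardId(std::string id) : _id(std::move(id)) {}

    // e.g. `e._shard = shardId.toString();` in parallel.cpp
    const std::string& toString() const {
        return _id;
    }

    // replaces the old `!shard.empty()` checks, e.g. in MoveTimingHelper
    bool isValid() const {
        return !_id.empty();
    }

    bool operator==(const ShardId& other) const {
        return _id == other._id;
    }
    bool operator!=(const ShardId& other) const {
        return !(*this == other);
    }
    // ordering so ShardId can key maps such as map<ShardId, ChunkVersion>
    bool operator<(const ShardId& other) const {
        return _id < other._id;
    }

private:
    std::string _id;
};

int main() {
    std::map<ShardId, int> chunksPerShard;
    chunksPerShard[ShardId("shard0000")] = 2;
    chunksPerShard[ShardId("shard0001")] = 0;

    const ShardId unset;  // default-constructed id acts as "no shard"
    std::cout << std::boolalpha << unset.isValid() << '\n';           // false
    std::cout << chunksPerShard.count(ShardId("shard0000")) << '\n';  // 1
    return 0;
}
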
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 4a1f9d010a1..2baf10a58d1 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -678,7 +678,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
} catch (SocketException& e) {
warning() << "socket exception when initializing on " << shardId
<< ", current connection state is " << mdata.toBSON() << causedBy(e);
- e._shard = shardId;
+ e._shard = shardId.toString();
mdata.errored = true;
if (returnPartial) {
mdata.cleanup(true);
@@ -688,7 +688,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
} catch (DBException& e) {
warning() << "db exception when initializing on " << shardId
<< ", current connection state is " << mdata.toBSON() << causedBy(e);
- e._shard = shardId;
+ e._shard = shardId.toString();
mdata.errored = true;
if (returnPartial && e.getCode() == 15925 /* From above! */) {
mdata.cleanup(true);
diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript
index accf4d71a5d..d02dace87d7 100644
--- a/src/mongo/db/SConscript
+++ b/src/mongo/db/SConscript
@@ -700,6 +700,7 @@ serveronlyLibdeps = [
"$BUILD_DIR/mongo/s/catalog/replset/sharding_catalog_manager_impl",
"$BUILD_DIR/mongo/s/client/sharding_connection_hook",
"$BUILD_DIR/mongo/s/coreshard",
+ "$BUILD_DIR/mongo/s/shard_id",
"$BUILD_DIR/mongo/s/serveronly",
"$BUILD_DIR/mongo/scripting/scripting_server",
"$BUILD_DIR/mongo/util/clock_sources",
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript
index 89d0aac35cc..66b38a9e163 100644
--- a/src/mongo/db/repl/SConscript
+++ b/src/mongo/db/repl/SConscript
@@ -138,6 +138,7 @@ env.Library(
'oplog_buffer_blocking_queue.cpp',
],
LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
],
)
diff --git a/src/mongo/db/s/active_migrations_registry_test.cpp b/src/mongo/db/s/active_migrations_registry_test.cpp
index 6ced2bb68da..dfde342ed39 100644
--- a/src/mongo/db/s/active_migrations_registry_test.cpp
+++ b/src/mongo/db/s/active_migrations_registry_test.cpp
@@ -71,8 +71,8 @@ MoveChunkRequest createMoveChunkRequest(const NamespaceString& nss) {
nss,
ChunkVersion(2, 3, OID::gen()),
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
- "shard0001",
- "shard0002",
+ ShardId("shard0001"),
+ ShardId("shard0002"),
ChunkRange(BSON("Key" << -100), BSON("Key" << 100)),
1024,
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 103150d031a..013c135cc84 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -67,7 +67,7 @@ protected:
// can't load metadata
ChunkType chunkType;
chunkType.setNS(NamespaceString{"test.foo"}.ns());
- chunkType.setShard("shard0001");
+ chunkType.setShard(ShardId("shard0001"));
chunkType.setMin(BSON("a" << MINKEY));
chunkType.setMax(BSON("a" << MAXKEY));
chunkType.setVersion(ChunkVersion(1, 0, epoch));
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index 4f37d969bf9..2b3ca75a890 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -55,7 +55,7 @@ using std::vector;
namespace {
BSONArray buildOpPrecond(const string& ns,
- const string& shardName,
+ const ShardId& shardName,
const ChunkVersion& shardVersion) {
BSONArrayBuilder preCond;
BSONObjBuilder condB;
diff --git a/src/mongo/db/s/metadata_loader.cpp b/src/mongo/db/s/metadata_loader.cpp
index f57fc35a2df..8bc72af7f60 100644
--- a/src/mongo/db/s/metadata_loader.cpp
+++ b/src/mongo/db/s/metadata_loader.cpp
@@ -60,7 +60,7 @@ namespace {
*/
class SCMConfigDiffTracker : public ConfigDiffTracker<BSONObj> {
public:
- SCMConfigDiffTracker(const string& currShard) : _currShard(currShard) {}
+ SCMConfigDiffTracker(const ShardId& currShard) : _currShard(currShard) {}
virtual bool isTracked(const ChunkType& chunk) const {
return chunk.getShard() == _currShard;
@@ -70,7 +70,7 @@ public:
return make_pair(chunk.getMin(), chunk.getMax());
}
- virtual string shardFor(OperationContext* txn, const string& name) const {
+ virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
return name;
}
@@ -79,7 +79,7 @@ public:
}
private:
- const string _currShard;
+ const ShardId _currShard;
};
} // namespace
@@ -137,7 +137,7 @@ Status MetadataLoader::initChunks(OperationContext* txn,
const string& shard,
const CollectionMetadata* oldMetadata,
CollectionMetadata* metadata) const {
- map<string, ChunkVersion> versionMap;
+ map<ShardId, ChunkVersion> versionMap; // TODO: use .h defined type
// Preserve the epoch
versionMap[shard] = metadata->_shardVersion;
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 0825fd2ea6b..c96b77a1590 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -88,7 +88,7 @@ protected:
// Infer namespace, shard, epoch, keypattern from first chunk
const ChunkType* firstChunk = *(chunks.vector().begin());
const string ns = firstChunk->getNS();
- const string shardName = firstChunk->getShard();
+ const string shardName = firstChunk->getShard().toString();
const OID epoch = firstChunk->getVersion().epoch();
CollectionType coll;
@@ -276,7 +276,7 @@ TEST_F(MetadataLoaderFixture, CheckNumChunk) {
// can't load metadata
ChunkType chunkType;
chunkType.setNS("test.foo");
- chunkType.setShard("shard0001");
+ chunkType.setShard(ShardId("shard0001"));
chunkType.setMin(BSON("a" << MINKEY));
chunkType.setMax(BSON("a" << MAXKEY));
chunkType.setVersion(ChunkVersion(1, 0, epoch));
@@ -424,7 +424,7 @@ TEST_F(MetadataLoaderFixture, NoChunks) {
TEST_F(MetadataLoaderFixture, PromotePendingNA) {
unique_ptr<ChunkType> chunk(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << MINKEY));
chunk->setMax(BSON("x" << 0));
chunk->setVersion(ChunkVersion(1, 0, OID::gen()));
@@ -459,7 +459,7 @@ TEST_F(MetadataLoaderFixture, PromotePendingNAVersion) {
unique_ptr<ChunkType> chunk(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << MINKEY));
chunk->setMax(BSON("x" << 0));
chunk->setVersion(ChunkVersion(1, 1, epoch));
@@ -500,7 +500,7 @@ TEST_F(MetadataLoaderFixture, PromotePendingGoodOverlap) {
unique_ptr<ChunkType> chunk(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << MINKEY));
chunk->setMax(BSON("x" << 0));
chunk->setVersion(ChunkVersion(1, 0, epoch));
@@ -508,14 +508,14 @@ TEST_F(MetadataLoaderFixture, PromotePendingGoodOverlap) {
chunk.reset(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << 10));
chunk->setMax(BSON("x" << 20));
chunks.mutableVector().push_back(chunk.release());
chunk.reset(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << 30));
chunk->setMax(BSON("x" << MAXKEY));
chunks.mutableVector().push_back(chunk.release());
@@ -531,7 +531,7 @@ TEST_F(MetadataLoaderFixture, PromotePendingGoodOverlap) {
chunk.reset(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << 0));
chunk->setMax(BSON("x" << 10));
chunk->setVersion(ChunkVersion(1, 0, epoch));
@@ -580,7 +580,7 @@ TEST_F(MetadataLoaderFixture, PromotePendingBadOverlap) {
unique_ptr<ChunkType> chunk(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << MINKEY));
chunk->setMax(BSON("x" << 0));
chunk->setVersion(ChunkVersion(1, 0, epoch));
@@ -598,7 +598,7 @@ TEST_F(MetadataLoaderFixture, PromotePendingBadOverlap) {
chunk.reset(new ChunkType());
chunk->setNS("foo.bar");
- chunk->setShard("shard0000");
+ chunk->setShard(ShardId("shard0000"));
chunk->setMin(BSON("x" << 15));
chunk->setMax(BSON("x" << MAXKEY));
chunk->setVersion(ChunkVersion(1, 0, epoch));
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index e5b15ac679d..55b38ec4b47 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -170,7 +170,8 @@ MigrationChunkClonerSourceLegacy::MigrationChunkClonerSourceLegacy(MoveChunkRequ
const BSONObj& shardKeyPattern)
: _args(std::move(request)),
_shardKeyPattern(shardKeyPattern),
- _sessionId(MigrationSessionId::generate(_args.getFromShardId(), _args.getToShardId())) {}
+ _sessionId(MigrationSessionId::generate(_args.getFromShardId().toString(),
+ _args.getToShardId().toString())) {}
MigrationChunkClonerSourceLegacy::~MigrationChunkClonerSourceLegacy() {
invariant(!_deleteNotifyExec);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index e6990087018..89aab68dcd4 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -421,7 +421,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
<< " for collection " << ns << " from " << fromShard << " at epoch " << epoch.toString();
string errmsg;
- MoveTimingHelper timing(txn, "to", ns, min, max, 6 /* steps */, &errmsg, "", "");
+ MoveTimingHelper timing(txn, "to", ns, min, max, 6 /* steps */, &errmsg, ShardId(), ShardId());
ScopedDbConnection conn(fromShard);
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 96d32fac9b9..74da4ec2558 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -317,7 +317,7 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
n.append(ChunkType::ns(), _args.getNss().ns());
n.append(ChunkType::min(), _args.getMinKey());
n.append(ChunkType::max(), _args.getMaxKey());
- n.append(ChunkType::shard(), _args.getToShardId());
+ n.append(ChunkType::shard(), _args.getToShardId().toString());
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
@@ -357,7 +357,7 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
n.append(ChunkType::ns(), _args.getNss().ns());
n.append(ChunkType::min(), bumpMin);
n.append(ChunkType::max(), bumpMax);
- n.append(ChunkType::shard(), _args.getFromShardId());
+ n.append(ChunkType::shard(), _args.getFromShardId().toString());
n.done();
BSONObjBuilder q(op.subobjStart("o2"));
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 189f7934405..e7f332fe9f3 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -149,7 +149,7 @@ public:
txn, moveChunkRequest.getConfigServerCS().toString());
}
- shardingState->setShardName(moveChunkRequest.getFromShardId());
+ shardingState->setShardName(moveChunkRequest.getFromShardId().toString());
// Make sure we're as up-to-date as possible with shard information. This catches the case
// where we might have changed a shard's host by removing/adding a shard with the same name.
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 3e4d1507dbc..3c1ad114976 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -46,8 +46,8 @@ MoveTimingHelper::MoveTimingHelper(OperationContext* txn,
const BSONObj& max,
int totalNumSteps,
std::string* cmdErrmsg,
- const std::string& toShard,
- const std::string& fromShard)
+ const ShardId& toShard,
+ const ShardId& fromShard)
: _txn(txn),
_where(where),
_ns(ns),
@@ -64,12 +64,12 @@ MoveTimingHelper::~MoveTimingHelper() {
// even if logChange doesn't throw, bson does
// sigh
try {
- if (!_to.empty()) {
- _b.append("to", _to);
+ if (_to.isValid()) {
+ _b.append("to", _to.toString());
}
- if (!_from.empty()) {
- _b.append("from", _from);
+ if (_from.isValid()) {
+ _b.append("from", _from.toString());
}
if (_nextStep != _totalNumSteps) {
diff --git a/src/mongo/db/s/move_timing_helper.h b/src/mongo/db/s/move_timing_helper.h
index fc799ca1d6d..bc1f2644ac7 100644
--- a/src/mongo/db/s/move_timing_helper.h
+++ b/src/mongo/db/s/move_timing_helper.h
@@ -31,6 +31,7 @@
#include <string>
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/s/shard_id.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -47,8 +48,8 @@ public:
const BSONObj& max,
int totalNumSteps,
std::string* cmdErrmsg,
- const std::string& toShard,
- const std::string& fromShard);
+ const ShardId& toShard,
+ const ShardId& fromShard);
~MoveTimingHelper();
void done(int step);
@@ -60,8 +61,8 @@ private:
OperationContext* const _txn;
const std::string _where;
const std::string _ns;
- const std::string _to;
- const std::string _from;
+ const ShardId _to;
+ const ShardId _from;
const int _totalNumSteps;
const std::string* _cmdErrmsg;
diff --git a/src/mongo/db/s/start_chunk_clone_request.cpp b/src/mongo/db/s/start_chunk_clone_request.cpp
index fbeb861ede7..9aa27ba5fc9 100644
--- a/src/mongo/db/s/start_chunk_clone_request.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request.cpp
@@ -33,6 +33,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
namespace {
@@ -160,7 +161,7 @@ void StartChunkCloneRequest::appendAsCommand(
const MigrationSessionId& sessionId,
const ConnectionString& configServerConnectionString,
const ConnectionString& fromShardConnectionString,
- const std::string& toShardId,
+ const ShardId& toShardId,
const BSONObj& chunkMinKey,
const BSONObj& chunkMaxKey,
const BSONObj& shardKeyPattern,
@@ -173,7 +174,7 @@ void StartChunkCloneRequest::appendAsCommand(
sessionId.append(builder);
builder->append(kConfigServerConnectionString, configServerConnectionString.toString());
builder->append(kFromShardConnectionString, fromShardConnectionString.toString());
- builder->append(kToShardId, toShardId);
+ builder->append(kToShardId, toShardId.toString());
builder->append(kChunkMinKey, chunkMinKey);
builder->append(kChunkMaxKey, chunkMaxKey);
builder->append(kShardKeyPattern, shardKeyPattern);
diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h
index ce66f7f2b12..8a7b43d5f27 100644
--- a/src/mongo/db/s/start_chunk_clone_request.h
+++ b/src/mongo/db/s/start_chunk_clone_request.h
@@ -40,7 +40,7 @@ namespace mongo {
class BSONObjBuilder;
template <typename T>
class StatusWith;
-
+class ShardId;
/**
* Parses the arguments for a start chunk clone operation.
*/
@@ -62,7 +62,7 @@ public:
const MigrationSessionId& sessionId,
const ConnectionString& configServerConnectionString,
const ConnectionString& fromShardConnectionString,
- const std::string& toShardId,
+ const ShardId& toShardId,
const BSONObj& chunkMinKey,
const BSONObj& chunkMaxKey,
const BSONObj& shardKeyPattern,
diff --git a/src/mongo/db/s/start_chunk_clone_request_test.cpp b/src/mongo/db/s/start_chunk_clone_request_test.cpp
index 2b977178781..107c5628580 100644
--- a/src/mongo/db/s/start_chunk_clone_request_test.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request_test.cpp
@@ -33,6 +33,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/jsobj.h"
+#include "mongo/s/shard_id.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -51,7 +52,7 @@ TEST(StartChunkCloneRequest, CreateAsCommandComplete) {
sessionId,
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
assertGet(ConnectionString::parse("TestDonorRS/Donor1:12345,Donor2:12345,Donor3:12345")),
- "shard0002",
+ ShardId("shard0002"),
BSON("Key" << -100),
BSON("Key" << 100),
BSON("Key" << 1),
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 3d047de7734..c4411742da5 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -33,6 +33,16 @@ env.Library(
# Functionality shared between mongod and mongos
env.Library(
+ target='shard_id',
+ source=[
+ 'shard_id.cpp',
+ ],
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ ]
+)
+
+env.Library(
target='common',
source=[
'catalog/mongo_version_range.cpp',
@@ -60,6 +70,7 @@ env.Library(
'shard_key_pattern.cpp',
],
LIBDEPS=[
+ 'shard_id',
'$BUILD_DIR/mongo/client/connection_string',
'$BUILD_DIR/mongo/db/common',
'$BUILD_DIR/mongo/db/matcher/expressions',
@@ -99,6 +110,16 @@ env.Library(
)
env.CppUnitTest(
+ target='shard_id_test',
+ source=[
+ 'shard_id_test.cpp',
+ ],
+ LIBDEPS=[
+ 'common',
+ ]
+)
+
+env.CppUnitTest(
target='sharding_common_test',
source=[
'catalog/type_changelog_test.cpp',
@@ -116,6 +137,7 @@ env.CppUnitTest(
'move_chunk_request_test.cpp',
'set_shard_version_request_test.cpp',
'shard_key_pattern_test.cpp',
+# 'shard_id_test.cpp',
],
LIBDEPS=[
'$BUILD_DIR/mongo/db/query/query_test_service_context',
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index c751467ea25..c75b4d3b93b 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -600,7 +600,7 @@ bool Balancer::_checkOIDs(OperationContext* txn) {
shardingContext->shardRegistry()->getAllShardIds(&all);
// map of OID machine ID => shardId
- map<int, string> oids;
+ map<int, ShardId> oids;
for (const ShardId& shardId : all) {
const auto s = shardingContext->shardRegistry()->getShard(txn, shardId);
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
index e2a00e9034f..f95158b3039 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -210,7 +210,7 @@ BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* tx
DistributionStatus distStatus(shardStatsStatus.getValue(), shardToChunksMap);
const ShardId newShardId(distStatus.getBestReceieverShard(tagForChunkStatus.getValue()));
- if (newShardId.empty() || newShardId == chunk.getShard()) {
+ if (!newShardId.isValid() || newShardId == chunk.getShard()) {
return boost::optional<MigrateInfo>();
}
diff --git a/src/mongo/s/balancer/balancer_policy.cpp b/src/mongo/s/balancer/balancer_policy.cpp
index f588f089702..b679858e2d0 100644
--- a/src/mongo/s/balancer/balancer_policy.cpp
+++ b/src/mongo/s/balancer/balancer_policy.cpp
@@ -118,8 +118,8 @@ Status DistributionStatus::isShardSuitableReceiver(const ClusterStatistics::Shar
return Status::OK();
}
-string DistributionStatus::getBestReceieverShard(const string& tag) const {
- string best;
+ShardId DistributionStatus::getBestReceieverShard(const string& tag) const {
+ ShardId best;
unsigned minChunks = numeric_limits<unsigned>::max();
for (const auto& stat : _shardInfo) {
@@ -143,8 +143,8 @@ string DistributionStatus::getBestReceieverShard(const string& tag) const {
return best;
}
-string DistributionStatus::getMostOverloadedShard(const string& tag) const {
- string worst;
+ShardId DistributionStatus::getMostOverloadedShard(const string& tag) const {
+ ShardId worst;
unsigned maxChunks = 0;
for (const auto& stat : _shardInfo) {
@@ -274,7 +274,7 @@ MigrateInfo* BalancerPolicy::balance(const string& ns,
string tag = distribution.getTagForChunk(chunkToMove);
const ShardId to = distribution.getBestReceieverShard(tag);
- if (to.size() == 0) {
+ if (!to.isValid()) {
warning() << "want to move chunk: " << chunkToMove << "(" << tag << ") "
<< "from " << stat.shardId << " but can't find anywhere to put it";
continue;
@@ -313,7 +313,7 @@ MigrateInfo* BalancerPolicy::balance(const string& ns,
}
const ShardId to = distribution.getBestReceieverShard(tag);
- if (to.size() == 0) {
+ if (!to.isValid()) {
log() << "no where to put it :(";
continue;
}
@@ -349,15 +349,15 @@ MigrateInfo* BalancerPolicy::balance(const string& ns,
string tag = tags[i];
const ShardId from = distribution.getMostOverloadedShard(tag);
- if (from.size() == 0)
+ if (!from.isValid())
continue;
unsigned max = distribution.numberOfChunksInShardWithTag(from, tag);
if (max == 0)
continue;
- string to = distribution.getBestReceieverShard(tag);
- if (to.size() == 0) {
+ ShardId to = distribution.getBestReceieverShard(tag);
+ if (!to.isValid()) {
log() << "no available shards to take chunks for tag [" << tag << "]";
return NULL;
}
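
The balancer_policy.cpp hunks above replace string-emptiness checks such as `to.size() == 0` with `!to.isValid()`, so a default-constructed ShardId serves as the "no suitable shard" sentinel returned by getBestReceieverShard(). Below is a small self-contained sketch of that idiom; ReceiverStat and pickBestReceiver are illustrative stand-ins, not the real DistributionStatus/BalancerPolicy API, and the ShardId stub is the same assumed interface as in the sketch after the diffstat.

// Illustrative only: mirrors the "return default ShardId when nothing
// qualifies, caller tests isValid()" pattern the balancer hunks switch to.
#include <iostream>
#include <limits>
#include <string>
#include <utility>
#include <vector>

class ShardId {
public:
    ShardId() = default;
    explicit ShardId(std::string id) : _id(std::move(id)) {}
    bool isValid() const { return !_id.empty(); }
    const std::string& toString() const { return _id; }
private:
    std::string _id;
};

struct ReceiverStat {  // stand-in for ClusterStatistics::ShardStatistics
    ShardId shardId;
    unsigned numChunks;
    bool isDraining;
};

// Returns the least-loaded non-draining shard, or a default (invalid) ShardId
// when no shard qualifies -- callers test the result with isValid().
ShardId pickBestReceiver(const std::vector<ReceiverStat>& stats) {
    ShardId best;
    unsigned minChunks = std::numeric_limits<unsigned>::max();
    for (const auto& stat : stats) {
        if (stat.isDraining)
            continue;
        if (stat.numChunks < minChunks) {
            minChunks = stat.numChunks;
            best = stat.shardId;
        }
    }
    return best;
}

int main() {
    std::vector<ReceiverStat> stats{{ShardId("shard0"), 5, true},
                                    {ShardId("shard1"), 2, false}};
    const ShardId to = pickBestReceiver(stats);
    if (!to.isValid()) {  // old code: if (to.size() == 0)
        std::cout << "no receiver available\n";
    } else {
        std::cout << "move to " << to.toString() << '\n';  // prints shard1
    }
    return 0;
}
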
diff --git a/src/mongo/s/balancer/balancer_policy.h b/src/mongo/s/balancer/balancer_policy.h
index 428349098a1..8717aca4045 100644
--- a/src/mongo/s/balancer/balancer_policy.h
+++ b/src/mongo/s/balancer/balancer_policy.h
@@ -95,13 +95,13 @@ public:
* @param forTag "" if you don't care, or a tag
* @return shard best suited to receive a chunk
*/
- std::string getBestReceieverShard(const std::string& tag) const;
+ ShardId getBestReceieverShard(const std::string& tag) const;
/**
* @return the shard with the most chunks
* based on # of chunks with the given tag
*/
- std::string getMostOverloadedShard(const std::string& forTag) const;
+ ShardId getMostOverloadedShard(const std::string& forTag) const;
// ---- basic accessors, counters, etc...
diff --git a/src/mongo/s/balancer/balancer_policy_tests.cpp b/src/mongo/s/balancer/balancer_policy_tests.cpp
index b0ba5c29ce3..95746bae017 100644
--- a/src/mongo/s/balancer/balancer_policy_tests.cpp
+++ b/src/mongo/s/balancer/balancer_policy_tests.cpp
@@ -66,7 +66,7 @@ TEST(BalancerPolicyTests, BalanceNormalTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << BSON("$maxKey" << 1)));
chunk.setMax(BSON("x" << 49));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -74,17 +74,17 @@ TEST(BalancerPolicyTests, BalanceNormalTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << 49));
chunk.setMax(BSON("x" << BSON("$maxKey" << 1)));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
- chunkMap["shard0"] = chunks;
- chunkMap["shard1"] = vector<ChunkType>();
+ chunkMap[ShardId("shard0")] = chunks;
+ chunkMap[ShardId("shard1")] = vector<ChunkType>();
// No limits
DistributionStatus status(
- {ShardStatistics("shard0", 0, 2, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 0, false, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 2, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 0, false, emptyTagSet, emptyShardVersion)},
chunkMap);
std::unique_ptr<MigrateInfo> c(BalancerPolicy::balance("ns", status, 1));
@@ -101,7 +101,7 @@ TEST(BalancerPolicyTests, BalanceJumbo) {
chunk.setMin(BSON("x" << BSON("$maxKey" << 1)));
chunk.setMax(BSON("x" << 10));
chunk.setJumbo(true);
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -110,7 +110,7 @@ TEST(BalancerPolicyTests, BalanceJumbo) {
chunk.setMin(BSON("x" << 10));
chunk.setMax(BSON("x" << 20));
chunk.setJumbo(true);
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -118,7 +118,7 @@ TEST(BalancerPolicyTests, BalanceJumbo) {
ChunkType chunk;
chunk.setMin(BSON("x" << 20));
chunk.setMax(BSON("x" << 30));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -127,7 +127,7 @@ TEST(BalancerPolicyTests, BalanceJumbo) {
chunk.setMin(BSON("x" << 30));
chunk.setMax(BSON("x" << 40));
chunk.setJumbo(true);
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -135,17 +135,17 @@ TEST(BalancerPolicyTests, BalanceJumbo) {
ChunkType chunk;
chunk.setMin(BSON("x" << 40));
chunk.setMax(BSON("x" << BSON("$maxKey" << 1)));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
- chunkMap["shard0"] = chunks;
- chunkMap["shard1"] = vector<ChunkType>();
+ chunkMap[ShardId("shard0")] = chunks;
+ chunkMap[ShardId("shard1")] = vector<ChunkType>();
// No limits
DistributionStatus status(
- {ShardStatistics("shard0", 0, 2, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 0, false, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 2, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 0, false, emptyTagSet, emptyShardVersion)},
chunkMap);
std::unique_ptr<MigrateInfo> c(BalancerPolicy::balance("ns", status, 1));
@@ -161,7 +161,7 @@ TEST(BalanceNormalTests, BalanceDrainingTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << BSON("$maxKey" << 1)));
chunk.setMax(BSON("x" << 49));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -169,23 +169,23 @@ TEST(BalanceNormalTests, BalanceDrainingTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << 49));
chunk.setMax(BSON("x" << BSON("$maxKey" << 1)));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
- chunkMap["shard0"] = chunks;
- chunkMap["shard1"] = vector<ChunkType>();
+ chunkMap[ShardId("shard0")] = chunks;
+ chunkMap[ShardId("shard1")] = vector<ChunkType>();
// shard0 is draining
DistributionStatus status(
- {ShardStatistics("shard0", 0, 2, true, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 0, false, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 2, true, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 0, false, emptyTagSet, emptyShardVersion)},
chunkMap);
std::unique_ptr<MigrateInfo> c(BalancerPolicy::balance("ns", status, 0));
ASSERT(c);
- ASSERT_EQUALS(c->to, "shard1");
- ASSERT_EQUALS(c->from, "shard0");
+ ASSERT_EQUALS(c->to, ShardId("shard1"));
+ ASSERT_EQUALS(c->from, ShardId("shard0"));
ASSERT(!c->minKey.isEmpty());
}
@@ -197,7 +197,7 @@ TEST(BalancerPolicyTests, BalanceEndedDrainingTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << BSON("$maxKey" << 1)));
chunk.setMax(BSON("x" << 49));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
@@ -205,17 +205,17 @@ TEST(BalancerPolicyTests, BalanceEndedDrainingTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << 49));
chunk.setMax(BSON("x" << BSON("$maxKey" << 1)));
- chunk.setShard("shard0");
+ chunk.setShard(ShardId("shard0"));
chunks.push_back(chunk);
}
- chunkMap["shard0"] = chunks;
- chunkMap["shard1"] = vector<ChunkType>();
+ chunkMap[ShardId("shard0")] = chunks;
+ chunkMap[ShardId("shard1")] = vector<ChunkType>();
// shard1 is draining
DistributionStatus status(
- {ShardStatistics("shard0", 0, 2, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 0, true, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 2, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 0, true, emptyTagSet, emptyShardVersion)},
chunkMap);
std::unique_ptr<MigrateInfo> c(BalancerPolicy::balance("ns", status, 0));
@@ -230,7 +230,7 @@ TEST(BalancerPolicyTests, BalanceImpasseTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << BSON("$maxKey" << 1)));
chunk.setMax(BSON("x" << 49));
- chunk.setShard("shard1");
+ chunk.setShard(ShardId("shard1"));
chunks.push_back(chunk);
}
@@ -238,19 +238,19 @@ TEST(BalancerPolicyTests, BalanceImpasseTest) {
ChunkType chunk;
chunk.setMin(BSON("x" << 49));
chunk.setMax(BSON("x" << BSON("$maxKey" << 1)));
- chunk.setShard("shard1");
+ chunk.setShard(ShardId("shard1"));
chunks.push_back(chunk);
}
- chunkMap["shard0"] = vector<ChunkType>();
- chunkMap["shard1"] = chunks;
- chunkMap["shard2"] = vector<ChunkType>();
+ chunkMap[ShardId("shard0")] = vector<ChunkType>();
+ chunkMap[ShardId("shard1")] = chunks;
+ chunkMap[ShardId("shard2")] = vector<ChunkType>();
// shard0 is draining, shard1 is maxed out, shard2 has writebacks pending
DistributionStatus status(
- {ShardStatistics("shard0", 0, 2, true, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 1, 1, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard2", 0, 1, true, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 2, true, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 1, 1, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 1, true, emptyTagSet, emptyShardVersion)},
chunkMap);
std::unique_ptr<MigrateInfo> c(BalancerPolicy::balance("ns", status, 0));
@@ -312,14 +312,15 @@ TEST(BalancerPolicyTests, MultipleDraining) {
addShard(chunks, 10, false);
addShard(chunks, 5, true);
- DistributionStatus d({ShardStatistics("shard0", 0, 5, true, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 5, true, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard2", 0, 5, false, emptyTagSet, emptyShardVersion)},
- chunks);
+ DistributionStatus d(
+ {ShardStatistics(ShardId("shard0"), 0, 5, true, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 5, true, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 5, false, emptyTagSet, emptyShardVersion)},
+ chunks);
std::unique_ptr<MigrateInfo> m(BalancerPolicy::balance("ns", d, 0));
ASSERT(m);
- ASSERT_EQUALS("shard2", m->to);
+ ASSERT_EQUALS(ShardId("shard2"), m->to);
}
@@ -330,10 +331,11 @@ TEST(BalancerPolicyTests, TagsDraining) {
addShard(chunks, 5, true);
while (true) {
- DistributionStatus d({ShardStatistics("shard0", 0, 5, false, {"a"}, emptyShardVersion),
- ShardStatistics("shard1", 0, 5, true, {"a", "b"}, emptyShardVersion),
- ShardStatistics("shard2", 0, 5, false, {"b"}, emptyShardVersion)},
- chunks);
+ DistributionStatus d(
+ {ShardStatistics(ShardId("shard0"), 0, 5, false, {"a"}, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 5, true, {"a", "b"}, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 5, false, {"b"}, emptyShardVersion)},
+ chunks);
d.addTagRange(TagRange(BSON("x" << -1), BSON("x" << 7), "a"));
d.addTagRange(TagRange(BSON("x" << 7), BSON("x" << 1000), "b"));
@@ -344,17 +346,17 @@ TEST(BalancerPolicyTests, TagsDraining) {
}
if (m->minKey["x"].numberInt() < 7) {
- ASSERT_EQUALS("shard0", m->to);
+ ASSERT_EQUALS(ShardId("shard0"), m->to);
} else {
- ASSERT_EQUALS("shard2", m->to);
+ ASSERT_EQUALS(ShardId("shard2"), m->to);
}
moveChunk(chunks, m.get());
}
- ASSERT_EQUALS(7U, chunks["shard0"].size());
- ASSERT_EQUALS(0U, chunks["shard1"].size());
- ASSERT_EQUALS(8U, chunks["shard2"].size());
+ ASSERT_EQUALS(7U, chunks[ShardId("shard0")].size());
+ ASSERT_EQUALS(0U, chunks[ShardId("shard1")].size());
+ ASSERT_EQUALS(8U, chunks[ShardId("shard2")].size());
}
@@ -366,9 +368,9 @@ TEST(BalancerPolicyTests, TagsPolicyChange) {
while (true) {
DistributionStatus d(
- {ShardStatistics("shard0", 0, 5, false, {"a"}, emptyShardVersion),
- ShardStatistics("shard1", 0, 5, false, {"a"}, emptyShardVersion),
- ShardStatistics("shard2", 0, 5, false, emptyTagSet, emptyShardVersion)},
+ {ShardStatistics(ShardId("shard0"), 0, 5, false, {"a"}, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 5, false, {"a"}, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 5, false, emptyTagSet, emptyShardVersion)},
chunks);
d.addTagRange(TagRange(BSON("x" << -1), BSON("x" << 1000), "a"));
@@ -380,12 +382,12 @@ TEST(BalancerPolicyTests, TagsPolicyChange) {
moveChunk(chunks, m.get());
}
- const size_t shard0Size = chunks["shard0"].size();
- const size_t shard1Size = chunks["shard1"].size();
+ const size_t shard0Size = chunks[ShardId("shard0")].size();
+ const size_t shard1Size = chunks[ShardId("shard1")].size();
ASSERT_EQUALS(15U, shard0Size + shard1Size);
ASSERT(shard0Size == 7U || shard0Size == 8U);
- ASSERT_EQUALS(0U, chunks["shard2"].size());
+ ASSERT_EQUALS(0U, chunks[ShardId("shard2")].size());
}
@@ -457,15 +459,16 @@ TEST(BalancerPolicyTests, MaxSizeRespect) {
// Note that maxSize of shard0 is 1, and it is therefore overloaded with currSize = 3.
// Other shards have maxSize = 0 = unset.
- DistributionStatus d({ShardStatistics("shard0", 1, 3, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 4, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard2", 0, 6, false, emptyTagSet, emptyShardVersion)},
- chunks);
+ DistributionStatus d(
+ {ShardStatistics(ShardId("shard0"), 1, 3, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 4, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 6, false, emptyTagSet, emptyShardVersion)},
+ chunks);
std::unique_ptr<MigrateInfo> m(BalancerPolicy::balance("ns", d, 0));
ASSERT(m);
- ASSERT_EQUALS("shard2", m->from);
- ASSERT_EQUALS("shard1", m->to);
+ ASSERT_EQUALS(ShardId("shard2"), m->from);
+ ASSERT_EQUALS(ShardId("shard1"), m->to);
}
/**
@@ -482,10 +485,11 @@ TEST(BalancerPolicyTests, MaxSizeNoDrain) {
// Note that maxSize of shard0 is 1, and it is therefore overloaded with currSize = 4. Other
// shards have maxSize = 0 = unset.
- DistributionStatus d({ShardStatistics("shard0", 1, 4, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard1", 0, 4, false, emptyTagSet, emptyShardVersion),
- ShardStatistics("shard2", 0, 4, false, emptyTagSet, emptyShardVersion)},
- chunks);
+ DistributionStatus d(
+ {ShardStatistics(ShardId("shard0"), 1, 4, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard1"), 0, 4, false, emptyTagSet, emptyShardVersion),
+ ShardStatistics(ShardId("shard2"), 0, 4, false, emptyTagSet, emptyShardVersion)},
+ chunks);
std::unique_ptr<MigrateInfo> m(BalancerPolicy::balance("ns", d, 0));
ASSERT(!m);
@@ -519,7 +523,7 @@ TEST(BalancerPolicyTests, Simulation) {
ShardToChunksMap chunks;
vector<ShardStatistics> shards;
- map<string, int> expected;
+ map<ShardId, int> expected;
for (int i = 0; i < numShards; i++) {
int numShardChunks = rng.nextInt32(100);
@@ -527,17 +531,17 @@ TEST(BalancerPolicyTests, Simulation) {
bool maxed = i >= 2 && i < 4;
if (draining) {
- expected[str::stream() << "shard" << i] = 0;
+ expected[ShardId(str::stream() << "shard" << i)] = 0;
}
if (maxed) {
- expected[str::stream() << "shard" << i] = numShardChunks + 1;
+ expected[ShardId(str::stream() << "shard" << i)] = numShardChunks + 1;
}
addShard(chunks, numShardChunks, false);
numChunks += numShardChunks;
- shards.emplace_back(str::stream() << "shard" << i,
+ shards.emplace_back(ShardId(str::stream() << "shard" << i),
maxed ? numShardChunks + 1 : 0,
numShardChunks,
draining,
@@ -576,7 +580,7 @@ TEST(BalancerPolicyTests, Simulation) {
// Cast the size once and use it from here in order to avoid typecast errors
const int shardCurrSizeMB = static_cast<int>(stat.currSizeMB);
- map<string, int>::iterator expectedIt = expected.find(stat.shardId);
+ map<ShardId, int>::iterator expectedIt = expected.find(stat.shardId);
if (expectedIt == expected.end()) {
const bool isInRange =
diff --git a/src/mongo/s/balancer/cluster_statistics.cpp b/src/mongo/s/balancer/cluster_statistics.cpp
index 0c66348ef36..c951c865ce0 100644
--- a/src/mongo/s/balancer/cluster_statistics.cpp
+++ b/src/mongo/s/balancer/cluster_statistics.cpp
@@ -64,7 +64,7 @@ bool ClusterStatistics::ShardStatistics::isSizeMaxed() const {
BSONObj ClusterStatistics::ShardStatistics::toBSON() const {
BSONObjBuilder builder;
- builder.append("id", shardId);
+ builder.append("id", shardId.toString());
builder.append("maxSizeMB", static_cast<long long>(maxSizeMB));
builder.append("currSizeMB", static_cast<long long>(currSizeMB));
builder.append("draining", isDraining);
diff --git a/src/mongo/s/balancer/cluster_statistics_test.cpp b/src/mongo/s/balancer/cluster_statistics_test.cpp
index 4fe4f0b27e5..c9115b6dafd 100644
--- a/src/mongo/s/balancer/cluster_statistics_test.cpp
+++ b/src/mongo/s/balancer/cluster_statistics_test.cpp
@@ -39,9 +39,12 @@ using ShardStatistics = ClusterStatistics::ShardStatistics;
const auto emptyTagSet = std::set<std::string>();
TEST(ShardStatistics, SizeMaxedTest) {
- ASSERT(!ShardStatistics("TestShardId", 0, 0, false, emptyTagSet, "3.2.0").isSizeMaxed());
- ASSERT(!ShardStatistics("TestShardId", 100LL, 80LL, false, emptyTagSet, "3.2.0").isSizeMaxed());
- ASSERT(ShardStatistics("TestShardId", 100LL, 110LL, false, emptyTagSet, "3.2.0").isSizeMaxed());
+ ASSERT(
+ !ShardStatistics(ShardId("TestShardId"), 0, 0, false, emptyTagSet, "3.2.0").isSizeMaxed());
+ ASSERT(!ShardStatistics(ShardId("TestShardId"), 100LL, 80LL, false, emptyTagSet, "3.2.0")
+ .isSizeMaxed());
+ ASSERT(ShardStatistics(ShardId("TestShardId"), 100LL, 110LL, false, emptyTagSet, "3.2.0")
+ .isSizeMaxed());
}
} // namespace
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
index 62a79a0e37b..a09fe9834b4 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
@@ -345,7 +345,7 @@ TEST_F(AddShardTest, Standalone) {
{
DatabaseType dbTestDB1;
dbTestDB1.setName("TestDB1");
- dbTestDB1.setPrimary("StandaloneShard");
+ dbTestDB1.setPrimary(ShardId("StandaloneShard"));
dbTestDB1.setSharded(false);
expectDatabaseUpdate(dbTestDB1);
@@ -354,7 +354,7 @@ TEST_F(AddShardTest, Standalone) {
{
DatabaseType dbTestDB2;
dbTestDB2.setName("TestDB2");
- dbTestDB2.setPrimary("StandaloneShard");
+ dbTestDB2.setPrimary(ShardId("StandaloneShard"));
dbTestDB2.setSharded(false);
expectDatabaseUpdate(dbTestDB2);
@@ -445,7 +445,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
{
DatabaseType dbTestDB1;
dbTestDB1.setName("TestDB1");
- dbTestDB1.setPrimary("shard0006");
+ dbTestDB1.setPrimary(ShardId("shard0006"));
dbTestDB1.setSharded(false);
expectDatabaseUpdate(dbTestDB1);
@@ -454,7 +454,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
{
DatabaseType dbTestDB2;
dbTestDB2.setName("TestDB2");
- dbTestDB2.setPrimary("shard0006");
+ dbTestDB2.setPrimary(ShardId("shard0006"));
dbTestDB2.setSharded(false);
expectDatabaseUpdate(dbTestDB2);
@@ -829,7 +829,7 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
DatabaseType existing;
existing.setName("existing");
- existing.setPrimary("existingShard");
+ existing.setPrimary(ShardId("existingShard"));
expectGetDatabase("existing", existing);
future.timed_get(kFutureTimeout);
@@ -953,7 +953,7 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
// Add the existing database from the newly added shard
DatabaseType shardDB;
shardDB.setName("shardDB");
- shardDB.setPrimary(expectedShardName);
+ shardDB.setPrimary(ShardId(expectedShardName));
shardDB.setSharded(false);
expectDatabaseUpdate(shardDB);
@@ -1014,7 +1014,7 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
// Add the existing database from the newly added shard
DatabaseType shardDB;
shardDB.setName("shardDB");
- shardDB.setPrimary(expectedShardName);
+ shardDB.setPrimary(ShardId(expectedShardName));
shardDB.setSharded(false);
// Ensure that even if upserting the database discovered on the shard fails, the addShard
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
index 71c060df46f..caf410e35a3 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.cpp
@@ -471,7 +471,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
{
BSONArrayBuilder initialShards(collectionDetail.subarrayStart("initShards"));
for (const ShardId& shardId : initShardIds) {
- initialShards.append(shardId);
+ initialShards.append(shardId.toString());
}
}
@@ -527,8 +527,9 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
}
StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* txn,
- const std::string& name) {
+ const ShardId& shardId) {
// Check preconditions for removing the shard
+ string name = shardId.toString();
auto countStatus = _runCountCommandOnConfig(
txn,
NamespaceString(ShardType::ConfigNS),
@@ -634,7 +635,7 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
DatabaseType dbt;
dbt.setName(dbName);
dbt.setSharded(false);
- dbt.setPrimary("config");
+ dbt.setPrimary(ShardId("config"));
return repl::OpTimeWith<DatabaseType>(dbt);
}
@@ -936,12 +937,12 @@ StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContex
}
Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
- const string& shardName,
+ const ShardId& shardId,
vector<string>* dbs) {
auto findStatus = _exhaustiveFindOnConfig(txn,
kConfigReadSelector,
NamespaceString(DatabaseType::ConfigNS),
- BSON(DatabaseType::primary(shardName)),
+ BSON(DatabaseType::primary(shardId.toString())),
BSONObj(),
boost::none); // no limit
if (!findStatus.isOK()) {
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h
index 48df7545263..a31dac7b353 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/replset/sharding_catalog_client_impl.h
@@ -95,7 +95,7 @@ public:
const std::set<ShardId>& initShardsIds) override;
StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
- const std::string& name) override;
+ const ShardId& name) override;
StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
const std::string& dbName) override;
@@ -111,7 +111,7 @@ public:
Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
Status getDatabasesForShard(OperationContext* txn,
- const std::string& shardName,
+ const ShardId& shardName,
std::vector<std::string>* dbs) override;
Status getChunks(OperationContext* txn,
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
index a175821900a..05787798d19 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
@@ -155,7 +155,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
DatabaseType expectedDb;
expectedDb.setName("bigdata");
- expectedDb.setPrimary("shard0000");
+ expectedDb.setPrimary(ShardId("shard0000"));
expectedDb.setSharded(true);
const OpTime newOpTime(Timestamp(7, 6), 5);
@@ -198,7 +198,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetrySuccess) {
DatabaseType expectedDb;
expectedDb.setName("bigdata");
- expectedDb.setPrimary("shard0000");
+ expectedDb.setPrimary(ShardId("shard0000"));
expectedDb.setSharded(true);
auto future = launchAsync([this, &expectedDb] {
@@ -453,14 +453,14 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
chunkA.setVersion({1, 2, oid});
- chunkA.setShard("shard0000");
+ chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
chunkB.setNS("TestDB.TestColl");
chunkB.setMin(BSON("a" << 100));
chunkB.setMax(BSON("a" << 200));
chunkB.setVersion({3, 4, oid});
- chunkB.setShard("shard0001");
+ chunkB.setShard(ShardId("shard0001"));
ChunkVersion queryChunkVersion({1, 2, oid});
@@ -581,7 +581,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSInvalidChunk) {
chunkA.setMin(BSON("a" << 1));
chunkA.setMax(BSON("a" << 100));
chunkA.setVersion({1, 2, OID::gen()});
- chunkA.setShard("shard0000");
+ chunkA.setShard(ShardId("shard0000"));
ChunkType chunkB;
chunkB.setNS("TestDB.TestColl");
@@ -1019,16 +1019,16 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
DatabaseType dbt1;
dbt1.setName("db1");
- dbt1.setPrimary("shard0000");
+ dbt1.setPrimary(ShardId("shard0000"));
DatabaseType dbt2;
dbt2.setName("db2");
- dbt2.setPrimary("shard0000");
+ dbt2.setPrimary(ShardId("shard0000"));
auto future = launchAsync([this] {
vector<string> dbs;
const auto status =
- catalogClient()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
+ catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000"), &dbs);
ASSERT_OK(status);
return dbs;
@@ -1043,7 +1043,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(DatabaseType::primary(dbt1.getPrimary())));
+ ASSERT_EQ(query->getFilter(), BSON(DatabaseType::primary(dbt1.getPrimary().toString())));
ASSERT_EQ(query->getSort(), BSONObj());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1063,7 +1063,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardInvalidDoc) {
auto future = launchAsync([this] {
vector<string> dbs;
const auto status =
- catalogClient()->getDatabasesForShard(operationContext(), "shard0000", &dbs);
+ catalogClient()->getDatabasesForShard(operationContext(), ShardId("shard0000"), &dbs);
ASSERT_EQ(ErrorCodes::TypeMismatch, status);
ASSERT_EQ(0U, dbs.size());
@@ -1072,7 +1072,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardInvalidDoc) {
onFindCommand([](const RemoteCommandRequest& request) {
DatabaseType dbt1;
dbt1.setName("db1");
- dbt1.setPrimary("shard0000");
+ dbt1.setPrimary(ShardId("shard0000"));
return vector<BSONObj>{
dbt1.toBSON(),
@@ -1187,7 +1187,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkOneTagFound) {
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, OID::gen()});
- chunk.setShard("shard0000");
+ chunk.setShard(ShardId("shard0000"));
ASSERT_OK(chunk.validate());
auto future = launchAsync([this, chunk] {
@@ -1232,7 +1232,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkNoTagFound) {
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, OID::gen()});
- chunk.setShard("shard0000");
+ chunk.setShard(ShardId("shard0000"));
ASSERT_OK(chunk.validate());
auto future = launchAsync([this, chunk] {
@@ -1271,7 +1271,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkInvalidTagDoc) {
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, OID::gen()});
- chunk.setShard("shard0000");
+ chunk.setShard(ShardId("shard0000"));
ASSERT_OK(chunk.validate());
auto future = launchAsync([this, chunk] {
@@ -1311,7 +1311,7 @@ TEST_F(ShardingCatalogClientTest, UpdateDatabase) {
DatabaseType dbt;
dbt.setName("test");
- dbt.setPrimary("shard0000");
+ dbt.setPrimary(ShardId("shard0000"));
dbt.setSharded(true);
auto future = launchAsync([this, dbt] {
@@ -1354,7 +1354,7 @@ TEST_F(ShardingCatalogClientTest, UpdateDatabaseExceededTimeLimit) {
DatabaseType dbt;
dbt.setName("test");
- dbt.setPrimary("shard0001");
+ dbt.setPrimary(ShardId("shard0001"));
dbt.setSharded(false);
auto future = launchAsync([this, dbt] {
@@ -1450,7 +1450,7 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
chunk.setMin(BSON("a" << 1));
chunk.setMax(BSON("a" << 100));
chunk.setVersion({1, 2, oid});
- chunk.setShard("shard0000");
+ chunk.setShard(ShardId("shard0000"));
return vector<BSONObj>{chunk.toBSON()};
});
@@ -1621,7 +1621,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
DatabaseType expectedDb;
expectedDb.setName(dbname);
- expectedDb.setPrimary(s1.getName()); // This is the one we reported with the smallest size
+ expectedDb.setPrimary(
+ ShardId(s1.getName())); // This is the one we reported with the smallest size
expectedDb.setSharded(false);
ASSERT_EQUALS(expectedDb.toBSON(), insert);
@@ -1910,7 +1911,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
DatabaseType expectedDb;
expectedDb.setName(dbname);
- expectedDb.setPrimary(s1.getName()); // This is the one we reported with the smallest size
+ expectedDb.setPrimary(
+ ShardId(s1.getName())); // This is the one we reported with the smallest size
expectedDb.setSharded(false);
ASSERT_EQUALS(expectedDb.toBSON(), insert);
@@ -1937,7 +1939,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
setupShards(vector<ShardType>{shard});
auto shardTargeter = RemoteCommandTargeterMock::get(
- shardRegistry()->getShard(operationContext(), "shard0")->getTargeter());
+ shardRegistry()->getShard(operationContext(), ShardId("shard0"))->getTargeter());
shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
distLock()->expectLock(
@@ -2327,7 +2329,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeFindThenCmd) {
DatabaseType dbType;
dbType.setName("TestDB");
- dbType.setPrimary("TestShard");
+ dbType.setPrimary(ShardId("TestShard"));
dbType.setSharded("true");
return std::make_tuple(vector<BSONObj>{dbType.toBSON()}, builder.obj());
@@ -2407,7 +2409,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeCmdThenFind) {
DatabaseType dbType;
dbType.setName("TestDB");
- dbType.setPrimary("TestShard");
+ dbType.setPrimary(ShardId("TestShard"));
dbType.setSharded("true");
return vector<BSONObj>{dbType.toBSON()};
@@ -2495,7 +2497,7 @@ TEST_F(ShardingCatalogClientTest, RetryOnFindCommandNetworkErrorSucceedsAtMaxRet
onFindCommand([](const RemoteCommandRequest& request) {
DatabaseType dbType;
dbType.setName("TestDB");
- dbType.setPrimary("TestShard");
+ dbType.setPrimary(ShardId("TestShard"));
dbType.setSharded("true");
return vector<BSONObj>{dbType.toBSON()};
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 8317debf6db..004482b938a 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -148,7 +148,7 @@ public:
* the current draining status. See ShardDrainingStatus enum definition for more details.
*/
virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
- const std::string& name) = 0;
+ const ShardId& name) = 0;
/**
* Updates or creates the metadata for a given database.
@@ -222,7 +222,7 @@ public:
* Returns a !OK status if an error occurs.
*/
virtual Status getDatabasesForShard(OperationContext* txn,
- const std::string& shardName,
+ const ShardId& shardId,
std::vector<std::string>* dbs) = 0;
/**
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index e13711c040c..2095ceef887 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -68,7 +68,7 @@ Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
}
StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* txn,
- const string& name) {
+ const ShardId& name) {
return ShardDrainingStatus::COMPLETED;
}
@@ -106,7 +106,7 @@ Status ShardingCatalogClientMock::dropCollection(OperationContext* txn, const Na
}
Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* txn,
- const string& shardName,
+ const ShardId& shardName,
vector<string>* dbs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 1bd8c5841e3..9edcf9b1391 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -55,7 +55,7 @@ public:
const std::set<ShardId>& initShardIds) override;
StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
- const std::string& name) override;
+ const ShardId& name) override;
Status updateDatabase(OperationContext* txn,
const std::string& dbName,
@@ -79,7 +79,7 @@ public:
Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
Status getDatabasesForShard(OperationContext* txn,
- const std::string& shardName,
+ const ShardId& shardName,
std::vector<std::string>* dbs) override;
Status getChunks(OperationContext* txn,
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 1ab80f0e674..111aa67a625 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -195,7 +195,7 @@ Status ChunkType::validate() const {
return Status(ErrorCodes::NoSuchKey, str::stream() << "missing version field");
}
- if (!_shard.is_initialized() || _shard->empty()) {
+ if (!_shard.is_initialized() || !_shard->isValid()) {
return Status(ErrorCodes::NoSuchKey,
str::stream() << "missing " << shard.name() << " field");
}
@@ -237,7 +237,7 @@ BSONObj ChunkType::toBSON() const {
if (_max)
builder.append(max.name(), getMax());
if (_shard)
- builder.append(shard.name(), getShard());
+ builder.append(shard.name(), getShard().toString());
if (_version)
_version->appendForChunk(&builder);
if (_jumbo)
@@ -276,8 +276,8 @@ void ChunkType::setVersion(const ChunkVersion& version) {
_version = version;
}
-void ChunkType::setShard(const std::string& shard) {
- invariant(!shard.empty());
+void ChunkType::setShard(const ShardId& shard) {
+ invariant(shard.isValid());
_shard = shard;
}
diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h
index f988a09028d..a7a0e1f7e30 100644
--- a/src/mongo/s/catalog/type_chunk.h
+++ b/src/mongo/s/catalog/type_chunk.h
@@ -33,6 +33,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
@@ -158,10 +159,10 @@ public:
}
void setVersion(const ChunkVersion& version);
- const std::string& getShard() const {
+ const ShardId& getShard() const {
return _shard.get();
}
- void setShard(const std::string& shard);
+ void setShard(const ShardId& shard);
bool getJumbo() const {
return _jumbo.get_value_or(false);
@@ -180,7 +181,7 @@ private:
// (M) version of this chunk
boost::optional<ChunkVersion> _version;
// (M) shard this chunk lives in
- boost::optional<std::string> _shard;
+ boost::optional<ShardId> _shard;
// (O) too big to move?
boost::optional<bool> _jumbo;
};
diff --git a/src/mongo/s/catalog/type_database.cpp b/src/mongo/s/catalog/type_database.cpp
index 24de84c838c..9ccf117ea87 100644
--- a/src/mongo/s/catalog/type_database.cpp
+++ b/src/mongo/s/catalog/type_database.cpp
@@ -86,7 +86,7 @@ Status DatabaseType::validate() const {
return Status(ErrorCodes::NoSuchKey, "missing name");
}
- if (!_primary.is_initialized() || _primary->empty()) {
+ if (!_primary.is_initialized() || !_primary->isValid()) {
return Status(ErrorCodes::NoSuchKey, "missing primary");
}
@@ -100,7 +100,7 @@ Status DatabaseType::validate() const {
BSONObj DatabaseType::toBSON() const {
BSONObjBuilder builder;
builder.append(name.name(), _name.get_value_or(""));
- builder.append(primary.name(), _primary.get_value_or(""));
+ builder.append(primary.name(), _primary.get_value_or(ShardId()).toString());
builder.append(sharded.name(), _sharded.get_value_or(false));
return builder.obj();
@@ -115,8 +115,8 @@ void DatabaseType::setName(const std::string& name) {
_name = name;
}
-void DatabaseType::setPrimary(const std::string& primary) {
- invariant(!primary.empty());
+void DatabaseType::setPrimary(const ShardId& primary) {
+ invariant(primary.isValid());
_primary = primary;
}
diff --git a/src/mongo/s/catalog/type_database.h b/src/mongo/s/catalog/type_database.h
index e452bd826a2..c1948f1132c 100644
--- a/src/mongo/s/catalog/type_database.h
+++ b/src/mongo/s/catalog/type_database.h
@@ -32,6 +32,7 @@
#include <string>
#include "mongo/db/jsobj.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
@@ -82,10 +83,10 @@ public:
}
void setName(const std::string& name);
- const std::string& getPrimary() const {
+ const ShardId& getPrimary() const {
return _primary.get();
}
- void setPrimary(const std::string& primary);
+ void setPrimary(const ShardId& primary);
bool getSharded() const {
return _sharded.get();
@@ -100,7 +101,7 @@ private:
// Required primary shard (must be set even if the database is sharded, because there
// might be collections which are unsharded).
- boost::optional<std::string> _primary;
+ boost::optional<ShardId> _primary;
// Required whether sharding is enabled for this database. Even though this field is of
// type optional, it is only used as an indicator that the value was explicitly set.
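Taken together, the DatabaseType changes above swap the std::string primary for a ShardId and move the emptiness check into ShardId::isValid(). A minimal sketch of the resulting setter/validate flow, using a free sketch() function and a made-up shard name (illustrative only, not code from this commit):

#include "mongo/s/catalog/type_database.h"
#include "mongo/s/shard_id.h"

void sketch() {
    mongo::DatabaseType db;
    db.setName("TestDB");
    // setPrimary() now takes a ShardId and asserts ShardId::isValid(),
    // i.e. a non-empty id, instead of checking !primary.empty().
    db.setPrimary(mongo::ShardId("shard0000"));
    db.setSharded(true);
    // validate() reports NoSuchKey when the primary ShardId is not valid;
    // toBSON() serializes the primary via ShardId::toString().
    const bool ok = db.validate().isOK();
    (void)ok;
}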
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index 384f95e7df9..578ca7e51e9 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -128,7 +128,7 @@ protected:
virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* txn,
const ChunkType& chunk) const = 0;
- virtual ShardId shardFor(OperationContext* txn, const std::string& name) const = 0;
+ virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const = 0;
private:
void _assertAttached() const;
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index 46cbce4dabc..51dd7ea1617 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -68,7 +68,7 @@ public:
return make_pair(chunk.getMin(), chunk.getMax());
}
- virtual ShardId shardFor(OperationContext* txn, const string& name) const {
+ virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
return name;
}
};
@@ -105,7 +105,7 @@ void convertBSONArrayToChunkTypes(const vector<BSONObj>& chunksArray,
class ChunkDiffUnitTest : public mongo::unittest::Test {
protected:
typedef map<BSONObj, BSONObj, BSONObjCmp> RangeMap;
- typedef map<string, ChunkVersion> VersionMap;
+ typedef map<ShardId, ChunkVersion> VersionMap;
ChunkDiffUnitTest() = default;
~ChunkDiffUnitTest() = default;
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 0dcad211c7e..031721d6324 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -97,8 +97,8 @@ public:
return make_pair(chunk.getMax(), c);
}
- string shardFor(OperationContext* txn, const string& hostName) const final {
- const auto shard = grid.shardRegistry()->getShard(txn, hostName);
+ ShardId shardFor(OperationContext* txn, const ShardId& shardId) const final {
+ const auto shard = grid.shardRegistry()->getShard(txn, shardId);
return shard->getId();
}
@@ -674,13 +674,13 @@ IndexBounds ChunkManager::collapseQuerySolution(const QuerySolutionNode* node) {
return bounds;
}
-bool ChunkManager::compatibleWith(const ChunkManager& other, const string& shardName) const {
+bool ChunkManager::compatibleWith(const ChunkManager& other, const ShardId& shardName) const {
// Return true if the shard version is the same in the two chunk managers
// TODO: This doesn't need to be so strong, just major vs
return other.getVersion(shardName).equals(getVersion(shardName));
}
-ChunkVersion ChunkManager::getVersion(const std::string& shardName) const {
+ChunkVersion ChunkManager::getVersion(const ShardId& shardName) const {
ShardVersionMap::const_iterator i = _shardVersions.find(shardName);
if (i == _shardVersions.end()) {
// Shards without explicitly tracked shard versions (meaning they have
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 49009d8abb6..e2178feddaf 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -57,7 +57,7 @@ typedef std::map<BSONObj, std::shared_ptr<Chunk>, BSONObjCmp> ChunkMap;
class ChunkManager {
public:
- typedef std::map<std::string, ChunkVersion> ShardVersionMap;
+ typedef std::map<ShardId, ChunkVersion> ShardVersionMap;
// Loads a new chunk manager from a collection document
explicit ChunkManager(const CollectionType& coll);
@@ -164,11 +164,11 @@ public:
/**
* Returns true if, for this shard, the chunks are identical in both chunk managers
*/
- bool compatibleWith(const ChunkManager& other, const std::string& shard) const;
+ bool compatibleWith(const ChunkManager& other, const ShardId& shard) const;
std::string toString() const;
- ChunkVersion getVersion(const std::string& shardName) const;
+ ChunkVersion getVersion(const ShardId& shardName) const;
ChunkVersion getVersion() const;
void _printChunks() const;
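ShardVersionMap can be keyed by ShardId because the class provides operator< for ordered containers. A minimal sketch under that assumption; the placeholder int values stand in for ChunkVersion and are not from this commit:

#include <map>
#include "mongo/s/shard_id.h"

void sketch() {
    // Ordered map keyed by ShardId, mirroring ChunkManager::ShardVersionMap
    // (placeholder ints stand in for ChunkVersion).
    std::map<mongo::ShardId, int> shardVersions;
    shardVersions[mongo::ShardId("shard0000")] = 1;
    shardVersions[mongo::ShardId("shard0001")] = 2;
    // Insertion, ordering and lookup all go through ShardId::operator<.
    const auto it = shardVersions.find(mongo::ShardId("shard0001"));
    (void)it;
}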
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index c9e9bcb76de..6f1578ef5f1 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -188,21 +188,22 @@ ChunkVersion getShardVersion(StringData shardName,
*/
CompareResult compareAllShardVersions(const ChunkManager* cachedChunkManager,
const Shard* cachedPrimary,
- const map<string, ChunkVersion>& remoteShardVersions) {
+ const map<ShardId, ChunkVersion>& remoteShardVersions) {
CompareResult finalResult = CompareResult_GTE;
- for (map<string, ChunkVersion>::const_iterator it = remoteShardVersions.begin();
+ for (map<ShardId, ChunkVersion>::const_iterator it = remoteShardVersions.begin();
it != remoteShardVersions.end();
++it) {
// Get the remote and cached version for the next shard
- const string& shardName = it->first;
+ const ShardId& shardName = it->first;
const ChunkVersion& remoteShardVersion = it->second;
ChunkVersion cachedShardVersion;
try {
// Throws b/c shard constructor throws
- cachedShardVersion = getShardVersion(shardName, cachedChunkManager, cachedPrimary);
+ cachedShardVersion =
+ getShardVersion(shardName.toString(), cachedChunkManager, cachedPrimary);
} catch (const DBException& ex) {
warning() << "could not lookup shard " << shardName
<< " in local cache, shard metadata may have changed"
@@ -576,7 +577,8 @@ void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
if (staleInfo["vWanted"].eoo()) {
// If we don't have a vWanted sent, assume the version is higher than our current
// version.
- remoteShardVersion = getShardVersion(endpoint.shardName, _manager.get(), _primary.get());
+ remoteShardVersion =
+ getShardVersion(endpoint.shardName.toString(), _manager.get(), _primary.get());
remoteShardVersion.incMajor();
} else {
remoteShardVersion = ChunkVersion::fromBSON(staleInfo, "vWanted");
@@ -584,7 +586,7 @@ void ChunkManagerTargeter::noteStaleResponse(const ShardEndpoint& endpoint,
ShardVersionMap::iterator it = _remoteShardVersions.find(endpoint.shardName);
if (it == _remoteShardVersions.end()) {
- _remoteShardVersions.insert(make_pair(endpoint.shardName, remoteShardVersion));
+ _remoteShardVersions.insert(std::make_pair(endpoint.shardName, remoteShardVersion));
} else {
ChunkVersion& previouslyNotedVersion = it->second;
if (previouslyNotedVersion.hasEqualEpoch(remoteShardVersion)) {
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index b9fa547d89e..06c823bbcf1 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -110,7 +110,7 @@ private:
RefreshType_ReloadDatabase
};
- typedef std::map<std::string, ChunkVersion> ShardVersionMap;
+ typedef std::map<ShardId, ChunkVersion> ShardVersionMap;
/**
diff --git a/src/mongo/s/chunk_manager_tests.cpp b/src/mongo/s/chunk_manager_tests.cpp
index d674e94bc51..08ac87e954c 100644
--- a/src/mongo/s/chunk_manager_tests.cpp
+++ b/src/mongo/s/chunk_manager_tests.cpp
@@ -138,7 +138,7 @@ protected:
ASSERT(version.epoch() == epoch);
// Check chunk's shard id.
- ASSERT(chunk[ChunkType::shard()].String() == _shardId);
+ ASSERT(chunk[ChunkType::shard()].String() == _shardId.toString());
return RemoteCommandResponse(BSON("ok" << 1), BSONObj(), Milliseconds(1));
});
diff --git a/src/mongo/s/client/SConscript b/src/mongo/s/client/SConscript
index dbc890279c2..6c1263d354d 100644
--- a/src/mongo/s/client/SConscript
+++ b/src/mongo/s/client/SConscript
@@ -79,6 +79,7 @@ env.Library(
],
LIBDEPS=[
'$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/s/shard_id',
]
)
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 145bf1e2593..cf57f4ef1ac 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -33,14 +33,13 @@
#include "mongo/client/read_preference.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/optime.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
class OperationContext;
class RemoteCommandTargeter;
-using ShardId = std::string;
-
/**
* Presents an interface for talking to shards, regardless of whether that shard is remote or is
* the current (local) shard.
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index 26f17aaa029..58db95ad74c 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -78,7 +78,7 @@ void ShardLocal::updateReplSetMonitor(const HostAndPort& remoteHost,
}
std::string ShardLocal::toString() const {
- return getId() + ":<local>";
+ return getId().toString() + ":<local>";
}
bool ShardLocal::isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index 45faebc4a9e..b04ab897ead 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -76,7 +76,7 @@ void ShardLocalTest::setUp() {
ServiceContextMongoDTest::setUp();
Client::initThreadIfNotAlready();
_txn = getGlobalServiceContext()->makeOperationContext(&cc());
- _shardLocal = stdx::make_unique<ShardLocal>("shardOrConfig");
+ _shardLocal = stdx::make_unique<ShardLocal>(ShardId("shardOrConfig"));
const repl::ReplSettings replSettings = {};
repl::setGlobalReplicationCoordinator(new repl::ReplicationCoordinatorMock(replSettings));
}
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 34307cd1fe2..13135db1142 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -119,7 +119,7 @@ shared_ptr<Shard> ShardRegistry::getConfigShard() const {
}
unique_ptr<Shard> ShardRegistry::createConnection(const ConnectionString& connStr) const {
- return _shardFactory->createUniqueShard("<unnamed>", connStr);
+ return _shardFactory->createUniqueShard(ShardId("<unnamed>"), connStr);
}
shared_ptr<Shard> ShardRegistry::lookupRSName(const string& name) const {
@@ -127,7 +127,7 @@ shared_ptr<Shard> ShardRegistry::lookupRSName(const string& name) const {
}
void ShardRegistry::getAllShardIds(vector<ShardId>* all) const {
- std::set<string> seen;
+ std::set<ShardId> seen;
_data.getAllShardIds(seen);
all->assign(seen.begin(), seen.end());
}
@@ -146,7 +146,7 @@ void ShardRegistry::updateReplSetHosts(const ConnectionString& newConnString) {
void ShardRegistry::startup() {
stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
invariant(_initConfigServerCS.isValid());
- auto configShard = _shardFactory->createShard("config", _initConfigServerCS);
+ auto configShard = _shardFactory->createShard(ShardId("config"), _initConfigServerCS);
_data.addConfigShard(configShard);
// Set to invalid so it can't be started more than once.
_initConfigServerCS = ConnectionString();
@@ -302,11 +302,11 @@ void ShardRegistryData::toBSON(BSONObjBuilder* result) const {
}
}
-void ShardRegistryData::getAllShardIds(std::set<string>& seen) const {
+void ShardRegistryData::getAllShardIds(std::set<ShardId>& seen) const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
for (auto i = _lookup.begin(); i != _lookup.end(); ++i) {
const auto& s = i->second;
- if (s->getId() == "config") {
+ if (s->getId().toString() == "config") {
continue;
}
seen.insert(s->getId());
@@ -377,8 +377,8 @@ void ShardRegistryData::_addShard_inlock(const std::shared_ptr<Shard>& shard) {
// CUSTOM connection strings (ie "$dummy:10000") become DBDirectClient connections which
// always return "localhost" as their response to getServerAddress(). This is just for
// making dbtest work.
- _lookup["localhost"] = shard;
- _hostLookup[HostAndPort{"localhost"}] = shard;
+ _lookup[ShardId("localhost")] = shard;
+ _hostLookup[HostAndPort("localhost")] = shard;
}
// TODO: The only reason to have the shard host names in the lookup table is for the
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index e65987485e9..e384a53feea 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -115,7 +115,7 @@ private:
// Protects the lookup maps below.
mutable stdx::mutex _mutex;
- using ShardMap = std::unordered_map<ShardId, std::shared_ptr<Shard>>;
+ using ShardMap = std::unordered_map<ShardId, std::shared_ptr<Shard>, ShardId::Hasher>;
// Map of both shardName -> Shard and hostName -> Shard
ShardMap _lookup;
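Unlike the ordered maps above, std::unordered_map needs an explicit hash functor here because std::hash is not specialized for ShardId; ShardId::Hasher fills that role. A minimal sketch with an invented counter map (illustrative only):

#include <unordered_map>
#include "mongo/s/shard_id.h"

void sketch() {
    // The third template argument is required: there is no std::hash<ShardId>
    // specialization, so ShardId::Hasher supplies the hash and operator== the equality.
    std::unordered_map<mongo::ShardId, int, mongo::ShardId::Hasher> hitsPerShard;
    hitsPerShard[mongo::ShardId("shard0000")] += 1;
}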
diff --git a/src/mongo/s/client/shard_registry_data_test.cpp b/src/mongo/s/client/shard_registry_data_test.cpp
index 162ebb52c78..a005cb1aae8 100644
--- a/src/mongo/s/client/shard_registry_data_test.cpp
+++ b/src/mongo/s/client/shard_registry_data_test.cpp
@@ -85,7 +85,7 @@ private:
TEST_F(ShardRegistryDataTest, AddConfigShard) {
ConnectionString configCS("rs/dummy1:1234,dummy2:2345,dummy3:3456", ConnectionString::SET);
- auto configShard = shardFactory()->createShard("config", configCS);
+ auto configShard = shardFactory()->createShard(ShardId("config"), configCS);
ShardRegistryData data;
data.addConfigShard(configShard);
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 1ed6abe2d41..3aa44218b4f 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -177,7 +177,7 @@ void ShardRemote::updateReplSetMonitor(const HostAndPort& remoteHost,
}
std::string ShardRemote::toString() const {
- return getId() + ":" + _originalConnString.toString();
+ return getId().toString() + ":" + _originalConnString.toString();
}
const BSONObj& ShardRemote::_getMetadataForCommand(const ReadPreferenceSetting& readPref) {
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 4bb96e6b086..2a4ca984a16 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -162,16 +162,16 @@ public:
for (vector<Strategy::CommandResult>::const_iterator iter = countResult.begin();
iter != countResult.end();
++iter) {
- const string& shardName = iter->shardTargetId;
+ const ShardId& shardName = iter->shardTargetId;
if (iter->result["ok"].trueValue()) {
long long shardCount = iter->result["n"].numberLong();
- shardSubTotal.appendNumber(shardName, shardCount);
+ shardSubTotal.appendNumber(shardName.toString(), shardCount);
total += shardCount;
} else {
shardSubTotal.doneFast();
- errmsg = "failed on : " + shardName;
+ errmsg = "failed on : " + shardName.toString();
result.append("cause", iter->result);
// Add "code" to the top-level response, if the failure of the sharded command
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index e38a2d072b5..fa4b231741a 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -218,7 +218,7 @@ void ClusterExplain::buildPlannerInfo(OperationContext* txn,
BSONObj queryPlanner = shardResults[i].result["queryPlanner"].Obj();
BSONObj serverInfo = shardResults[i].result["serverInfo"].Obj();
- singleShardBob.append("shardName", shardResults[i].shardTargetId);
+ singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
{
const auto shard = grid.shardRegistry()->getShard(txn, shardResults[i].shardTargetId);
singleShardBob.append("connectionString", shard->getConnString().toString());
@@ -291,7 +291,7 @@ void ClusterExplain::buildExecStats(const vector<Strategy::CommandResult>& shard
BSONObj execStats = shardResults[i].result["executionStats"].Obj();
BSONObj execStages = execStats["executionStages"].Obj();
- singleShardBob.append("shardName", shardResults[i].shardTargetId);
+ singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
// Append error-related fields, if present.
if (!execStats["executionSuccess"].eoo()) {
@@ -323,7 +323,7 @@ void ClusterExplain::buildExecStats(const vector<Strategy::CommandResult>& shard
for (size_t i = 0; i < shardResults.size(); i++) {
BSONObjBuilder singleShardBob(execShardsBuilder.subobjStart());
- singleShardBob.append("shardName", shardResults[i].shardTargetId);
+ singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
BSONObj execStats = shardResults[i].result["executionStats"].Obj();
vector<BSONElement> allPlans = execStats["allPlansExecution"].Array();
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index b3dc4e5edcd..8a1ce646c2e 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -101,7 +101,7 @@ public:
uassertStatusOK(response.commandStatus);
BSONObj x = std::move(response.response);
- sub.append(s->getId(), x);
+ sub.append(s->getId().toString(), x);
if (!x["ok"].trueValue()) {
ok = false;
diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
index 14e8562024c..a1f4d40d81d 100644
--- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
@@ -126,7 +126,7 @@ public:
// Append shard result as a sub object.
// Name the field after the shard.
- result.append(cmdResult.shardTargetId, cmdResult.result);
+ result.append(cmdResult.shardTargetId.toString(), cmdResult.result);
}
return clusterCmdResult;
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index b9fcc41af54..3b911f97588 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -130,7 +130,7 @@ public:
bb.reset(new BSONObjBuilder());
}
- bb->appendNumber(s->getId(), size);
+ bb->appendNumber(s->getId().toString(), size);
}
}
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index 9da09c470a0..1973c93d7d3 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -141,7 +141,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* txn,
// Append shard result as a sub object.
// Name the field after the shard.
- string shardName = cmdResult.shardTargetId;
+ string shardName = cmdResult.shardTargetId.toString();
result.append(shardName, cmdResult.result);
}
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index a82bba5e38e..0a8c4ff318f 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -87,7 +87,7 @@ public:
BSONObjBuilder& result) {
const string target = cmdObj.firstElement().valuestrsafe();
- const auto s = grid.shardRegistry()->getShard(txn, target);
+ const auto s = grid.shardRegistry()->getShard(txn, ShardId(target));
if (!s) {
string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
@@ -131,17 +131,18 @@ public:
case ShardDrainingStatus::STARTED:
result.append("msg", "draining started successfully");
result.append("state", "started");
- result.append("shard", s->getId());
+ result.append("shard", s->getId().toString());
result.appendElements(dbInfo);
break;
case ShardDrainingStatus::ONGOING: {
vector<ChunkType> chunks;
- Status status = catalogClient->getChunks(txn,
- BSON(ChunkType::shard(s->getId())),
- BSONObj(),
- boost::none, // return all
- &chunks,
- nullptr);
+ Status status =
+ catalogClient->getChunks(txn,
+ BSON(ChunkType::shard(s->getId().toString())),
+ BSONObj(),
+ boost::none, // return all
+ &chunks,
+ nullptr);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -161,7 +162,7 @@ public:
case ShardDrainingStatus::COMPLETED:
result.append("msg", "removeshard completed successfully");
result.append("state", "completed");
- result.append("shard", s->getId());
+ result.append("shard", s->getId().toString());
}
return true;
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index fd425be5691..93e344c766c 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -846,7 +846,7 @@ Status runUpgradeOnAllShards(OperationContext* txn,
// Upgrade each shard in turn, stopping on first failure.
auto shardRegistry = grid.shardRegistry();
shardRegistry->reload(txn);
- vector<string> shardIds;
+ vector<ShardId> shardIds;
shardRegistry->getAllShardIds(&shardIds);
bool hasWCError = false;
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index eafdb4940a8..19a9969de6c 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -270,7 +270,7 @@ private:
auto shard = grid.shardRegistry()->getShard(txn, endpoint->shardName);
if (!shard) {
return Status(ErrorCodes::ShardNotFound,
- "Could not find shard with id " + endpoint->shardName);
+ "Could not find shard with id " + endpoint->shardName.toString());
}
auto swHostAndPort = shard->getTargeter()->findHost(readPref);
if (!swHostAndPort.isOK()) {
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index f3115964c1b..1323ea8e215 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -447,7 +447,7 @@ public:
bool isValid = true;
bool errored = false;
for (const auto& cmdResult : results) {
- const string& shardName = cmdResult.shardTargetId;
+ const ShardId& shardName = cmdResult.shardTargetId;
BSONObj result = cmdResult.result;
const BSONElement valid = result["valid"];
if (!valid.trueValue()) {
@@ -459,7 +459,7 @@ public:
errmsg = result["errmsg"].toString();
errored = true;
}
- rawResBuilder.append(shardName, result);
+ rawResBuilder.append(shardName.toString(), result);
}
rawResBuilder.done();
@@ -701,7 +701,7 @@ public:
auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(txn, dbName));
if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
result.appendBool("sharded", false);
- result.append("primary", conf->getPrimaryId());
+ result.append("primary", conf->getPrimaryId().toString());
return passthrough(txn, conf.get(), cmdObj, result);
}
@@ -811,7 +811,7 @@ public:
warning() << "mongos collstats doesn't know about: " << e.fieldName();
}
}
- shardStats.append(shardId, res);
+ shardStats.append(shardId.toString(), res);
}
result.append("ns", fullns);
@@ -1288,7 +1288,7 @@ public:
} catch (DBException& e) {
// This is handled below and logged
Strategy::CommandResult errResult;
- errResult.shardTargetId = "";
+ errResult.shardTargetId = ShardId();
errResult.result = BSON("errmsg" << e.what() << "ok" << 0);
results.push_back(errResult);
}
@@ -1400,7 +1400,7 @@ public:
futures.push_back(
Future::spawnCommand(shard->getConnString().toString(), dbName, cmdObj, options));
- shardArray.append(shardId);
+ shardArray.append(shardId.toString());
}
multimap<double, BSONObj> results; // TODO: maybe use merge-sort instead
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 5e273e159ea..e83e6d353cb 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -119,7 +119,7 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
if (res->join(txn)) {
// success :)
BSONObj result = res->result();
- results.emplace_back(*shardIdsIt, result);
+ results.emplace_back(shardIdsIt->toString(), result);
subobj.append(res->getServer(), result);
if (!hasWCError) {
@@ -146,7 +146,7 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
BSONElement errmsgObj = result["errmsg"];
if (errmsgObj.eoo() || errmsgObj.String().empty()) {
// it was fixed!
- results.emplace_back(*shardIdsIt, result);
+ results.emplace_back(shardIdsIt->toString(), result);
subobj.append(res->getServer(), result);
continue;
}
@@ -168,7 +168,7 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
} else if (commonErrCode != errCode) {
commonErrCode = 0;
}
- results.emplace_back(*shardIdsIt, result);
+ results.emplace_back(shardIdsIt->toString(), result);
subobj.append(res->getServer(), result);
}
diff --git a/src/mongo/s/commands/sharded_command_processing.cpp b/src/mongo/s/commands/sharded_command_processing.cpp
index f873a4e0265..0c44761b9c2 100644
--- a/src/mongo/s/commands/sharded_command_processing.cpp
+++ b/src/mongo/s/commands/sharded_command_processing.cpp
@@ -31,10 +31,11 @@
#include "mongo/s/commands/sharded_command_processing.h"
#include "mongo/rpc/write_concern_error_detail.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
-void appendWriteConcernErrorToCmdResponse(const std::string& shardID,
+void appendWriteConcernErrorToCmdResponse(const ShardId& shardId,
const BSONElement& wcErrorElem,
BSONObjBuilder& responseBuilder) {
WriteConcernErrorDetail wcError;
@@ -44,7 +45,7 @@ void appendWriteConcernErrorToCmdResponse(const std::string& shardID,
wcError.setErrMessage("Failed to parse writeConcernError: " + wcErrorObj.toString() +
", Received error: " + errMsg);
}
- wcError.setErrMessage(wcError.getErrMessage() + " at " + shardID);
+ wcError.setErrMessage(wcError.getErrMessage() + " at " + shardId.toString());
responseBuilder.append("writeConcernError", wcError.toBSON());
}
} // namespace mongo
diff --git a/src/mongo/s/commands/sharded_command_processing.h b/src/mongo/s/commands/sharded_command_processing.h
index f0424424f89..18732669ef1 100644
--- a/src/mongo/s/commands/sharded_command_processing.h
+++ b/src/mongo/s/commands/sharded_command_processing.h
@@ -34,10 +34,12 @@
namespace mongo {
+class ShardId;
+
/**
* This function appends the provided writeConcernError BSONElement to the sharded response.
*/
-void appendWriteConcernErrorToCmdResponse(const std::string& shardID,
+void appendWriteConcernErrorToCmdResponse(const ShardId& shardID,
const BSONElement& wcErrorElem,
BSONObjBuilder& responseBuilder);
} // namespace mongo
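Because the ShardId parameter is only taken by const reference, the forward declaration above is sufficient; the full header is needed only in the translation unit that uses the class. A minimal sketch of that pattern with hypothetical file and function names (not from this commit):

// hypothetical my_logging.h
namespace mongo {
class ShardId;  // forward declaration is enough for a const-reference parameter

void logShardName(const ShardId& shardId);
}  // namespace mongo

// hypothetical my_logging.cpp
#include <iostream>
#include "mongo/s/shard_id.h"

namespace mongo {
void logShardName(const ShardId& shardId) {
    // The full definition is needed only here, where members are actually used.
    std::cout << shardId.toString() << std::endl;
}
}  // namespace mongo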
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 5773d7a3995..723e474ca74 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -579,14 +579,14 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
ScopedDbConnection conn(shard->getConnString(), 30.0);
BSONObj res;
if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
- errmsg = res.toString() + " at " + _primaryId;
+ errmsg = res.toString() + " at " + _primaryId.toString();
return 0;
}
conn.done();
if (auto wcErrorElem = res["writeConcernError"]) {
auto wcError = wcErrorElem.Obj();
if (auto errMsgElem = wcError["errmsg"]) {
- errmsg = errMsgElem.str() + " at " + _primaryId;
+ errmsg = errMsgElem.str() + " at " + _primaryId.toString();
return false;
}
}
@@ -602,14 +602,14 @@ bool DBConfig::dropDatabase(OperationContext* txn, string& errmsg) {
ScopedDbConnection conn(shard->getConnString(), 30.0);
BSONObj res;
if (!conn->dropDatabase(_name, txn->getWriteConcern(), &res)) {
- errmsg = res.toString() + " at " + shardId;
+ errmsg = res.toString() + " at " + shardId.toString();
return 0;
}
conn.done();
if (auto wcErrorElem = res["writeConcernError"]) {
auto wcError = wcErrorElem.Obj();
if (auto errMsgElem = wcError["errmsg"]) {
- errmsg = errMsgElem.str() + " at " + shardId;
+ errmsg = errMsgElem.str() + " at " + shardId.toString();
return false;
}
}
@@ -724,7 +724,7 @@ void ConfigServer::replicaSetChangeConfigServerUpdateHook(const string& setName,
auto status = grid.catalogClient(txn.get())->updateConfigDocument(
txn.get(),
ShardType::ConfigNS,
- BSON(ShardType::name(s->getId())),
+ BSON(ShardType::name(s->getId().toString())),
BSON("$set" << BSON(ShardType::host(newConnectionString))),
false);
if (!status.isOK()) {
diff --git a/src/mongo/s/move_chunk_request.cpp b/src/mongo/s/move_chunk_request.cpp
index 8370000d511..3117f2a2cbb 100644
--- a/src/mongo/s/move_chunk_request.cpp
+++ b/src/mongo/s/move_chunk_request.cpp
@@ -86,14 +86,18 @@ StatusWith<MoveChunkRequest> MoveChunkRequest::createFromCommand(NamespaceString
}
{
- Status status = bsonExtractStringField(obj, kFromShardId, &request._fromShardId);
+ std::string shardStr;
+ Status status = bsonExtractStringField(obj, kFromShardId, &shardStr);
if (!status.isOK()) {
return status;
}
+ request._fromShardId = shardStr;
}
{
- Status status = bsonExtractStringField(obj, kToShardId, &request._toShardId);
+ std::string shardStr;
+ Status status = bsonExtractStringField(obj, kToShardId, &shardStr);
if (!status.isOK()) {
return status;
}
+ request._toShardId = shardStr;
@@ -132,8 +136,8 @@ void MoveChunkRequest::appendAsCommand(BSONObjBuilder* builder,
const NamespaceString& nss,
const ChunkVersion& shardVersion,
const ConnectionString& configServerConnectionString,
- const std::string& fromShardId,
- const std::string& toShardId,
+ const ShardId& fromShardId,
+ const ShardId& toShardId,
const ChunkRange& range,
int64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -145,8 +149,8 @@ void MoveChunkRequest::appendAsCommand(BSONObjBuilder* builder,
builder->append(kMoveChunk, nss.ns());
shardVersion.appendForCommands(builder);
builder->append(kConfigServerConnectionString, configServerConnectionString.toString());
- builder->append(kFromShardId, fromShardId);
- builder->append(kToShardId, toShardId);
+ builder->append(kFromShardId, fromShardId.toString());
+ builder->append(kToShardId, toShardId.toString());
range.append(builder);
builder->append(kMaxChunkSizeBytes, static_cast<long long>(maxChunkSizeBytes));
secondaryThrottle.append(builder);
diff --git a/src/mongo/s/move_chunk_request.h b/src/mongo/s/move_chunk_request.h
index 3cbb4101c1a..7004dc99b92 100644
--- a/src/mongo/s/move_chunk_request.h
+++ b/src/mongo/s/move_chunk_request.h
@@ -66,8 +66,8 @@ public:
const NamespaceString& nss,
const ChunkVersion& shardVersion,
const ConnectionString& configServerConnectionString,
- const std::string& fromShardId,
- const std::string& toShardId,
+ const ShardId& fromShardId,
+ const ShardId& toShardId,
const ChunkRange& range,
int64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
@@ -82,11 +82,11 @@ public:
return _configServerCS;
}
- const std::string& getFromShardId() const {
+ const ShardId& getFromShardId() const {
return _fromShardId;
}
- const std::string& getToShardId() const {
+ const ShardId& getToShardId() const {
return _toShardId;
}
@@ -135,10 +135,10 @@ private:
ConnectionString _configServerCS;
// The source shard id
- std::string _fromShardId;
+ ShardId _fromShardId;
// The recipient shard id
- std::string _toShardId;
+ ShardId _toShardId;
// Range of the chunk being moved
ChunkRange _range;
diff --git a/src/mongo/s/move_chunk_request_test.cpp b/src/mongo/s/move_chunk_request_test.cpp
index 7bf5d8b46a6..4b657e5215d 100644
--- a/src/mongo/s/move_chunk_request_test.cpp
+++ b/src/mongo/s/move_chunk_request_test.cpp
@@ -46,8 +46,8 @@ TEST(MoveChunkRequest, CreateAsCommandComplete) {
NamespaceString("TestDB", "TestColl"),
ChunkVersion(2, 3, OID::gen()),
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
- "shard0001",
- "shard0002",
+ ShardId("shard0001"),
+ ShardId("shard0002"),
ChunkRange(BSON("Key" << -100), BSON("Key" << 100)),
1024,
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
@@ -60,8 +60,8 @@ TEST(MoveChunkRequest, CreateAsCommandComplete) {
MoveChunkRequest::createFromCommand(NamespaceString(cmdObj["moveChunk"].String()), cmdObj));
ASSERT_EQ("TestDB.TestColl", request.getNss().ns());
ASSERT_EQ("TestConfigRS/CS1:12345,CS2:12345,CS3:12345", request.getConfigServerCS().toString());
- ASSERT_EQ("shard0001", request.getFromShardId());
- ASSERT_EQ("shard0002", request.getToShardId());
+ ASSERT_EQ(ShardId("shard0001"), request.getFromShardId());
+ ASSERT_EQ(ShardId("shard0002"), request.getToShardId());
ASSERT_EQ(BSON("Key" << -100), request.getMinKey());
ASSERT_EQ(BSON("Key" << 100), request.getMaxKey());
ASSERT_EQ(1024, request.getMaxChunkSizeBytes());
@@ -78,8 +78,8 @@ TEST(MoveChunkRequest, EqualityOperatorSameValue) {
NamespaceString("TestDB", "TestColl"),
ChunkVersion(2, 3, OID::gen()),
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
- "shard0001",
- "shard0002",
+ ShardId("shard0001"),
+ ShardId("shard0002"),
ChunkRange(BSON("Key" << -100), BSON("Key" << 100)),
1024,
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
@@ -104,8 +104,8 @@ TEST(MoveChunkRequest, EqualityOperatorDifferentValues) {
NamespaceString("TestDB", "TestColl"),
ChunkVersion(2, 3, OID::gen()),
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
- "shard0001",
- "shard0002",
+ ShardId("shard0001"),
+ ShardId("shard0002"),
ChunkRange(BSON("Key" << -100), BSON("Key" << 100)),
1024,
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
@@ -121,8 +121,8 @@ TEST(MoveChunkRequest, EqualityOperatorDifferentValues) {
NamespaceString("TestDB", "TestColl"),
ChunkVersion(2, 3, OID::gen()),
assertGet(ConnectionString::parse("TestConfigRS/CS1:12345,CS2:12345,CS3:12345")),
- "shard0001",
- "shard0002",
+ ShardId("shard0001"),
+ ShardId("shard0002"),
ChunkRange(BSON("Key" << 100), BSON("Key" << 200)), // Different key ranges
1024,
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 23bb6656f52..62fd02e5e22 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -35,6 +35,7 @@
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/shard_id.h"
#include "mongo/s/write_ops/batched_delete_document.h"
#include "mongo/s/write_ops/batched_update_document.h"
@@ -157,10 +158,10 @@ struct ShardEndpoint {
ShardEndpoint(const ShardEndpoint& other)
: shardName(other.shardName), shardVersion(other.shardVersion) {}
- ShardEndpoint(const std::string& shardName, const ChunkVersion& shardVersion)
+ ShardEndpoint(const ShardId& shardName, const ChunkVersion& shardVersion)
: shardName(shardName), shardVersion(shardVersion) {}
- const std::string shardName;
+ const ShardId shardName;
const ChunkVersion shardVersion;
};
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index fc5a8989a61..b6893c076ea 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -55,7 +55,8 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
const HostAndPort kTestConfigShardHost = HostAndPort("FakeConfigHost", 12345);
-const std::vector<std::string> kTestShardIds = {"FakeShard1", "FakeShard2", "FakeShard3"};
+const std::vector<ShardId> kTestShardIds = {
+ ShardId("FakeShard1"), ShardId("FakeShard2"), ShardId("FakeShard3")};
const std::vector<HostAndPort> kTestShardHosts = {HostAndPort("FakeShard1Host", 12345),
HostAndPort("FakeShard2Host", 12345),
HostAndPort("FakeShard3Host", 12345)};
@@ -74,7 +75,7 @@ public:
for (size_t i = 0; i < kTestShardIds.size(); i++) {
ShardType shardType;
- shardType.setName(kTestShardIds[i]);
+ shardType.setName(kTestShardIds[i].toString());
shardType.setHost(kTestShardHosts[i].toString());
shards.push_back(shardType);
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index 7aa51b14482..e39fdf90674 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -60,7 +60,7 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
ASSERT_EQ("TestDB.TestColl", chunk.getNS());
ASSERT_EQ(BSON("a" << -100LL), chunk.getMin());
ASSERT_EQ(BSON("a" << 100LL), chunk.getMax());
- ASSERT_EQ("TestShard0000", chunk.getShard());
+ ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
ASSERT_EQ(version, chunk.getVersion());
const auto& secondaryThrottle = request.getSecondaryThrottle();
@@ -90,7 +90,7 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
ASSERT_EQ("TestDB.TestColl", chunk.getNS());
ASSERT_EQ(BSON("a" << -100LL), chunk.getMin());
ASSERT_EQ(BSON("a" << 100LL), chunk.getMax());
- ASSERT_EQ("TestShard0000", chunk.getShard());
+ ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
ASSERT_EQ(version, chunk.getVersion());
const auto& secondaryThrottle = request.getSecondaryThrottle();
diff --git a/src/mongo/s/request_types/balance_chunk_request_type.cpp b/src/mongo/s/request_types/balance_chunk_request_type.cpp
index 77b0e794eac..806f08f71fe 100644
--- a/src/mongo/s/request_types/balance_chunk_request_type.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_type.cpp
@@ -127,7 +127,7 @@ BSONObj BalanceChunkRequest::serializeToMoveCommandForConfig(
BSONObjBuilder cmdBuilder;
cmdBuilder.append(kConfigSvrMoveChunk, 1);
cmdBuilder.appendElements(chunk.toBSON());
- cmdBuilder.append(kToShardId, newShardId);
+ cmdBuilder.append(kToShardId, newShardId.toString());
cmdBuilder.append(kMaxChunkSizeBytes, static_cast<long long>(maxChunkSizeBytes));
{
BSONObjBuilder secondaryThrottleBuilder(cmdBuilder.subobjStart(kSecondaryThrottle));
diff --git a/src/mongo/s/set_shard_version_request.cpp b/src/mongo/s/set_shard_version_request.cpp
index b21c4423a84..979a5f5f915 100644
--- a/src/mongo/s/set_shard_version_request.cpp
+++ b/src/mongo/s/set_shard_version_request.cpp
@@ -52,7 +52,7 @@ const char kNoConnectionVersioning[] = "noConnectionVersioning";
} // namespace
SetShardVersionRequest::SetShardVersionRequest(ConnectionString configServer,
- std::string shardName,
+ ShardId shardName,
ConnectionString shardConnectionString)
: _init(true),
_isAuthoritative(true),
@@ -61,7 +61,7 @@ SetShardVersionRequest::SetShardVersionRequest(ConnectionString configServer,
_shardCS(std::move(shardConnectionString)) {}
SetShardVersionRequest::SetShardVersionRequest(ConnectionString configServer,
- std::string shardName,
+ ShardId shardName,
ConnectionString shardConnectionString,
NamespaceString nss,
ChunkVersion version,
@@ -78,14 +78,14 @@ SetShardVersionRequest::SetShardVersionRequest() = default;
SetShardVersionRequest SetShardVersionRequest::makeForInit(
const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shardConnectionString) {
return SetShardVersionRequest(configServer, shardName, shardConnectionString);
}
SetShardVersionRequest SetShardVersionRequest::makeForInitNoPersist(
const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shardConnectionString) {
auto ssv = SetShardVersionRequest(configServer, shardName, shardConnectionString);
ssv._noConnectionVersioning = true;
@@ -94,7 +94,7 @@ SetShardVersionRequest SetShardVersionRequest::makeForInitNoPersist(
SetShardVersionRequest SetShardVersionRequest::makeForVersioning(
const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shardConnectionString,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
@@ -106,7 +106,7 @@ SetShardVersionRequest SetShardVersionRequest::makeForVersioning(
SetShardVersionRequest SetShardVersionRequest::makeForVersioningNoPersist(
const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
@@ -134,7 +134,10 @@ StatusWith<SetShardVersionRequest> SetShardVersionRequest::parseFromBSON(const B
}
{
- Status status = bsonExtractStringField(cmdObj, kShardName, &request._shardName);
+ std::string shardName;
+ Status status = bsonExtractStringField(cmdObj, kShardName, &shardName);
if (!status.isOK())
return status;
+ request._shardName = ShardId(shardName);
}
@@ -212,7 +215,7 @@ BSONObj SetShardVersionRequest::toBSON() const {
cmdBuilder.append(kInit, _init);
cmdBuilder.append(kAuthoritative, _isAuthoritative);
cmdBuilder.append(kConfigServer, _configServer.toString());
- cmdBuilder.append(kShardName, _shardName);
+ cmdBuilder.append(kShardName, _shardName.toString());
cmdBuilder.append(kShardConnectionString, _shardCS.toString());
if (_init) {
diff --git a/src/mongo/s/set_shard_version_request.h b/src/mongo/s/set_shard_version_request.h
index af86db021e9..857866a36a9 100644
--- a/src/mongo/s/set_shard_version_request.h
+++ b/src/mongo/s/set_shard_version_request.h
@@ -34,6 +34,7 @@
#include "mongo/client/connection_string.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/shard_id.h"
namespace mongo {
@@ -55,7 +56,7 @@ public:
* are marked as sharded.
*/
static SetShardVersionRequest makeForInit(const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shardConnectionString);
/**
@@ -68,7 +69,7 @@ public:
*/
static SetShardVersionRequest makeForInitNoPersist(
const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shardConnectionString);
/**
@@ -81,7 +82,7 @@ public:
* are marked as sharded.
*/
static SetShardVersionRequest makeForVersioning(const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
@@ -96,7 +97,7 @@ public:
* marked as sharded.
*/
static SetShardVersionRequest makeForVersioningNoPersist(const ConnectionString& configServer,
- const std::string& shardName,
+ const ShardId& shardName,
const ConnectionString& shard,
const NamespaceString& nss,
const ChunkVersion& nssVersion,
@@ -134,7 +135,7 @@ public:
return _configServer;
}
- const std::string& getShardName() const {
+ const ShardId& getShardName() const {
return _shardName;
}
@@ -164,11 +165,11 @@ public:
private:
SetShardVersionRequest(ConnectionString configServer,
- std::string shardName,
+ ShardId shardName,
ConnectionString shardConnectionString);
SetShardVersionRequest(ConnectionString configServer,
- std::string shardName,
+ ShardId shardName,
ConnectionString shardConnectionString,
NamespaceString nss,
ChunkVersion version,
@@ -182,7 +183,7 @@ private:
ConnectionString _configServer;
- std::string _shardName;
+ ShardId _shardName;
ConnectionString _shardCS;
// These values are only set if _init is false
diff --git a/src/mongo/s/set_shard_version_request_test.cpp b/src/mongo/s/set_shard_version_request_test.cpp
index 794d8bb552c..776a8e00c8f 100644
--- a/src/mongo/s/set_shard_version_request_test.cpp
+++ b/src/mongo/s/set_shard_version_request_test.cpp
@@ -258,7 +258,7 @@ TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
TEST(SetShardVersionRequest, ToSSVCommandInit) {
SetShardVersionRequest ssv =
- SetShardVersionRequest::makeForInit(configCS, "TestShard", shardCS);
+ SetShardVersionRequest::makeForInit(configCS, ShardId("TestShard"), shardCS);
ASSERT(ssv.isInit());
ASSERT(ssv.isAuthoritative());
@@ -286,7 +286,7 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
TEST(SetShardVersionRequest, ToSSVCommandInitNoConnectionVersioning) {
SetShardVersionRequest ssv =
- SetShardVersionRequest::makeForInitNoPersist(configCS, "TestShard", shardCS);
+ SetShardVersionRequest::makeForInitNoPersist(configCS, ShardId("TestShard"), shardCS);
ASSERT(ssv.isInit());
ASSERT(ssv.isAuthoritative());
@@ -318,7 +318,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioning(
- configCS, "TestShard", shardCS, NamespaceString("db.coll"), chunkVersion, false);
+ configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, false);
ASSERT(!ssv.isInit());
ASSERT(!ssv.isAuthoritative());
@@ -353,7 +353,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioning(
- configCS, "TestShard", shardCS, NamespaceString("db.coll"), chunkVersion, true);
+ configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, true);
ASSERT(!ssv.isInit());
ASSERT(ssv.isAuthoritative());
@@ -388,7 +388,7 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest ssv = SetShardVersionRequest::makeForVersioningNoPersist(
- configCS, "TestShard", shardCS, NamespaceString("db.coll"), chunkVersion, true);
+ configCS, ShardId("TestShard"), shardCS, NamespaceString("db.coll"), chunkVersion, true);
ASSERT(!ssv.isInit());
ASSERT(ssv.isAuthoritative());
diff --git a/src/mongo/s/shard_id.cpp b/src/mongo/s/shard_id.cpp
new file mode 100644
index 00000000000..961c8711e17
--- /dev/null
+++ b/src/mongo/s/shard_id.cpp
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include <functional>
+#include <string.h>
+
+#include "mongo/base/status_with.h"
+#include "mongo/s/shard_id.h"
+
+namespace mongo {
+
+using std::string;
+using std::ostream;
+
+bool ShardId::operator==(const ShardId& other) const {
+ return (this->_shardId == other._shardId);
+}
+
+bool ShardId::operator!=(const ShardId& other) const {
+ return !(*this == other);
+}
+
+bool ShardId::operator==(const string& other) const {
+ return (this->_shardId == other);
+}
+
+bool ShardId::operator!=(const string& other) const {
+ return !(*this == other);
+}
+
+ShardId::operator StringData() {
+ return StringData(_shardId.data(), _shardId.size());
+}
+
+const string& ShardId::toString() const {
+ return _shardId;
+}
+
+bool ShardId::isValid() const {
+ return !_shardId.empty();
+}
+
+ostream& operator<<(ostream& os, const ShardId& shardId) {
+ os << shardId._shardId;
+ return os;
+}
+
+bool ShardId::operator<(const ShardId& other) const {
+ return _shardId < other._shardId;
+}
+
+int ShardId::compare(const ShardId& other) const {
+ return _shardId.compare(other._shardId);
+}
+
+std::size_t ShardId::Hasher::operator()(const ShardId& shardId) const {
+ return std::hash<std::string>()(shardId._shardId);
+}
+} // namespace mongo
diff --git a/src/mongo/s/shard_id.h b/src/mongo/s/shard_id.h
new file mode 100644
index 00000000000..6456fc41677
--- /dev/null
+++ b/src/mongo/s/shard_id.h
@@ -0,0 +1,105 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <iostream>
+#include <string>
+
+#include "mongo/base/string_data.h"
+#include "mongo/bson/util/builder.h"
+
+
+namespace mongo {
+
+class NamespaceString;
+
+/**
+ * Representation of a shard identifier.
+ */
+class ShardId {
+public:
+ friend std::ostream& operator<<(std::ostream&, const ShardId&);
+
+ ShardId() = default;
+
+ // Note that this c-tor allows the implicit conversion from std::string
+ ShardId(std::string shardId) : _shardId(std::move(shardId)) {}
+
+ // Implicit StringData conversion
+ operator StringData();
+
+ bool operator==(const ShardId&) const;
+ bool operator!=(const ShardId&) const;
+ bool operator==(const std::string&) const;
+ bool operator!=(const std::string&) const;
+
+ template <size_t N>
+ bool operator==(const char (&val)[N]) const {
+ return (strncmp(val, _shardId.data(), N) == 0);
+ }
+
+ template <size_t N>
+ bool operator!=(const char (&val)[N]) const {
+ return (strncmp(val, _shardId.data(), N) != 0);
+ }
+
+ // The operator< is needed to do proper comparison in a std::map
+ bool operator<(const ShardId&) const;
+
+ const std::string& toString() const;
+
+ /**
+ * Returns a negative value, zero, or a positive value if 'this' is less than, equal to,
+ * or greater than 'other' in lexicographical order.
+ */
+ int compare(const ShardId& other) const;
+
+ /**
+ * Returns true if _shardId is not empty. More validation may be added in the future.
+ */
+ bool isValid() const;
+
+ /**
+ * Functor compatible with std::hash for std::unordered_{map,set}
+ */
+ struct Hasher {
+ std::size_t operator()(const ShardId&) const;
+ };
+
+private:
+ std::string _shardId;
+};
+
+template <typename Allocator>
+StringBuilderImpl<Allocator>& operator<<(StringBuilderImpl<Allocator>& stream,
+ const ShardId& shardId) {
+ return stream << shardId.toString();
+}
+
+} // namespace mongo
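A minimal usage sketch of the interface declared above, with an invented shard name and a free sketch() function (illustrative only, not code from this commit):

#include <string>
#include "mongo/base/string_data.h"
#include "mongo/s/shard_id.h"

void sketch() {
    // Non-explicit c-tor: a std::string converts implicitly to ShardId.
    mongo::ShardId id = std::string("shard0000");
    const bool valid = id.isValid();          // true: the id is non-empty
    const bool same = (id == "shard0000");    // string-literal comparison
    const std::string& name = id.toString();  // back to std::string, e.g. for BSON appends
    mongo::StringData view(id);               // implicit StringData conversion
    (void)valid;
    (void)same;
    (void)name;
    (void)view;
}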
diff --git a/src/mongo/s/shard_id_test.cpp b/src/mongo/s/shard_id_test.cpp
new file mode 100644
index 00000000000..6393c9e9a9e
--- /dev/null
+++ b/src/mongo/s/shard_id_test.cpp
@@ -0,0 +1,107 @@
+/* Copyright 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects
+ * for all of the code used other than as permitted herein. If you modify
+ * file(s) with this exception, you may extend this exception to your
+ * version of the file(s), but you are not obligated to do so. If you do not
+ * wish to do so, delete this exception statement from your version. If you
+ * delete this exception statement from all source files in the program,
+ * then also delete it in the license file.
+ */
+
+#include "mongo/s/shard_id.h"
+
+#include "mongo/base/string_data.h"
+#include "mongo/platform/basic.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using std::string;
+using namespace mongo;
+
+TEST(ShardId, Valid) {
+ ShardId shardId("my_shard_id");
+ ASSERT(shardId.isValid());
+}
+
+TEST(ShardId, Invalid) {
+ ShardId shardId("");
+ ASSERT(!shardId.isValid());
+}
+
+TEST(ShardId, Roundtrip) {
+ string shard_id_str("my_shard_id");
+ ShardId shardId(shard_id_str);
+ ASSERT(shard_id_str == shardId.toString());
+}
+
+TEST(ShardId, ToStringData) {
+ string shard_id_str("my_shard_id");
+ ShardId shardId(shard_id_str);
+ StringData stringData(shardId);
+ ASSERT(stringData == shard_id_str);
+}
+
+TEST(ShardId, Assign) {
+ ShardId shardId1("my_shard_id");
+ auto shardId2 = shardId1;
+ ASSERT(shardId1 == shardId2);
+}
+
+TEST(ShardId, Less) {
+ string a("aaa");
+ string a1("aaa");
+ string b("bbb");
+ ShardId sa(a);
+ ShardId sa1(a1);
+ ShardId sb(b);
+ ASSERT_EQUALS(sa < sa1, a < a1);
+ ASSERT_EQUALS(sb < sa1, b < a1);
+ ASSERT_EQUALS(sa < sb, a < b);
+}
+
+TEST(ShardId, Compare) {
+ string a("aaa");
+ string a1("aaa");
+ string b("bbb");
+ ShardId sa(a);
+ ShardId sa1(a1);
+ ShardId sb(b);
+ ASSERT_EQUALS(sa.compare(sa1), a.compare(a1));
+ ASSERT_EQUALS(sb.compare(sa1), b.compare(a1));
+ ASSERT_EQUALS(sa.compare(sb), a.compare(b));
+}
+
+TEST(ShardId, Equals) {
+ string a("aaa");
+ string a1("aaa");
+ string b("bbb");
+ ShardId sa(a);
+ ShardId sa1(a1);
+ ShardId sb(b);
+ ASSERT(sa == sa1);
+ ASSERT(sa != sb);
+ ASSERT(sa == "aaa");
+ ASSERT(sa != "bbb");
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 7b96fb78239..61f90ab9588 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -191,7 +191,7 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
cmd.append("splitChunk", nss.ns());
cmd.append("configdb",
Grid::get(txn)->shardRegistry()->getConfigServerConnectionString().toString());
- cmd.append("from", shardId);
+ cmd.append("from", shardId.toString());
cmd.append("keyPattern", shardKeyPattern.toBSON());
collectionVersion.appendForCommands(&cmd);
cmd.append(kMinKey, minKey);
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index a20f5c98f47..01513e0eb76 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -137,7 +137,7 @@ TEST(WriteOpTests, SingleOp) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -177,7 +177,7 @@ TEST(WriteOpTests, SingleError) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -223,7 +223,7 @@ TEST(WriteOpTests, SingleTargetError) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterHalfRange(nss, endpoint, &targeter);
@@ -266,7 +266,7 @@ TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -316,7 +316,7 @@ TEST(WriteOpTests, SingleStaleError) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -383,7 +383,7 @@ TEST(WriteOpTests, MultiOpSameShardOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -427,7 +427,7 @@ TEST(WriteOpTests, MultiOpSameShardUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -472,8 +472,8 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -531,8 +531,8 @@ TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -583,8 +583,8 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -654,8 +654,8 @@ TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -710,8 +710,8 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -822,8 +822,8 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -884,8 +884,8 @@ TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -948,8 +948,8 @@ TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1015,8 +1015,8 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1082,8 +1082,8 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1153,7 +1153,7 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -1197,8 +1197,8 @@ TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1248,7 +1248,7 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterHalfRange(nss, endpoint, &targeter);
@@ -1311,7 +1311,7 @@ TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterHalfRange(nss, endpoint, &targeter);
@@ -1368,8 +1368,8 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1421,8 +1421,8 @@ TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1474,8 +1474,8 @@ TEST(WriteOpTests, MultiOpAbortOrdered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1524,8 +1524,8 @@ TEST(WriteOpTests, MultiOpAbortUnordered) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1565,8 +1565,8 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion::IGNORED());
- ShardEndpoint endpointB("shardB", ChunkVersion::IGNORED());
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterSplitRange(nss, endpointA, endpointB, &targeter);
@@ -1617,7 +1617,7 @@ TEST(WriteOpLimitTests, OneBigDoc) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -1652,7 +1652,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -1699,7 +1699,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
@@ -1745,7 +1745,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
initTargeterFullRange(nss, endpoint, &targeter);
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index e488c068e47..1f43d6188be 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -86,7 +86,7 @@ TEST(WriteOpTests, TargetSingle) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
@@ -144,9 +144,9 @@ TEST(WriteOpTests, TargetMultiOneShard) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion(10, 0, OID()));
- ShardEndpoint endpointB("shardB", ChunkVersion(20, 0, OID()));
- ShardEndpoint endpointC("shardB", ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()));
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
@@ -187,9 +187,9 @@ TEST(WriteOpTests, TargetMultiAllShards) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpointA("shardA", ChunkVersion(10, 0, OID()));
- ShardEndpoint endpointB("shardB", ChunkVersion(20, 0, OID()));
- ShardEndpoint endpointC("shardB", ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
+ ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion(20, 0, OID()));
+ ShardEndpoint endpointC(ShardId("shardB"), ChunkVersion(20, 0, OID()));
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpointA, nss, BSON("x" << MINKEY), BSON("x" << 0)));
@@ -239,7 +239,7 @@ TEST(WriteOpTests, ErrorSingle) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
@@ -285,7 +285,7 @@ TEST(WriteOpTests, CancelSingle) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));
@@ -328,7 +328,7 @@ TEST(WriteOpTests, RetrySingleOp) {
OperationContextNoop txn;
NamespaceString nss("foo.bar");
- ShardEndpoint endpoint("shard", ChunkVersion::IGNORED());
+ ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
vector<MockRange*> mockRanges;
mockRanges.push_back(new MockRange(endpoint, nss, BSON("x" << MINKEY), BSON("x" << MAXKEY)));