summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEsha Maharishi <esha.maharishi@mongodb.com>2016-09-19 11:05:03 -0400
committerEsha Maharishi <esha.maharishi@mongodb.com>2016-09-19 11:05:24 -0400
commitfd19ddff758912365f22813d2ec8c93688676144 (patch)
tree552c85c591e919a774aee33476b1f474bebc1193
parent69c7bd7247639e143d21db25460d33667224a8b1 (diff)
downloadmongo-fd19ddff758912365f22813d2ec8c93688676144.tar.gz
SERVER-24527 add test to ensure shard undergoes sharding initialization through setShardVersion
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml1
-rw-r--r--jstests/sharding/shard_aware_on_set_shard_version.js61
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp30
3 files changed, 80 insertions, 12 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index edfab8060ea..e7b5b9fbef7 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -9,6 +9,7 @@ selector:
- jstests/sharding/shard_aware_init.js
- jstests/sharding/shard_aware_on_add_shard.js
- jstests/sharding/shard_aware_on_config_election.js
+ - jstests/sharding/shard_aware_on_set_shard_version.js
- jstests/sharding/shard_aware_primary_failover.js
- jstests/sharding/shard_identity_config_update.js
- jstests/sharding/add_shard_to_zone.js
diff --git a/jstests/sharding/shard_aware_on_set_shard_version.js b/jstests/sharding/shard_aware_on_set_shard_version.js
new file mode 100644
index 00000000000..d5919d7648e
--- /dev/null
+++ b/jstests/sharding/shard_aware_on_set_shard_version.js
@@ -0,0 +1,61 @@
+/**
+ * Tests the correctness of sharding initialization through setShardVersion.
+ *
+ * Though sharding initialization is typically done:
+ *
+ * 1) when the config server inserts the shardIdentity document on a new shard, or
+ * 2) when the shard starts up with a shardIdentity document already on disk
+ *
+ * the initialization may be done through setShardVersion if a sharded connection from a mongos
+ * or config server reaches the new shard before the shardIdentity insert triggers initialization.
+ */
+(function() {
+ 'use strict';
+
+ // Prevent a config primary from upserting the shardIdentity document into the shards by using
+ // the dontUpsertShardIdentityOnNewShards failpoint.
+ var st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ other: {
+ rs: true,
+ rsOptions: {nodes: 1},
+ configOptions: {
+ setParameter: "failpoint.dontUpsertShardIdentityOnNewShards={'mode':'alwaysOn'}"
+ }
+ }
+ });
+
+ st.configRS.awaitReplication(60 * 1000);
+ var configVersion = st.configRS.getPrimary().getDB('config').getCollection('version').findOne();
+ assert.neq(null, configVersion);
+ var clusterId = configVersion.clusterId;
+ assert.neq(null, clusterId);
+
+ // The balancer, even when disabled, initiates a sharded connection to each new shard through
+ // its periodic check that no shards' process OIDs clash. Expect that this check will send
+ // setShardVersion and trigger sharding initialization on the new shard soon.
+    var fiveMinutes = 5 * 60 * 1000;
+ assert.soon(function() {
+ var res = st.rs0.getPrimary().adminCommand({shardingState: 1});
+ assert.commandWorked(res);
+ if (res.enabled) {
+            // If sharding state was initialized, make sure all fields are correct. Note that
+            // the clusterId field is not initialized through setShardVersion.
+ return (st.configRS.getURL() === res.configServer) && (st.rs0.name === res.shardName) &&
+ (!clusterId.equals(res.clusterId));
+ } else {
+ return false;
+ }
+ }, "Shard failed to initialize sharding awareness after being added as a shard", fiveMinutes);
+
+ // Assert that the shardIdentity document was not somehow inserted on the shard, triggering
+ // sharding initialization unexpectedly.
+ var res = st.rs0.getPrimary().getDB("admin").getCollection("system.version").findOne({
+ _id: "shardIdentity"
+ });
+ assert.eq(null, res);
+
+ st.stop();
+
+})();
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
index 88c7d4c482f..b71af9ea0d5 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp
@@ -75,6 +75,7 @@
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/stdx/memory.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/hostandport.h"
@@ -82,6 +83,8 @@
namespace mongo {
+MONGO_FP_DECLARE(dontUpsertShardIdentityOnNewShards);
+
using std::string;
using std::vector;
using str::stream;
@@ -829,22 +832,25 @@ StatusWith<string> ShardingCatalogManagerImpl::addShard(
}
}
- auto commandRequest = createShardIdentityUpsertForAddShard(txn, shardType.getName());
+ if (!MONGO_FAIL_POINT(dontUpsertShardIdentityOnNewShards)) {
+ auto commandRequest = createShardIdentityUpsertForAddShard(txn, shardType.getName());
- LOG(2) << "going to insert shardIdentity document into shard: " << shardType;
+ LOG(2) << "going to insert shardIdentity document into shard: " << shardType;
- auto swCommandResponse = _runCommandForAddShard(txn, targeter.get(), "admin", commandRequest);
- if (!swCommandResponse.isOK()) {
- return swCommandResponse.getStatus();
- }
+ auto swCommandResponse =
+ _runCommandForAddShard(txn, targeter.get(), "admin", commandRequest);
+ if (!swCommandResponse.isOK()) {
+ return swCommandResponse.getStatus();
+ }
- auto commandResponse = std::move(swCommandResponse.getValue());
+ auto commandResponse = std::move(swCommandResponse.getValue());
- BatchedCommandResponse batchResponse;
- auto batchResponseStatus =
- Shard::CommandResponse::processBatchWriteResponse(commandResponse, &batchResponse);
- if (!batchResponseStatus.isOK()) {
- return batchResponseStatus;
+ BatchedCommandResponse batchResponse;
+ auto batchResponseStatus =
+ Shard::CommandResponse::processBatchWriteResponse(commandResponse, &batchResponse);
+ if (!batchResponseStatus.isOK()) {
+ return batchResponseStatus;
+ }
}
log() << "going to insert new entry for shard into config.shards: " << shardType.toString();