author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-24 11:52:27 -0400
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-06-24 11:52:27 -0400
commit      e9746acfea16d422d6f94e878a670496ca0f916c (patch)
tree        2c02b035a4baec74d1cff745971608849971d710
parent      13b915aac8cbdceb4fee6b972dbd821003d9d5e2 (diff)
download    mongo-e9746acfea16d422d6f94e878a670496ca0f916c.tar.gz
Revert "SERVER-23930 Ensure new shard is available in the ShardRegistry after addShard"
This reverts commit 13b915aac8cbdceb4fee6b972dbd821003d9d5e2.
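
Note on the change being undone: the reverted commit (SERVER-23930) refreshed the ShardRegistry only at the very end of addShard, after the change-log write, and reloaded a second time if the first call had merely joined a reload that was already in flight. This revert restores the earlier behaviour: a single grid.shardRegistry()->reload(txn) immediately after the shard document is written, before the databases discovered on the new shard are registered; the test changes below simply move the expectGetShards expectation to match. The standalone sketch that follows illustrates only the retry-once idiom being removed. FakeShardRegistry and its reload() semantics (returning false when the call joined an in-progress reload) are hypothetical stand-ins inferred from the comment in the removed hunk, not the real mongo classes.

// Minimal standalone sketch (hypothetical types, not the real mongo classes) of the
// reload-twice idiom used at the end of addShard by the commit being reverted.
#include <iostream>

class FakeShardRegistry {
public:
    // Returns true if this call performed a fresh reload, false if it only joined a
    // reload another thread had already started (assumed behaviour, per the removed comment).
    bool reload() {
        bool performedFreshReload = !reloadAlreadyInProgress_;
        reloadAlreadyInProgress_ = false;  // any in-flight reload finishes here
        ++reloadCount_;
        return performedFreshReload;
    }

    void simulateConcurrentReloadInProgress() { reloadAlreadyInProgress_ = true; }
    int reloadCount() const { return reloadCount_; }

private:
    bool reloadAlreadyInProgress_ = false;
    int reloadCount_ = 0;
};

int main() {
    FakeShardRegistry registry;
    registry.simulateConcurrentReloadInProgress();

    // The idiom from the removed hunk: if the first reload only joined a concurrent
    // reload (which may have started before the shard document was inserted), reload
    // once more so the new shard is guaranteed to be visible afterwards.
    if (!registry.reload()) {
        registry.reload();
    }

    std::cout << "reload() was invoked " << registry.reloadCount() << " time(s)\n";
    return 0;
}
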
-rw-r--r--    jstests/sharding/move_chunk_basic.js    40
-rw-r--r--    src/mongo/s/catalog/catalog_manager_common.cpp    9
-rw-r--r--    src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp    24
3 files changed, 37 insertions, 36 deletions
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 69bdf4d8c90..354a222da10 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -6,35 +6,24 @@
'use strict';
var st = new ShardingTest({mongos: 1, shards: 2});
- var kDbName = 'db';
var mongos = st.s0;
- var shard0 = st.shard0.shardName;
- var shard1 = st.shard1.shardName;
-
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- st.ensurePrimaryShard(kDbName, shard0);
- // Fail if invalid namespace.
- assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+ var kDbName = 'db';
- // Fail if database does not exist.
- assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+ var shards = mongos.getCollection('config.shards').find().toArray();
- // Fail if collection is unsharded.
- assert.commandFailed(
- mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
function testHashed() {
var ns = kDbName + '.fooHashed';
+
+ // Errors if either bounds is not a valid shard key
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
assert(aChunk);
-
- // Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
assert.commandFailed(mongos.adminCommand(
{moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
@@ -51,7 +40,6 @@
assert.commandWorked(
mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
-
assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
@@ -82,6 +70,22 @@
mongos.getDB(kDbName).foo.drop();
}
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+
+ st.ensurePrimaryShard(kDbName, shard0);
+
+ // Fail if invalid namespace.
+ var res =
+ assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+ assert.eq(res.info);
+
+ // Fail if database does not exist.
+ assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+
+ // Fail if collection is unsharded.
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
+
testHashed();
testNotHashed({a: 1});
diff --git a/src/mongo/s/catalog/catalog_manager_common.cpp b/src/mongo/s/catalog/catalog_manager_common.cpp
index 228d5e492f1..82c2a863fa9 100644
--- a/src/mongo/s/catalog/catalog_manager_common.cpp
+++ b/src/mongo/s/catalog/catalog_manager_common.cpp
@@ -352,6 +352,9 @@ StatusWith<string> CatalogManagerCommon::addShard(OperationContext* txn,
return result;
}
+ // Make sure the new shard is visible
+ grid.shardRegistry()->reload(txn);
+
// Add all databases which were discovered on the new shard
for (const string& dbName : dbNamesStatus.getValue()) {
DatabaseType dbt;
@@ -373,12 +376,6 @@ StatusWith<string> CatalogManagerCommon::addShard(OperationContext* txn,
logChange(txn, "addShard", "", shardDetails.obj());
- // Make sure the new shard is visible from this point on. Do the reload twice in case there was
- // a concurrent reload, which started before we added the shard.
- if (!Grid::get(txn)->shardRegistry()->reload(txn)) {
- Grid::get(txn)->shardRegistry()->reload(txn);
- }
-
return shardType.getName();
}
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 3c93c81f51a..0d8cbfa7aae 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -308,6 +308,9 @@ TEST_F(AddShardTest, Standalone) {
expectInserts(NamespaceString(ShardType::ConfigNS), {expectedShard.toBSON()});
+ // Shards are being reloaded
+ expectGetShards({expectedShard});
+
// Add the existing database from the newly added shard
{
DatabaseType dbTestDB1;
@@ -330,9 +333,6 @@ TEST_F(AddShardTest, Standalone) {
// Expect the change log operation
expectAddShardChangeLog("StandaloneShard", "StandaloneHost:12345");
- // Shards are being reloaded
- expectGetShards({expectedShard});
-
future.timed_get(kFutureTimeout);
}
@@ -402,6 +402,9 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
expectInserts(NamespaceString(ShardType::ConfigNS), {expectedShard.toBSON()});
+ // Shards are being reloaded
+ expectGetShards({expectedShard});
+
// Add the existing database from the newly added shard
{
DatabaseType dbTestDB1;
@@ -424,9 +427,6 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
// Expect the change log operation
expectAddShardChangeLog("shard0006", "StandaloneHost:12345");
- // Shards are being reloaded
- expectGetShards({expectedShard});
-
future.timed_get(kFutureTimeout);
}
@@ -895,6 +895,8 @@ TEST_F(AddShardTest, ReplicaSet) {
expectInserts(NamespaceString(ShardType::ConfigNS), {newShard.toBSON()});
+ expectGetShards({newShard});
+
// Add the existing database from the newly added shard
DatabaseType shardDB;
shardDB.setName("shardDB");
@@ -905,8 +907,6 @@ TEST_F(AddShardTest, ReplicaSet) {
expectAddShardChangeLog(expectedShardName, connString.toString());
- expectGetShards({newShard});
-
future.timed_get(kFutureTimeout);
}
@@ -949,6 +949,8 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
expectInserts(NamespaceString(ShardType::ConfigNS), {newShard.toBSON()});
+ expectGetShards({newShard});
+
// Add the existing database from the newly added shard
DatabaseType shardDB;
shardDB.setName("shardDB");
@@ -981,8 +983,6 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
expectAddShardChangeLog(expectedShardName, connString.toString());
- expectGetShards({newShard});
-
future.timed_get(kFutureTimeout);
}
@@ -1023,10 +1023,10 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
expectInserts(NamespaceString(ShardType::ConfigNS), {newShard.toBSON()});
- expectAddShardChangeLog(expectedShardName, seedString.toString());
-
expectGetShards({newShard});
+ expectAddShardChangeLog(expectedShardName, seedString.toString());
+
future.timed_get(kFutureTimeout);
}