-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_6.yml                                          1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_6.yml                                     1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_audit_5.yml                               1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml                            1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_auth_misc.yml                                  3
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_ese_5.yml                                      1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_ese_misc.yml                                   1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_5.yml      1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml   1
-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_misc.yml                                       1
-rw-r--r--  jstests/sharding/remove4.js                                                              55
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.h                                         10
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp               119
13 files changed, 45 insertions, 151 deletions
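
At a glance, this change backs out the special handling for draining primary
shards during initial chunk creation: jstests/sharding/remove4.js is deleted
and removed from the affected resmoke suite lists, _createFirstChunks is
demoted from a private ShardingCatalogManager method to a file-local
createFirstChunks() that no longer avoids a draining primary shard, and
checkForExistingChunks merely moves within the .cpp.

The draining flag the removed C++ consulted lives in config.shards. A minimal
mongo-shell sketch of inspecting it (the host/port is a placeholder, not part
of this change):

// Connect to a mongos and list shards currently being drained by removeShard.
// The removed code ran an equivalent count: {name: <shard>, draining: true}.
const conn = new Mongo('localhost:27017');  // assumption: local mongos
const configDB = conn.getDB('config');
configDB.shards.find({draining: true}).forEach(printjson);
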
diff --git a/buildscripts/resmokeconfig/suites/sharding_6.yml b/buildscripts/resmokeconfig/suites/sharding_6.yml
index 868d95b5276..92cb7b22f73 100644
--- a/buildscripts/resmokeconfig/suites/sharding_6.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_6.yml
@@ -19,7 +19,6 @@ selector:
- jstests/sharding/all_config_servers_blackholed_from_mongos.js
- jstests/sharding/authCommands.js
- jstests/sharding/addshard4.js
- - jstests/sharding/remove4.js
- jstests/sharding/split_with_force.js
- jstests/sharding/auto_rebalance_parallel_replica_sets.js
- jstests/sharding/database_and_shard_versioning_all_commands.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_6.yml b/buildscripts/resmokeconfig/suites/sharding_auth_6.yml
index f4cf7f6235f..f1e181ba88f 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_6.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_6.yml
@@ -18,7 +18,6 @@ selector:
# 14 minutes to run on linux-64-debug
# 18 minutes to run on enterprise-windows-64-2k8
roots:
- - jstests/sharding/remove4.js
- jstests/sharding/mongos_no_replica_set_refresh.js
- jstests/sharding/repl_monitor_refresh.js
- jstests/sharding/bouncing_count.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit_5.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit_5.yml
index 98902601eb5..cc295f0a6e1 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit_5.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit_5.yml
@@ -20,7 +20,6 @@ selector:
roots:
- jstests/sharding/drop_sharded_db.js
- jstests/sharding/query_after_multi_write.js
- - jstests/sharding/remove4.js
- jstests/sharding/find_and_modify_after_multi_write.js
- jstests/sharding/mongos_no_replica_set_refresh.js
- jstests/sharding/bouncing_count.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml b/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
index f18842f91ed..05ed04fe320 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_audit_misc.yml
@@ -77,7 +77,6 @@ selector:
- jstests/sharding/mongos_shard_failure_tolerance.js
- jstests/sharding/drop_sharded_db.js
- jstests/sharding/query_after_multi_write.js
- - jstests/sharding/remove4.js
- jstests/sharding/find_and_modify_after_multi_write.js
- jstests/sharding/mongos_no_replica_set_refresh.js
- jstests/sharding/bouncing_count.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml b/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
index dbbcabf98d5..ca8dd89ddef 100644
--- a/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_auth_misc.yml
@@ -81,7 +81,6 @@ selector:
- jstests/sharding/query_after_multi_write.js
- jstests/sharding/find_and_modify_after_multi_write.js
- jstests/sharding/shard_collection_basic.js
- - jstests/sharding/remove4.js
- jstests/sharding/mongos_no_replica_set_refresh.js
- jstests/sharding/repl_monitor_refresh.js
- jstests/sharding/bouncing_count.js
@@ -386,4 +385,4 @@ executor:
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''
- readMode: commands
\ No newline at end of file
+ readMode: commands
diff --git a/buildscripts/resmokeconfig/suites/sharding_ese_5.yml b/buildscripts/resmokeconfig/suites/sharding_ese_5.yml
index bfdbba270cc..71c092bb8a6 100644
--- a/buildscripts/resmokeconfig/suites/sharding_ese_5.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_ese_5.yml
@@ -26,7 +26,6 @@ selector:
- jstests/sharding/drop_sharded_db.js
- jstests/sharding/bouncing_count.js
- jstests/sharding/find_and_modify_after_multi_write.js
- - jstests/sharding/remove4.js

executor:
config:
diff --git a/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml b/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
index b966148b90f..9501b743779 100644
--- a/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_ese_misc.yml
@@ -55,7 +55,6 @@ selector:
- jstests/sharding/drop_sharded_db.js
- jstests/sharding/bouncing_count.js
- jstests/sharding/find_and_modify_after_multi_write.js
- - jstests/sharding/remove4.js
- jstests/sharding/query_after_multi_write.js
- jstests/sharding/mongos_no_replica_set_refresh.js
- jstests/sharding/all_config_servers_blackholed_from_mongos.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_5.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_5.yml
index 9ceaa0da6f8..329db56e743 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_5.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_5.yml
@@ -15,7 +15,6 @@ selector:
- jstests/sharding/sharding_multiple_ns_rs.js
- jstests/sharding/shard_aware_init.js
- jstests/sharding/balance_repl.js
- - jstests/sharding/remove4.js
- jstests/sharding/authCommands.js
- jstests/sharding/test_stacked_migration_cleanup.js
- jstests/sharding/shard_aware_primary_failover.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
index 719836db40d..bac468e7ace 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards_misc.yml
@@ -138,7 +138,6 @@ selector:
- jstests/sharding/sharding_multiple_ns_rs.js
- jstests/sharding/shard_aware_init.js
- jstests/sharding/balance_repl.js
- - jstests/sharding/remove4.js
- jstests/sharding/authCommands.js
- jstests/sharding/test_stacked_migration_cleanup.js
- jstests/sharding/shard_aware_primary_failover.js
diff --git a/buildscripts/resmokeconfig/suites/sharding_misc.yml b/buildscripts/resmokeconfig/suites/sharding_misc.yml
index 235d21c3a0d..7f7fa6fc508 100644
--- a/buildscripts/resmokeconfig/suites/sharding_misc.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_misc.yml
@@ -64,7 +64,6 @@ selector:
- jstests/sharding/all_config_servers_blackholed_from_mongos.js
- jstests/sharding/authCommands.js
- jstests/sharding/addshard4.js
- - jstests/sharding/remove4.js
- jstests/sharding/split_with_force.js
- jstests/sharding/auto_rebalance_parallel_replica_sets.js
- jstests/sharding/database_and_shard_versioning_all_commands.js
diff --git a/jstests/sharding/remove4.js b/jstests/sharding/remove4.js
deleted file mode 100644
index adae9442291..00000000000
--- a/jstests/sharding/remove4.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Validates that after a primary shard is drained, a new sharded collection will not be created on
-// the primary shard
-(function() {
- 'use strict';
-
- function removeShardAddNewColl(shardCollCmd) {
- let st = new ShardingTest({name: "remove_shard4", shards: 2, mongos: 2});
- assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
- let primaryShard = st.shard0.shardName;
- st.ensurePrimaryShard('TestDB', primaryShard);
-
- // Remove primary shard
- var removeRes;
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: primaryShard}));
- assert.eq('started', removeRes.state);
-
- // Add a new sharded collection and check that its data is not placed on the draining primary shard
- assert.commandWorked(st.s0.adminCommand(shardCollCmd));
- st.s0.getDB('TestDB').Coll.insert({_id: -2, value: 'Negative value'});
- st.s0.getDB('TestDB').Coll.insert({_id: 2, value: 'Positive value'});
-
- let chunks = st.config.chunks.find({'ns': 'TestDB.Coll'}).toArray();
- assert.neq(chunks.length, 0);
-
- for (let i = 0; i < chunks.length; i++) {
- assert.neq(chunks[i].shard,
- primaryShard,
- 'New sharded collection should not have been created on primary shard');
- }
-
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: primaryShard}));
- assert.eq('ongoing', removeRes.state);
-
- // Drop TestDB so the shard can finish draining
- assert.commandWorked(st.s0.getDB('TestDB').runCommand({dropDatabase: 1}));
-
- // Move the config.system.sessions chunk off primary
- assert.commandWorked(st.s0.adminCommand({
- moveChunk: 'config.system.sessions',
- find: {_id: 'config.system.sessions-_id_MinKey'},
- to: st.shard1.shardName,
- _waitForDelete: true
- }));
-
- // Remove shard must succeed now
- removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: primaryShard}));
- assert.eq('completed', removeRes.state);
-
- st.stop();
- }
-
- removeShardAddNewColl({shardCollection: 'TestDB.Coll', key: {_id: 1}});
- removeShardAddNewColl(
- {shardCollection: 'TestDB.Coll', key: {_id: "hashed"}, numInitialChunks: 2});
-})();
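
For reference, the deleted test drove removeShard through its three reported
states. A condensed shell sketch of that sequence (the shard id is an
assumption; the test obtained it from ShardingTest):

// removeShard is asynchronous: each call reports draining progress.
let res = assert.commandWorked(db.adminCommand({removeShard: 'shard0000'}));
assert.eq('started', res.state);    // first call: draining begins

res = assert.commandWorked(db.adminCommand({removeShard: 'shard0000'}));
assert.eq('ongoing', res.state);    // chunks or databases still remain

// ...after all chunks and databases are moved off the shard...
res = assert.commandWorked(db.adminCommand({removeShard: 'shard0000'}));
assert.eq('completed', res.state);  // the shard is gone from config.shards
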
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index 15edcbd1319..1f7d88d0fd7 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -481,16 +481,6 @@ private:
void _appendReadConcern(BSONObjBuilder* builder);

/**
- * Creates the first chunks of a new sharded collection.
- */
- ChunkVersion _createFirstChunks(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardKeyPattern& shardKeyPattern,
- const ShardId& primaryShardId,
- const std::vector<BSONObj>& initPoints,
- const bool distributeInitialChunks);
-
- /**
* Retrieve the full chunk description from the config.
*/
StatusWith<ChunkType> _findChunkOnConfig(OperationContext* opCtx,
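
The declaration is deleted here because _createFirstChunks becomes a
file-local free function in the .cpp below. Nearby, checkForExistingChunks is
moved rather than changed; it boils down to a guarded limit-1 count against
config.chunks. A rough shell equivalent (the namespace is a placeholder):

// Fail fast if a prior shardCollection attempt left partial chunks behind.
// readConcern 'local' sees chunks that are written but not yet
// majority-committed, matching the comment in the C++ helper.
const res = db.getSiblingDB('config').runCommand({
    count: 'chunks',
    query: {ns: 'TestDB.Coll'},   // assumption: collection being sharded
    limit: 1,                     // any match at all means manual cleanup
    readConcern: {level: 'local'}
});
assert.commandWorked(res);
assert.eq(0, res.n, 'partially written chunks exist in config.chunks');
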
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 1e4609937af..f26033e54c0 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -84,41 +84,6 @@ const ReadPreferenceSetting kConfigReadSelector(ReadPreference::Nearest, TagSet{
const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::UNSET, Seconds(0));
const char kWriteConcernField[] = "writeConcern";

-void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss) {
- BSONObjBuilder countBuilder;
- countBuilder.append("count", ChunkType::ConfigNS.coll());
- countBuilder.append("query", BSON(ChunkType::ns(nss.ns())));
-
- // OK to use limit=1, since if any chunks exist, we will fail.
- countBuilder.append("limit", 1);
-
- // Use readConcern local to guarantee we see any chunks that have been written and may
- // become committed; readConcern majority will not see the chunks if they have not made it
- // to the majority snapshot.
- repl::ReadConcernArgs readConcern(repl::ReadConcernLevel::kLocalReadConcern);
- readConcern.appendInfo(&countBuilder);
-
- auto cmdResponse = uassertStatusOK(
- Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- opCtx,
- kConfigReadSelector,
- ChunkType::ConfigNS.db().toString(),
- countBuilder.done(),
- Shard::kDefaultConfigCommandTimeout,
- Shard::RetryPolicy::kIdempotent));
- uassertStatusOK(cmdResponse.commandStatus);
-
- long long numChunks;
- uassertStatusOK(bsonExtractIntegerField(cmdResponse.response, "n", &numChunks));
- uassert(ErrorCodes::ManualInterventionRequired,
- str::stream() << "A previous attempt to shard collection " << nss.ns()
- << " failed after writing some initial chunks to config.chunks. Please "
- "manually delete the partially written chunks for collection "
- << nss.ns()
- << " from config.chunks",
- numChunks == 0);
-}
-
boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
Shard* shard,
const NamespaceString& ns,
@@ -168,41 +133,18 @@ boost::optional<UUID> checkCollectionOptions(OperationContext* opCtx,
* Creates and writes to the config server the first chunks for a newly sharded collection. Returns
* the version generated for the collection.
*/
-ChunkVersion ShardingCatalogManager::_createFirstChunks(OperationContext* opCtx,
- const NamespaceString& nss,
- const ShardKeyPattern& shardKeyPattern,
- const ShardId& primaryShardId,
- const std::vector<BSONObj>& initPoints,
- const bool distributeInitialChunks) {
+ChunkVersion createFirstChunks(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const ShardKeyPattern& shardKeyPattern,
+ const ShardId& primaryShardId,
+ const std::vector<BSONObj>& initPoints,
+ const bool distributeInitialChunks) {
const KeyPattern keyPattern = shardKeyPattern.getKeyPattern();

vector<BSONObj> splitPoints;
vector<ShardId> shardIds;

- std::string primaryShardName = primaryShardId.toString();
- auto drainingCount = uassertStatusOK(_runCountCommandOnConfig(
- opCtx,
- NamespaceString(ShardType::ConfigNS),
- BSON(ShardType::name() << primaryShardName << ShardType::draining(true))));
-
- const bool primaryDraining = (drainingCount > 0);
- auto getPrimaryOrFirstNonDrainingShard = [&opCtx, primaryShardId, primaryDraining]() {
- if (primaryDraining) {
- vector<ShardId> allShardIds;
- Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload(&allShardIds);
-
- auto dbShardId = allShardIds[0];
- if (allShardIds[0] == primaryShardId && allShardIds.size() > 1) {
- dbShardId = allShardIds[1];
- }
-
- return dbShardId;
- } else {
- return primaryShardId;
- }
- };
-
if (initPoints.empty()) {
// If no split points were specified, use the shard's data distribution to determine them
auto primaryShard =
@@ -238,12 +180,8 @@ ChunkVersion ShardingCatalogManager::_createFirstChunks(OperationContext* opCtx,
// otherwise defer to passed-in distribution option.
if (numObjects == 0 && distributeInitialChunks) {
Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload(&shardIds);
- if (primaryDraining && shardIds.size() > 1) {
- shardIds.erase(std::remove(shardIds.begin(), shardIds.end(), primaryShardId),
- shardIds.end());
- }
} else {
- shardIds.push_back(getPrimaryOrFirstNonDrainingShard());
+ shardIds.push_back(primaryShardId);
}
} else {
// Make sure points are unique and ordered
@@ -259,12 +197,8 @@ ChunkVersion ShardingCatalogManager::_createFirstChunks(OperationContext* opCtx,

if (distributeInitialChunks) {
Grid::get(opCtx)->shardRegistry()->getAllShardIdsNoReload(&shardIds);
- if (primaryDraining) {
- shardIds.erase(std::remove(shardIds.begin(), shardIds.end(), primaryShardId),
- shardIds.end());
- }
} else {
- shardIds.push_back(getPrimaryOrFirstNonDrainingShard());
+ shardIds.push_back(primaryShardId);
}
}

@@ -310,6 +244,41 @@ ChunkVersion ShardingCatalogManager::_createFirstChunks(OperationContext* opCtx,
return version;
}

+void checkForExistingChunks(OperationContext* opCtx, const NamespaceString& nss) {
+ BSONObjBuilder countBuilder;
+ countBuilder.append("count", ChunkType::ConfigNS.coll());
+ countBuilder.append("query", BSON(ChunkType::ns(nss.ns())));
+
+ // OK to use limit=1, since if any chunks exist, we will fail.
+ countBuilder.append("limit", 1);
+
+ // Use readConcern local to guarantee we see any chunks that have been written and may
+ // become committed; readConcern majority will not see the chunks if they have not made it
+ // to the majority snapshot.
+ repl::ReadConcernArgs readConcern(repl::ReadConcernLevel::kLocalReadConcern);
+ readConcern.appendInfo(&countBuilder);
+
+ auto cmdResponse = uassertStatusOK(
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
+ kConfigReadSelector,
+ ChunkType::ConfigNS.db().toString(),
+ countBuilder.done(),
+ Shard::kDefaultConfigCommandTimeout,
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(cmdResponse.commandStatus);
+
+ long long numChunks;
+ uassertStatusOK(bsonExtractIntegerField(cmdResponse.response, "n", &numChunks));
+ uassert(ErrorCodes::ManualInterventionRequired,
+ str::stream() << "A previous attempt to shard collection " << nss.ns()
+ << " failed after writing some initial chunks to config.chunks. Please "
+ "manually delete the partially written chunks for collection "
+ << nss.ns()
+ << " from config.chunks",
+ numChunks == 0);
+}
+
Status ShardingCatalogManager::dropCollection(OperationContext* opCtx, const NamespaceString& nss) {
const auto catalogClient = Grid::get(opCtx)->catalogClient();
catalogClient
@@ -537,7 +506,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
->makeFromBSON(defaultCollation));
}

- const auto& collVersion = _createFirstChunks(
+ const auto& collVersion = createFirstChunks(
opCtx, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, distributeInitialChunks);

{
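
Taken together, createFirstChunks now seeds the collection on dbPrimaryShardId
whenever chunks are not being distributed across all shards. The deleted
test's setup shows the user-facing path; a trimmed shell sketch (TestDB.Coll2
is a hypothetical second namespace added here for the hashed variant):

// Ranged shard key: one initial chunk, placed on the primary shard.
assert.commandWorked(db.adminCommand({enableSharding: 'TestDB'}));
assert.commandWorked(
    db.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));

// Hashed shard key with pre-splitting, as in the test's second run.
assert.commandWorked(db.adminCommand(
    {shardCollection: 'TestDB.Coll2', key: {_id: 'hashed'}, numInitialChunks: 2}));

// After this change, an empty collection's initial chunk(s) can land on the
// primary shard even while removeShard is draining it.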