-rw-r--r--  buildscripts/resmokeconfig/suites/read_only_sharded.yml |  4
-rw-r--r--  jstests/noPassthrough/read_majority_reads.js            |  3
-rw-r--r--  jstests/readonly/lib/read_only_test.js                  |  1
-rw-r--r--  jstests/replsets/auth1.js                               | 10
-rw-r--r--  jstests/sharding/addshard2.js                           | 12
-rw-r--r--  jstests/sharding/auth.js                                |  6
-rw-r--r--  jstests/sharding/auth_add_shard.js                      |  4
-rw-r--r--  jstests/sharding/remove2.js                             |  6
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js                |  2
-rw-r--r--  jstests/sharding/user_flags_sharded.js                  |  4
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp                    | 29
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp             |  6
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                       |  3
-rw-r--r--  src/mongo/db/s/sharding_state.h                         | 14
-rw-r--r--  src/mongo/shell/replsettest.js                          |  7
15 files changed, 72 insertions, 39 deletions
diff --git a/buildscripts/resmokeconfig/suites/read_only_sharded.yml b/buildscripts/resmokeconfig/suites/read_only_sharded.yml
index 58ab7df836e..2d2292e5913 100644
--- a/buildscripts/resmokeconfig/suites/read_only_sharded.yml
+++ b/buildscripts/resmokeconfig/suites/read_only_sharded.yml
@@ -2,6 +2,10 @@ selector:
   js_test:
     roots:
     - jstests/readonly/*.js
+    exclude_files:
+    # TODO: re-enable in SERVER-25549
+    - jstests/readonly/find.js
+    - jstests/readonly/get_more.js
 
 executor:
   js_test:
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index 92a0032a2cd..f0a5172e07f 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -206,7 +206,8 @@
         }
     }
 
-    var mongod = MongoRunner.runMongod({setParameter: 'testingSnapshotBehaviorInIsolation=true'});
+    var mongod = MongoRunner.runMongod(
+        {setParameter: 'testingSnapshotBehaviorInIsolation=true', shardsvr: ""});
     assert.neq(
         null,
         mongod,
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index e3b68671966..792d64e1d04 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -67,6 +67,7 @@ var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
     jsTest.log("restarting shards...");
     try {
         for (var i = 0; i < this.nShards; ++i) {
+            // TODO(esha): add shardsvr: "" option when this test is re-enabled in SERVER-25549
             var opts = {queryableBackupMode: "", dbpath: this.paths[i]};
 
             assert.commandWorked(this.shardingTest["d" + i].getDB("local").dropDatabase());
+ print("start 0 with keyFile"); m = rs.start(0, {"keyFile": key1_600, noCleanData: true}); - print("restart 1 with keyFile"); + print("start 1 with keyFile"); rs.start(1, {"keyFile": key1_600}); - print("restart 2 with keyFile"); + print("start 2 with keyFile"); rs.start(2, {"keyFile": key1_600}); var result = m.getDB("admin").auth("foo", "bar"); diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js index b3413c4ad98..8e23cc92d4f 100644 --- a/jstests/sharding/addshard2.js +++ b/jstests/sharding/addshard2.js @@ -5,27 +5,27 @@ new ShardingTest({name: "add_shard2", shards: 1, mongos: 1, other: {useHostname: true}}); // Start two new instances, which will be used for shards - var conn1 = MongoRunner.runMongod({useHostname: true}); - var conn2 = MongoRunner.runMongod({useHostname: true}); + var conn1 = MongoRunner.runMongod({useHostname: true, shardsvr: ""}); + var conn2 = MongoRunner.runMongod({useHostname: true, shardsvr: ""}); var rs1 = new ReplSetTest({"name": "add_shard2_rs1", nodes: 3}); - rs1.startSet(); + rs1.startSet({shardsvr: ""}); rs1.initiate(); var master1 = rs1.getPrimary(); var rs2 = new ReplSetTest({"name": "add_shard2_rs2", nodes: 3}); - rs2.startSet(); + rs2.startSet({shardsvr: ""}); rs2.initiate(); var master2 = rs2.getPrimary(); // replica set with set name = 'config' var rs3 = new ReplSetTest({'name': 'config', nodes: 3}); - rs3.startSet(); + rs3.startSet({shardsvr: ""}); rs3.initiate(); // replica set with set name = 'admin' var rs4 = new ReplSetTest({'name': 'admin', nodes: 3}); - rs4.startSet(); + rs4.startSet({shardsvr: ""}); rs4.initiate(); // replica set with configsvr: true should *not* be allowed to be added as a shard diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index f85ad22c6d5..03a3068008c 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -66,7 +66,7 @@ login(adminUser); var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true}); - d1.startSet({keyFile: "jstests/libs/key2"}); + d1.startSet({keyFile: "jstests/libs/key2", shardsvr: ""}); d1.initiate(); print("d1 initiated"); @@ -97,7 +97,7 @@ print("start rs w/correct key"); d1.stopSet(); - d1.startSet({keyFile: "jstests/libs/key1"}); + d1.startSet({keyFile: "jstests/libs/key1", restart: true}); d1.initiate(); var master = d1.getPrimary(); @@ -147,7 +147,7 @@ logout(testUser); var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true}); - d2.startSet({keyFile: "jstests/libs/key1"}); + d2.startSet({keyFile: "jstests/libs/key1", shardsvr: ""}); d2.initiate(); d2.awaitSecondaryNodes(); diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js index f1cf6eccea4..c08b69b9afa 100644 --- a/jstests/sharding/auth_add_shard.js +++ b/jstests/sharding/auth_add_shard.js @@ -35,7 +35,7 @@ assert.eq(1, st.config.shards.count(), "initial server count wrong"); // start a mongod with NO keyfile - var conn = MongoRunner.runMongod({}); + var conn = MongoRunner.runMongod({shardsvr: ""}); print(conn); // --------------- Test 1 -------------------- @@ -47,7 +47,7 @@ //--------------- Test 2 -------------------- // start mongod again, this time with keyfile - var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"}); + var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1", shardsvr: ""}); // try adding the new shard assert.commandWorked(admin.runCommand({addShard: conn.host})); diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js index d2121511344..4f583a8ab4b 100644 --- 
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index d2121511344..4f583a8ab4b 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -118,7 +118,7 @@ rst1.stopSet();
 print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
 sleep(20000);  // 1 failed check should take 10 seconds, sleep for 20 just to be safe
 
-rst1.startSet();
+rst1.startSet({restart: true});
 rst1.initiate();
 rst1.awaitReplication();
 
@@ -166,7 +166,7 @@ print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetM
 sleep(60000);
 
 var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
-rst2.startSet();
+rst2.startSet({shardsvr: ""});
 rst2.initiate();
 rst2.awaitReplication();
 
@@ -186,7 +186,7 @@ rst2.stopSet();
 print("Sleeping for 60 seconds to let the other shards restart their ReplicaSetMonitors");
 sleep(60000);
 
-rst1.startSet();
+rst1.startSet({restart: true});
 rst1.initiate();
 rst1.awaitReplication();
 
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index c41f1043012..d3fe61a1275 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -28,7 +28,7 @@
 
     st.restartMongos(0);
 
-    replTest.startSet({oplogSize: 10});
+    replTest.startSet({restart: true});
     replTest.initiate();
     replTest.awaitSecondaryNodes();
 
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index f629c3b759f..caf5280d185 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -8,8 +8,8 @@
     var coll = "userFlagsColl";
     var ns = dbname + "." + coll;
 
-    // First create fresh collection on a new standalone mongod
-    var newShardConn = MongoRunner.runMongod({});
+    // First create fresh collection on a new standalone mongod that will become a shard.
+    var newShardConn = MongoRunner.runMongod({"shardsvr": ""});
     var db1 = newShardConn.getDB(dbname);
     var t = db1.getCollection(coll);
     print(t);
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index bd848af59b6..3bd645c8cfd 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -1433,19 +1433,30 @@ void Command::execCommand(OperationContext* txn,
             oss.initializeShardVersion(commandNS, extractedFields[kShardVersionFieldIdx]);
 
             auto shardingState = ShardingState::get(txn);
+
+            if (oss.hasShardVersion()) {
+                if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
+                    uassertStatusOK(
+                        {ErrorCodes::IllegalOperation,
+                         "Cannot accept sharding commands if not started with --shardsvr"});
+                } else if (!shardingState->enabled()) {
+                    // TODO(esha): Once 3.4 ships, we no longer need to support initializing
+                    // sharding awareness through commands, so just reject all sharding commands.
+                    if (!shardingState->commandInitializesShardingAwareness(
+                            request.getCommandName().toString())) {
+                        uassertStatusOK({ErrorCodes::IllegalOperation,
+                                         str::stream()
+                                             << "Received a command with sharding chunk version "
+                                                "information but this node is not sharding aware: "
+                                             << request.getCommandArgs().jsonString()});
+                    }
+                }
+            }
+
             if (shardingState->enabled()) {
                 // TODO(spencer): Do this unconditionally once all nodes are sharding aware
                 // by default.
                 uassertStatusOK(shardingState->updateConfigServerOpTimeFromMetadata(txn));
-            } else {
-                massert(
-                    34422,
-                    str::stream()
-                        << "Received a command with sharding chunk version information but this "
-                           "node is not sharding aware: "
-                        << request.getCommandArgs().jsonString(),
-                    !oss.hasShardVersion() ||
-                        ChunkVersion::isIgnoredVersion(oss.getShardVersion(commandNS)));
             }
         }
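
Note: the dbcommands.cpp hunk above tightens the server-side gate. Previously a node that was
not sharding aware would massert only when a command carried a non-ignored shard version; now
any command carrying shard version information is rejected outright with IllegalOperation
unless the node was started with --shardsvr. A minimal mongo-shell sketch of the observable
behavior, assuming the stock MongoRunner and assert helpers; the connection and collection
names here are illustrative, not from this commit:

    // Without --shardsvr, a command carrying a shardVersion field now fails
    // with IllegalOperation (code 20) regardless of the version it carries.
    var plain = MongoRunner.runMongod({});
    var res = plain.getDB("test").runCommand(
        {count: "coll", shardVersion: [Timestamp(0, 0), ObjectId()]});
    assert.commandFailed(res);
    assert.eq(res.code, 20);  // ErrorCodes::IllegalOperation
    MongoRunner.stopMongod(plain);
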
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 97991dc27f8..5d0cad65b52 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -54,11 +54,6 @@ void OperationShardingState::initializeShardVersion(NamespaceString nss,
                                                     const BSONElement& shardVersionElt) {
     invariant(!hasShardVersion());
 
-    if (nss.isSystemDotIndexes()) {
-        setShardVersion(std::move(nss), ChunkVersion::IGNORED());
-        return;
-    }
-
     if (shardVersionElt.eoo() || shardVersionElt.type() != BSONType::Array) {
         return;
     }
@@ -89,7 +84,6 @@ ChunkVersion OperationShardingState::getShardVersion(const NamespaceString& nss)
 void OperationShardingState::setShardVersion(NamespaceString nss, ChunkVersion newVersion) {
     // This currently supports only setting the shard version for one namespace.
     invariant(!_hasVersion || _ns == nss);
-    invariant(!nss.isSystemDotIndexes() || ChunkVersion::isIgnoredVersion(newVersion));
 
     _ns = std::move(nss);
     _shardVersion = std::move(newVersion);
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index a68a2d1231b..d5e57b8357d 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -118,6 +118,9 @@ void updateShardIdentityConfigStringCB(const string& setName, const string& newC
 
 }  // namespace
 
+const std::set<std::string> ShardingState::_commandsThatInitializeShardingAwareness{
+    "_recvChunkStart", "mergeChunks", "moveChunk", "setShardVersion", "splitChunk"};
+
 ShardingState::ShardingState()
     : _initializationState(static_cast<uint32_t>(InitializationState::kNew)),
       _initializationStatus(Status(ErrorCodes::InternalError, "Uninitialized value")),
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index c959592c866..28eb837c215 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -259,6 +259,16 @@ public:
      */
     Status initializeShardingAwarenessIfNeeded(OperationContext* txn);
 
+    /**
+     * Check if a command is one of the whitelisted commands that can be accepted with shardVersion
+     * information before this node is sharding aware, because the command initializes sharding
+     * awareness.
+     */
+    static bool commandInitializesShardingAwareness(const std::string& commandName) {
+        return _commandsThatInitializeShardingAwareness.find(commandName) !=
+            _commandsThatInitializeShardingAwareness.end();
+    }
+
 private:
     friend class ScopedRegisterMigration;
@@ -372,6 +382,10 @@ private:
 
     // The id for the cluster this shard belongs to.
     OID _clusterId;
 
+    // A whitelist of sharding commands that are allowed when running with --shardsvr but not yet
+    // shard aware, because they initialize sharding awareness.
+    static const std::set<std::string> _commandsThatInitializeShardingAwareness;
+
     // Function for initializing the external sharding state components not owned here.
     GlobalInitFunc _globalInit;
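
Note: combined with the whitelist declared in sharding_state.h/.cpp above, the acceptance rule
for a command carrying shard version information condenses to the sketch below. This is a
paraphrase of the C++ logic in Command::execCommand, written as shell JavaScript for brevity;
the function name is illustrative only.

    // A versioned command is accepted iff the node runs with --shardsvr and is
    // either already sharding aware or the command is one of the five commands
    // that initialize sharding awareness.
    function acceptsShardVersionedCommand(isShardSvr, isShardingAware, cmdName) {
        if (!isShardSvr)
            return false;  // rejected with IllegalOperation
        if (!isShardingAware)
            return ["_recvChunkStart", "mergeChunks", "moveChunk",
                    "setShardVersion", "splitChunk"].indexOf(cmdName) !== -1;
        return true;  // normal shard version checking applies
    }
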
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 570ef6d13c2..dcb49984fe9 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -454,13 +454,14 @@ var ReplSetTest = function(opts) {
      * Starts each node in the replica set with the given options.
      *
      * @param options - The options passed to {@link MongoRunner.runMongod}
+     * @param restart - If true and no options are provided, each node is restarted with its
+     *     existing options.
      */
-    this.startSet = function(options) {
+    this.startSet = function(options, restart) {
         print("ReplSetTest starting set");
-
         var nodes = [];
         for (var n = 0; n < this.ports.length; n++) {
-            nodes.push(this.start(n, options));
+            nodes.push(this.start(n, options, restart));
         }
 
         this.nodes = nodes;
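
Note: the replsettest.js change gives tests two ways to bring a replica set back up with the
options it was originally started with, instead of repeating them. A short usage sketch,
assuming an already-initiated ReplSetTest; the set name is illustrative only.

    // Fresh start: the sharding tests above now pass shardsvr: "" here so the
    // nodes come up eligible to be added as shards.
    var rst = new ReplSetTest({name: "demo", nodes: 2});
    rst.startSet({shardsvr: ""});
    rst.initiate();

    // Restart: reuse each node's existing options (dbpath, keyFile, shardsvr)
    // either through the restart option, as remove2.js does above...
    rst.stopSet();
    rst.startSet({restart: true});

    // ...or through the new second parameter when no options are passed.
    rst.stopSet();
    rst.startSet(undefined, true);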