author     Esha Maharishi <esha.maharishi@mongodb.com>  2019-10-17 21:38:49 +0000
committer  evergreen <evergreen@mongodb.com>  2019-10-17 21:38:49 +0000
commit     dab2ae229660af56a786a084ebc36666c4dd6a91 (patch)
tree       b761dedcc28f54c7db04077058eaa5b313ba6882
parent     66cc9d9c2e7db216881afc605669028c55042e5e (diff)
download   mongo-dab2ae229660af56a786a084ebc36666c4dd6a91.tar.gz
SERVER-42112 uassert on _flushDatabaseCacheUpdates cmdResponse in configsvrDropDatabase and configsvrCreateDatabase if FCV 4.4
-rw-r--r--  jstests/multiVersion/create_and_drop_database_succeed_in_mixed_version_cluster.js |  22
-rw-r--r--  src/mongo/db/s/config/configsvr_drop_database_command.cpp                         |  11
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp           |  97
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp            |  24
4 files changed, 151 insertions(+), 3 deletions(-)
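
In short, the change replaces a "TODO SERVER-42112" comment at both call sites with a check of the _flushDatabaseCacheUpdates response that tolerates exactly one error code. Below is a minimal standalone sketch of that pattern; Status, ErrorCode, and checkFlushResponse are simplified stand-ins invented for illustration, not the real mongo::Status or Shard::CommandResponse types:

#include <cassert>
#include <stdexcept>
#include <string>

// Simplified stand-ins for mongo::ErrorCodes and mongo::Status (illustration only).
enum class ErrorCode { OK, NamespaceNotFound, HostUnreachable };

struct Status {
    ErrorCode code;
    std::string reason;
    bool isOK() const { return code == ErrorCode::OK; }
};

// Mirrors uassertStatusOK: throw unless the status is OK.
void uassertStatusOK(const Status& status) {
    if (!status.isOK())
        throw std::runtime_error(status.reason);
}

// The pattern this commit adds: NamespaceNotFound from a v4.2 shard means the
// shard does not have the database (SERVER-34431) and is deliberately ignored;
// any other error in the command response is now surfaced to the caller.
void checkFlushResponse(const Status& commandStatus) {
    if (commandStatus.code == ErrorCode::NamespaceNotFound)
        return;  // benign on a v4.2 shard
    uassertStatusOK(commandStatus);
}

int main() {
    checkFlushResponse({ErrorCode::OK, ""});                      // passes
    checkFlushResponse({ErrorCode::NamespaceNotFound, "dummy"});  // ignored
    try {
        checkFlushResponse({ErrorCode::HostUnreachable, "host down"});
        assert(false && "should have thrown");
    } catch (const std::runtime_error&) {
        // surfaced, as the new uassertStatusOK call intends
    }
    return 0;
}

The real code differs only in shape: the drop path skips the benign error with a continue inside a loop over all shards, while the create path wraps the same tolerance around a single primary-shard response.
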
diff --git a/jstests/multiVersion/create_and_drop_database_succeed_in_mixed_version_cluster.js b/jstests/multiVersion/create_and_drop_database_succeed_in_mixed_version_cluster.js
new file mode 100644
index 00000000000..c5c052a1dfe
--- /dev/null
+++ b/jstests/multiVersion/create_and_drop_database_succeed_in_mixed_version_cluster.js
@@ -0,0 +1,22 @@
+// Tests that create database and drop database succeed when the config servers are v4.4 and shard
+// servers are v4.2.
+
+(function() {
+"use strict";
+
+const st = new ShardingTest({
+ shards: [
+ {binVersion: "last-stable"},
+ ],
+ mongos: 1,
+ other: {mongosOptions: {binVersion: "last-stable"}}
+});
+
+// Create a database by inserting into a collection.
+assert.commandWorked(st.s.getDB("test").getCollection("foo").insert({x: 1}));
+
+// Drop the database.
+assert.commandWorked(st.s.getDB("test").dropDatabase());
+
+st.stop();
+})();
diff --git a/src/mongo/db/s/config/configsvr_drop_database_command.cpp b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
index 9f34f47cad0..68a296a0716 100644
--- a/src/mongo/db/s/config/configsvr_drop_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_drop_database_command.cpp
@@ -183,7 +183,16 @@ public:
"admin",
BSON("_flushDatabaseCacheUpdates" << dbname),
Shard::RetryPolicy::kIdempotent));
- // TODO SERVER-42112: uassert on the cmdResponse.
+
+ // If the shard had binary version v4.2 when it received the
+ // _flushDatabaseCacheUpdates, it will have responded with NamespaceNotFound,
+ // because the shard no longer has the database (see SERVER-34431). Ignore this
+ // error, since once the shard is restarted in v4.4, its in-memory database version
+ // will be cleared anyway.
+ if (cmdResponse.commandStatus == ErrorCodes::NamespaceNotFound) {
+ continue;
+ }
+ uassertStatusOK(cmdResponse.commandStatus);
}
ShardingLogging::get(opCtx)->logChange(
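
For orientation, the hunk above sits inside a loop that sends _flushDatabaseCacheUpdates to every shard after the database is dropped; the new continue skips only the benign mixed-version error. A hedged sketch of that control flow, where ShardStub and its string-based error reporting are invented stand-ins for the real remote-command machinery (the Shard::RetryPolicy::kIdempotent call visible in the hunk):

#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-in for a shard connection; the real code issues a remote
// _flushDatabaseCacheUpdates command against the "admin" database.
struct ShardStub {
    std::string name;
    bool isV42AndDroppedDb = false;  // models the SERVER-34431 response

    // Returns an error string, or empty on success.
    std::string flushDatabaseCacheUpdates(const std::string& /*dbname*/) const {
        return isV42AndDroppedDb ? "NamespaceNotFound" : "";
    }
};

// Mirrors the drop-database loop: flush every shard, skip the one error a
// v4.2 shard can legitimately return, and fail fast on anything else.
void flushAllShards(const std::vector<ShardStub>& shards, const std::string& dbname) {
    for (const auto& shard : shards) {
        const std::string err = shard.flushDatabaseCacheUpdates(dbname);
        if (err == "NamespaceNotFound")
            continue;  // v4.2 shard no longer has the database; ignore
        if (!err.empty())
            throw std::runtime_error(shard.name + ": " + err);
    }
}

int main() {
    flushAllShards({{"shard0000", false}, {"shard0001", true}}, "test");
    return 0;  // the v4.2 shard's NamespaceNotFound was skipped, not thrown
}
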
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
index fac498f768c..dc89e277360 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
@@ -159,6 +159,103 @@ TEST_F(CreateDatabaseTest, createDatabaseSuccess) {
future.default_timed_get();
}
+TEST_F(CreateDatabaseTest,
+ createDatabaseShardReturnsNamespaceNotFoundForFlushDatabaseCacheUpdates) {
+ const std::string dbname = "db1";
+
+ ShardType s0;
+ s0.setName("shard0000");
+ s0.setHost("ShardHost0:27017");
+ setupShards(vector<ShardType>{s0});
+
+ ShardType s1;
+ s1.setName("shard0001");
+ s1.setHost("ShardHost1:27017");
+ setupShards(vector<ShardType>{s1});
+
+ ShardType s2;
+ s2.setName("shard0002");
+ s2.setHost("ShardHost2:27017");
+ setupShards(vector<ShardType>{s2});
+
+ // Prime the shard registry with information about the existing shards
+ shardRegistry()->reload(operationContext());
+
+ // Set up all the target mocks' return values.
+ RemoteCommandTargeterMock::get(
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s0.getName()))->getTargeter())
+ ->setFindHostReturnValue(HostAndPort(s0.getHost()));
+ RemoteCommandTargeterMock::get(
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s1.getName()))->getTargeter())
+ ->setFindHostReturnValue(HostAndPort(s1.getHost()));
+ RemoteCommandTargeterMock::get(
+ uassertStatusOK(shardRegistry()->getShard(operationContext(), s2.getName()))->getTargeter())
+ ->setFindHostReturnValue(HostAndPort(s2.getHost()));
+
+ // Now actually start the createDatabase work.
+
+ auto future = launchAsync([this, dbname] {
+ ThreadClient tc("Test", getGlobalServiceContext());
+ auto opCtx = cc().makeOperationContext();
+ ShardingCatalogManager::get(opCtx.get())->createDatabase(opCtx.get(), dbname);
+ });
+
+ // Return size information about first shard
+ onCommand([&](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(s0.getHost(), request.target.toString());
+ ASSERT_EQUALS("admin", request.dbname);
+ std::string cmdName = request.cmdObj.firstElement().fieldName();
+ ASSERT_EQUALS("listDatabases", cmdName);
+ ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
+
+ ASSERT_BSONOBJ_EQ(
+ ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
+ rpc::TrackingMetadata::removeTrackingData(request.metadata));
+
+ return BSON("ok" << 1 << "totalSize" << 10);
+ });
+
+ // Return size information about second shard
+ onCommand([&](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(s1.getHost(), request.target.toString());
+ ASSERT_EQUALS("admin", request.dbname);
+ std::string cmdName = request.cmdObj.firstElement().fieldName();
+ ASSERT_EQUALS("listDatabases", cmdName);
+ ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
+
+ ASSERT_BSONOBJ_EQ(
+ ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
+ rpc::TrackingMetadata::removeTrackingData(request.metadata));
+
+ return BSON("ok" << 1 << "totalSize" << 1);
+ });
+
+ // Return size information about third shard
+ onCommand([&](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(s2.getHost(), request.target.toString());
+ ASSERT_EQUALS("admin", request.dbname);
+ std::string cmdName = request.cmdObj.firstElement().fieldName();
+ ASSERT_EQUALS("listDatabases", cmdName);
+
+ ASSERT_BSONOBJ_EQ(
+ ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
+ rpc::TrackingMetadata::removeTrackingData(request.metadata));
+
+ return BSON("ok" << 1 << "totalSize" << 100);
+ });
+
+ // Return NamespaceNotFound for _flushDatabaseCacheUpdates
+ onCommand([&](const RemoteCommandRequest& request) {
+ std::string cmdName = request.cmdObj.firstElement().fieldName();
+ ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
+
+ return BSON("ok" << 0 << "code" << ErrorCodes::NamespaceNotFound << "errmsg"
+ << "dummy");
+ });
+
+ future.default_timed_get();
+}
+
TEST_F(CreateDatabaseTest, createDatabaseDBExists) {
const std::string dbname = "db3";
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 11091ef8957..3d0fc74910a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -119,7 +119,19 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassertStatusOK(Grid::get(opCtx)->catalogClient()->insertConfigDocument(
opCtx, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kMajorityWriteConcern));
- // Send _flushDatabaseCacheUpdates to the primary shard
+ // Note, making the primary shard refresh its databaseVersion here is not required for
+ // correctness, since either:
+ // 1) This is the first time this database is being created. The primary shard will not have a
+ // databaseVersion already cached.
+ // 2) The database was dropped and is being re-created. Since dropping a database also sends
+ // _flushDatabaseCacheUpdates to all shards, the primary shard should not have a database
+ // version cached. (Note, it is possible that dropping a database will skip sending
+ // _flushDatabaseCacheUpdates if the config server fails over while dropping the database.)
+ // However, routers don't support retrying internally on StaleDbVersion in transactions
+ // (SERVER-39704), so if the first operation run against the database is in a transaction, it
+ // would fail with StaleDbVersion. Making the primary shard refresh here allows that first
+ // transaction to succeed. This allows our transaction passthrough suites and transaction demos
+ // to succeed without additional special logic.
const auto shard =
uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, db.getPrimary()));
auto cmdResponse = uassertStatusOK(
@@ -128,7 +140,15 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
"admin",
BSON("_flushDatabaseCacheUpdates" << dbName),
Shard::RetryPolicy::kIdempotent));
- // TODO SERVER-42112: uassert on the cmdResponse.
+
+ // If the shard had binary version v4.2 when it received the _flushDatabaseCacheUpdates, it will
+ // have responded with NamespaceNotFound, because the shard does not have the database (see
+ // SERVER-34431). Ignore this error, since the _flushDatabaseCacheUpdates is only a nicety for
+ // users testing transactions, and the transaction passthrough suites do not change shard binary
+ // versions.
+ if (cmdResponse.commandStatus != ErrorCodes::NamespaceNotFound) {
+ uassertStatusOK(cmdResponse.commandStatus);
+ }
return db;
}
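
The long comment in the first hunk of this file carries the design reasoning: flushing the primary shard at create time is not needed for correctness, but it spares the first in-transaction operation a StaleDbVersion failure, because routers cannot retry that error inside transactions (SERVER-39704). A toy model of that reasoning follows; ShardModel, runOp, and the integer versioning are hypothetical simplifications, not the real database-version protocol:

#include <cassert>
#include <optional>

// Toy model of a shard's cached database version (illustration only).
struct ShardModel {
    std::optional<int> cachedDbVersion;
    void refresh(int authoritativeVersion) { cachedDbVersion = authoritativeVersion; }
    bool accepts(int routerDbVersion) const {
        // A cold or mismatched cache triggers StaleDbVersion in the real code.
        return cachedDbVersion && *cachedDbVersion == routerDbVersion;
    }
};

// Routers retry internally on StaleDbVersion outside transactions, but not
// inside them (SERVER-39704); that asymmetry motivates the flush.
bool runOp(ShardModel& shard, int routerDbVersion, bool inTransaction) {
    if (shard.accepts(routerDbVersion))
        return true;
    if (inTransaction)
        return false;  // StaleDbVersion surfaces to the client
    shard.refresh(routerDbVersion);  // refresh, then retry once
    return shard.accepts(routerDbVersion);
}

int main() {
    const int dbVersion = 1;

    // Cold cache: a plain operation self-heals, a transaction fails.
    ShardModel cold;
    assert(runOp(cold, dbVersion, /*inTransaction=*/false));
    ShardModel coldTxn;
    assert(!runOp(coldTxn, dbVersion, /*inTransaction=*/true));

    // The _flushDatabaseCacheUpdates sent at createDatabase time plays the
    // role of refresh() here, so the first transaction succeeds.
    ShardModel warmed;
    warmed.refresh(dbVersion);
    assert(runOp(warmed, dbVersion, /*inTransaction=*/true));
    return 0;
}
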