| author | Dianna Hohensee <dianna.hohensee@10gen.com> | 2016-02-18 16:10:30 -0500 |
| --- | --- | --- |
| committer | Dianna Hohensee <dianna.hohensee@10gen.com> | 2016-02-25 11:15:56 -0500 |
| commit | 77358fcc55c37446a1964441ab27b3fb19f4b060 (patch) | |
| tree | 151c1c93688651d2ef07780ebb1928209bbcafff /src | |
| parent | 16cf986b4b828f89f251c257ff812d02d77b8468 (diff) | |
| download | mongo-77358fcc55c37446a1964441ab27b3fb19f4b060.tar.gz | |
SERVER-22590 improve applyChunkOpsDeprecated to check for false failures in chunk operations
(cherry picked from commit ff296b3279257d2ff8e53ee90eb0f6a6f5c562f4)
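For readers skimming the patch, here is a minimal, self-contained sketch of the recovery pattern this commit introduces: when an applyOps-style commit of chunk metadata apparently fails, re-query the chunks collection for the last chunk version the batch would have written before treating the operation as failed. The types and helper names below (applyChunkOpsWithConfirmation, findNewestChunkVersion) are illustrative stand-ins, not the actual MongoDB classes or catalog manager API.

```cpp
// Sketch only: illustrates the "false failure" check, not MongoDB's real implementation.
#include <functional>
#include <iostream>
#include <optional>
#include <string>

struct ChunkVersion {
    int majorVersion = 0;
    int minorVersion = 0;
    bool operator==(const ChunkVersion& other) const {
        return majorVersion == other.majorVersion && minorVersion == other.minorVersion;
    }
};

struct Status {
    bool ok = true;
    std::string reason;
};

// Apply the batch of chunk updates; on an apparent failure, confirm against the
// authoritative chunk metadata before reporting an error.
Status applyChunkOpsWithConfirmation(
    const std::function<Status()>& applyChunkOps,
    const std::function<std::optional<ChunkVersion>(const std::string&)>& findNewestChunkVersion,
    const std::string& nss,
    const ChunkVersion& lastChunkVersion) {
    Status status = applyChunkOps();
    if (status.ok) {
        return status;
    }

    // The write may have landed even though the response reported an error (a network
    // blip, or a retry of a successful write tripping its own precondition). Look for
    // the last document the batch would have produced, identified by namespace + version.
    std::optional<ChunkVersion> newest = findNewestChunkVersion(nss);
    if (newest && *newest == lastChunkVersion) {
        return Status{true, "commit confirmed after apparent failure"};
    }
    return Status{false, "chunk ops failed: " + status.reason};
}

int main() {
    ChunkVersion expected{2, 0};

    // Simulate a commit whose reply was lost: applyOps reports failure, but the metadata
    // query finds the expected version, so the operation is treated as successful.
    Status s = applyChunkOpsWithConfirmation(
        [] { return Status{false, "network error"}; },
        [&](const std::string&) { return std::optional<ChunkVersion>{expected}; },
        "test.coll",
        expected);

    std::cout << (s.ok ? "confirmed" : s.reason) << std::endl;
    return 0;
}
```

In the patch itself this confirmation lives inside applyChunkOpsDeprecated (see the catalog_manager_legacy.cpp and catalog_manager_replica_set.cpp hunks below), and the new failApplyChunkOps failpoint lets the unit tests force the failure path.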
Diffstat (limited to 'src')
| -rw-r--r-- | src/mongo/db/s/migration_impl.cpp | 57 |
| -rw-r--r-- | src/mongo/s/catalog/catalog_manager.h | 18 |
| -rw-r--r-- | src/mongo/s/catalog/catalog_manager_common.cpp | 2 |
| -rw-r--r-- | src/mongo/s/catalog/catalog_manager_common.h | 3 |
| -rw-r--r-- | src/mongo/s/catalog/catalog_manager_mock.cpp | 4 |
| -rw-r--r-- | src/mongo/s/catalog/catalog_manager_mock.h | 4 |
| -rw-r--r-- | src/mongo/s/catalog/forwarding_catalog_manager.cpp | 9 |
| -rw-r--r-- | src/mongo/s/catalog/forwarding_catalog_manager.h | 4 |
| -rw-r--r-- | src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp | 46 |
| -rw-r--r-- | src/mongo/s/catalog/legacy/catalog_manager_legacy.h | 4 |
| -rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp | 51 |
| -rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set.h | 4 |
| -rw-r--r-- | src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp | 90 |
| -rw-r--r-- | src/mongo/s/d_merge.cpp | 4 |
| -rw-r--r-- | src/mongo/s/d_split.cpp | 5 |
15 files changed, 204 insertions, 101 deletions
diff --git a/src/mongo/db/s/migration_impl.cpp b/src/mongo/db/s/migration_impl.cpp
index e761f27a78d..a01a79f9db1 100644
--- a/src/mongo/db/s/migration_impl.cpp
+++ b/src/mongo/db/s/migration_impl.cpp
@@ -90,7 +90,6 @@ BSONObj createRecvChunkCommitRequest(const MigrationSessionId& sessionId) {
 MONGO_FP_DECLARE(failMigrationCommit);
 MONGO_FP_DECLARE(hangBeforeLeavingCriticalSection);
 MONGO_FP_DECLARE(failMigrationConfigWritePrepare);
-MONGO_FP_DECLARE(failMigrationApplyOps);
 
 } // namespace
 
@@ -466,13 +465,9 @@ Status ChunkMoveOperationState::commitMigration(const MigrationSessionId& sessio
                 ErrorCodes::PrepareConfigsFailed);
         }
 
-        applyOpsStatus =
-            grid.catalogManager(_txn)->applyChunkOpsDeprecated(_txn, updates.arr(), preCond.arr());
+        applyOpsStatus = grid.catalogManager(_txn)->applyChunkOpsDeprecated(
+            _txn, updates.arr(), preCond.arr(), _nss.ns(), nextVersion);
 
-        if (MONGO_FAIL_POINT(failMigrationApplyOps)) {
-            throw SocketException(SocketException::RECV_ERROR,
-                                  shardingState->getConfigServer(_txn).toString());
-        }
     } catch (const DBException& ex) {
         applyOpsStatus = ex.toStatus();
     }
@@ -501,53 +496,7 @@ Status ChunkMoveOperationState::commitMigration(const MigrationSessionId& sessio
               << causedBy(applyOpsStatus);
         return Status(applyOpsStatus.code(), msg);
     } else if (!applyOpsStatus.isOK()) {
-        // This could be a blip in the connectivity. Wait out a few seconds and check if the
-        // commit request made it.
-        //
-        // If the commit made it to the config, we'll see the chunk in the new shard and
-        // there's no further action to be done.
-        //
-        // If the commit did not make it, currently the only way to fix this state is to
-        // bounce the mongod so that the old state (before migrating) is brought in.
-
-        warning() << "moveChunk commit failed and metadata will be revalidated"
-                  << causedBy(applyOpsStatus) << migrateLog;
-        sleepsecs(10);
-
-        // Look for the chunk in this shard whose version got bumped. We assume that if that
-        // mod made it to the config server, then applyOps was successful.
-        try {
-            std::vector<ChunkType> newestChunk;
-            Status status =
-                grid.catalogManager(_txn)->getChunks(_txn,
-                                                     BSON(ChunkType::ns(_nss.ns())),
-                                                     BSON(ChunkType::DEPRECATED_lastmod() << -1),
-                                                     1,
-                                                     &newestChunk,
-                                                     nullptr);
-            uassertStatusOK(status);
-
-            ChunkVersion checkVersion;
-            if (!newestChunk.empty()) {
-                invariant(newestChunk.size() == 1);
-                checkVersion = newestChunk[0].getVersion();
-            }
-
-            if (checkVersion.equals(nextVersion)) {
-                log() << "moveChunk commit confirmed" << migrateLog;
-            } else {
-                error() << "moveChunk commit failed: version is at " << checkVersion
-                        << " instead of " << nextVersion << migrateLog;
-                error() << "TERMINATING" << migrateLog;
-
-                dbexit(EXIT_SHARDING_ERROR);
-            }
-        } catch (...) {
-            error() << "moveChunk failed to get confirmation of commit" << migrateLog;
-            error() << "TERMINATING" << migrateLog;
-
-            dbexit(EXIT_SHARDING_ERROR);
-        }
+        fassertStatusOK(34431, applyOpsStatus);
     }
 
     MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);
diff --git a/src/mongo/s/catalog/catalog_manager.h b/src/mongo/s/catalog/catalog_manager.h
index 2172c05964a..eadfdc9c231 100644
--- a/src/mongo/s/catalog/catalog_manager.h
+++ b/src/mongo/s/catalog/catalog_manager.h
@@ -47,6 +47,7 @@ class BSONArrayBuilder;
 class BSONObj;
 class BSONObjBuilder;
 class ChunkType;
+struct ChunkVersion;
 class CollectionType;
 class ConnectionString;
 class DatabaseType;
@@ -317,12 +318,23 @@ public:
      * Applies oplog entries to the config servers.
      * Used by mergeChunk, splitChunk, and moveChunk commands.
      *
-     * @param updateOps: oplog entries to apply
-     * @param preCondition: preconditions for applying oplog entries
+     * @param updateOps: documents to write to the chunks collection.
+     * @param preCondition: preconditions for applying documents.
+     * @param nss: namespace string for the chunks collection.
+     * @param lastChunkVersion: version of the last document being written to the chunks
+     * collection.
+     *
+     * 'nss' and 'lastChunkVersion' uniquely identify the last document being written, which is
+     * expected to appear in the chunks collection on success. This is important for the
+     * case where network problems cause a retry of a successful write, which then returns
+     * failure because the precondition no longer matches. If a query of the chunks collection
+     * returns a document matching both 'nss' and 'lastChunkVersion,' the write succeeded.
      */
     virtual Status applyChunkOpsDeprecated(OperationContext* txn,
                                            const BSONArray& updateOps,
-                                           const BSONArray& preCondition) = 0;
+                                           const BSONArray& preCondition,
+                                           const std::string& nss,
+                                           const ChunkVersion& lastChunkVersion) = 0;
 
     /**
      * Writes a diagnostic event to the action log.
diff --git a/src/mongo/s/catalog/catalog_manager_common.cpp b/src/mongo/s/catalog/catalog_manager_common.cpp
index 49f460af05c..d8943f626d3 100644
--- a/src/mongo/s/catalog/catalog_manager_common.cpp
+++ b/src/mongo/s/catalog/catalog_manager_common.cpp
@@ -60,6 +60,8 @@
 namespace mongo {
 
+MONGO_FP_DECLARE(failApplyChunkOps);
+
 using std::string;
 using std::unique_ptr;
 using std::vector;
diff --git a/src/mongo/s/catalog/catalog_manager_common.h b/src/mongo/s/catalog/catalog_manager_common.h
index 7f1c5e92686..95353f800f8 100644
--- a/src/mongo/s/catalog/catalog_manager_common.h
+++ b/src/mongo/s/catalog/catalog_manager_common.h
@@ -39,9 +39,12 @@
 #include "mongo/s/client/shard.h"
 #include "mongo/s/optime_pair.h"
 #include "mongo/stdx/memory.h"
+#include "mongo/util/fail_point_service.h"
 
 namespace mongo {
 
+MONGO_FP_FORWARD_DECLARE(failApplyChunkOps);
+
 /**
  * Common implementation shared by concrete catalog manager classes.
  */
diff --git a/src/mongo/s/catalog/catalog_manager_mock.cpp b/src/mongo/s/catalog/catalog_manager_mock.cpp
index 60bc7018b42..8ab6fdc3ee9 100644
--- a/src/mongo/s/catalog/catalog_manager_mock.cpp
+++ b/src/mongo/s/catalog/catalog_manager_mock.cpp
@@ -162,7 +162,9 @@ bool CatalogManagerMock::runUserManagementReadCommand(OperationContext* txn,
 
 Status CatalogManagerMock::applyChunkOpsDeprecated(OperationContext* txn,
                                                    const BSONArray& updateOps,
-                                                   const BSONArray& preCondition) {
+                                                   const BSONArray& preCondition,
+                                                   const std::string& nss,
+                                                   const ChunkVersion& lastChunkVersion) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
diff --git a/src/mongo/s/catalog/catalog_manager_mock.h b/src/mongo/s/catalog/catalog_manager_mock.h
index ab3ade439b2..2ff1c71fab4 100644
--- a/src/mongo/s/catalog/catalog_manager_mock.h
+++ b/src/mongo/s/catalog/catalog_manager_mock.h
@@ -121,7 +121,9 @@ public:
 
     Status applyChunkOpsDeprecated(OperationContext* txn,
                                    const BSONArray& updateOps,
-                                   const BSONArray& preCondition) override;
+                                   const BSONArray& preCondition,
+                                   const std::string& nss,
+                                   const ChunkVersion& lastChunkVersion) override;
 
     Status logAction(OperationContext* txn,
                      const std::string& what,
diff --git a/src/mongo/s/catalog/forwarding_catalog_manager.cpp b/src/mongo/s/catalog/forwarding_catalog_manager.cpp
index cdc956fef32..63e75997d1e 100644
--- a/src/mongo/s/catalog/forwarding_catalog_manager.cpp
+++ b/src/mongo/s/catalog/forwarding_catalog_manager.cpp
@@ -532,9 +532,14 @@ bool ForwardingCatalogManager::runUserManagementReadCommand(OperationContext* tx
 
 Status ForwardingCatalogManager::applyChunkOpsDeprecated(OperationContext* txn,
                                                          const BSONArray& updateOps,
-                                                         const BSONArray& preCondition) {
+                                                         const BSONArray& preCondition,
+                                                         const std::string& nss,
+                                                         const ChunkVersion& lastChunkVersion) {
     return retry(txn,
-                 [&] { return _actual->applyChunkOpsDeprecated(txn, updateOps, preCondition); });
+                 [&] {
+                     return _actual->applyChunkOpsDeprecated(
+                         txn, updateOps, preCondition, nss, lastChunkVersion);
+                 });
 }
 
 Status ForwardingCatalogManager::logAction(OperationContext* txn,
diff --git a/src/mongo/s/catalog/forwarding_catalog_manager.h b/src/mongo/s/catalog/forwarding_catalog_manager.h
index 23a450a9752..15227b16c29 100644
--- a/src/mongo/s/catalog/forwarding_catalog_manager.h
+++ b/src/mongo/s/catalog/forwarding_catalog_manager.h
@@ -198,7 +198,9 @@ private:
 
     Status applyChunkOpsDeprecated(OperationContext* txn,
                                    const BSONArray& updateOps,
-                                   const BSONArray& preCondition) override;
+                                   const BSONArray& preCondition,
+                                   const std::string& nss,
+                                   const ChunkVersion& lastChunkVersion) override;
 
     Status logAction(OperationContext* txn,
                      const std::string& what,
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
index 00741a31016..82d66a63316 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.cpp
@@ -962,7 +962,9 @@ StatusWith<BSONObj> CatalogManagerLegacy::_findOneOnConfig(const string& ns, con
 
 Status CatalogManagerLegacy::applyChunkOpsDeprecated(OperationContext* txn,
                                                      const BSONArray& updateOps,
-                                                     const BSONArray& preCondition) {
+                                                     const BSONArray& preCondition,
+                                                     const std::string& nss,
+                                                     const ChunkVersion& lastChunkVersion) {
     BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition);
 
     BSONObj cmdResult;
     try {
@@ -974,9 +976,47 @@ Status CatalogManagerLegacy::applyChunkOpsDeprecated(OperationContext* txn,
     }
 
     Status status = Command::getStatusFromCommandResult(cmdResult);
+
+    if (MONGO_FAIL_POINT(failApplyChunkOps)) {
+        status = Status(ErrorCodes::InternalError, "Failpoint 'failApplyChunkOps' generated error");
+    }
+
     if (!status.isOK()) {
-        string errMsg(str::stream() << "Unable to save chunk ops. Command: " << cmd
-                                    << ". Result: " << cmdResult);
+        string errMsg;
+
+        // This could be a blip in the network connectivity. Check if the commit request made it.
+        //
+        // If all the updates were successfully written to the chunks collection, the last
+        // document in the list of updates should be returned from a query to the chunks
+        // collection. The last chunk can be identified by namespace and version number.
+
+        warning() << "chunk operation commit failed and metadata will be revalidated"
+                  << causedBy(status);
+
+        std::vector<ChunkType> newestChunk;
+        BSONObjBuilder query;
+        lastChunkVersion.addToBSON(query, ChunkType::DEPRECATED_lastmod());
+        query.append(ChunkType::ns(), nss);
+        Status chunkStatus = getChunks(txn, query.obj(), BSONObj(), 1, &newestChunk, nullptr);
+
+        if (!chunkStatus.isOK()) {
+            warning() << "getChunks function failed, unable to validate chunk operation metadata"
+                      << causedBy(chunkStatus);
+            errMsg = str::stream() << "getChunks function failed, unable to validate chunk "
+                                   << "operation metadata: " << causedBy(chunkStatus)
+                                   << ". applyChunkOpsDeprecated failed to get confirmation "
+                                   << "of commit. Unable to save chunk ops. Command: " << cmd
+                                   << ". Result: " << cmdResult;
+        } else if (!newestChunk.empty()) {
+            invariant(newestChunk.size() == 1);
+            log() << "chunk operation commit confirmed";
+            return Status::OK();
+        } else {
+            errMsg = str::stream() << "chunk operation commit failed: version "
+                                   << lastChunkVersion.toString() << " doesn't exist in namespace"
+                                   << nss << ". Unable to save chunk ops. Command: " << cmd
+                                   << ". Result: " << cmdResult;
+        }
 
         return Status(status.code(), errMsg);
     }
diff --git a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
index 785500a0547..769113b923e 100644
--- a/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
+++ b/src/mongo/s/catalog/legacy/catalog_manager_legacy.h
@@ -123,7 +123,9 @@ public:
 
     Status applyChunkOpsDeprecated(OperationContext* txn,
                                    const BSONArray& updateOps,
-                                   const BSONArray& preCondition) override;
+                                   const BSONArray& preCondition,
+                                   const std::string& nss,
+                                   const ChunkVersion& lastChunkVersion) override;
 
     StatusWith<SettingsType> getGlobalSettings(OperationContext* txn,
                                                const std::string& key) override;
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index 6355d87ad62..9179b0f3c0a 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -875,9 +875,12 @@ bool CatalogManagerReplicaSet::runUserManagementReadCommand(OperationContext* tx
 
 Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
                                                          const BSONArray& updateOps,
-                                                         const BSONArray& preCondition) {
+                                                         const BSONArray& preCondition,
+                                                         const std::string& nss,
+                                                         const ChunkVersion& lastChunkVersion) {
     BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
                                   << kWriteConcernField << kMajorityWriteConcern.toBSON());
+
     auto response = grid.shardRegistry()->runCommandOnConfigWithRetries(
         txn, "config", cmd, ShardRegistry::kAllRetriableErrors);
 
@@ -886,12 +889,52 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
     }
 
     Status status = Command::getStatusFromCommandResult(response.getValue());
-    if (!status.isOK()) {
-        string errMsg(str::stream() << "Unable to save chunk ops. Command: " << cmd
-                                    << ". Result: " << response.getValue());
+    if (MONGO_FAIL_POINT(failApplyChunkOps)) {
+        status = Status(ErrorCodes::InternalError, "Failpoint 'failApplyChunkOps' generated error");
+    }
+
+    if (!status.isOK()) {
+        string errMsg;
+
+        // This could be a blip in the network connectivity. Check if the commit request made it.
+        //
+        // If all the updates were successfully written to the chunks collection, the last
+        // document in the list of updates should be returned from a query to the chunks
+        // collection. The last chunk can be identified by namespace and version number.
+
+        warning() << "chunk operation commit failed and metadata will be revalidated"
+                  << causedBy(status);
+
+        // Look for the chunk in this shard whose version got bumped. We assume that if that
+        // mod made it to the config server, then applyOps was successful.
+        std::vector<ChunkType> newestChunk;
+        BSONObjBuilder query;
+        lastChunkVersion.addToBSON(query, ChunkType::DEPRECATED_lastmod());
+        query.append(ChunkType::ns(), nss);
+        Status chunkStatus = getChunks(txn, query.obj(), BSONObj(), 1, &newestChunk, nullptr);
+
+        if (!chunkStatus.isOK()) {
+            warning() << "getChunks function failed, unable to validate chunk operation metadata"
+                      << causedBy(chunkStatus);
+            errMsg = str::stream() << "getChunks function failed, unable to validate chunk "
+                                   << "operation metadata: " << causedBy(chunkStatus)
+                                   << ". applyChunkOpsDeprecated failed to get confirmation "
+                                   << "of commit. Unable to save chunk ops. Command: " << cmd
+                                   << ". Result: " << response.getValue();
+        } else if (!newestChunk.empty()) {
+            invariant(newestChunk.size() == 1);
+            log() << "chunk operation commit confirmed";
+            return Status::OK();
+        } else {
+            errMsg = str::stream() << "chunk operation commit failed: version "
+                                   << lastChunkVersion.toString() << " doesn't exist in namespace"
+                                   << nss << ". Unable to save chunk ops. Command: " << cmd
+                                   << ". Result: " << response.getValue();
+        }
 
         return Status(status.code(), errMsg);
     }
+
     return Status::OK();
 }
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
index 93a8dc06ca7..52feade2277 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.h
@@ -116,7 +116,9 @@ public:
 
     Status applyChunkOpsDeprecated(OperationContext* txn,
                                    const BSONArray& updateOps,
-                                   const BSONArray& preCondition) override;
+                                   const BSONArray& preCondition,
+                                   const std::string& nss,
+                                   const ChunkVersion& lastChunkVersion) override;
 
     StatusWith<SettingsType> getGlobalSettings(OperationContext* txn,
                                                const std::string& key) override;
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index 317a1a1358b..d62025ef97b 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -1497,7 +1497,7 @@ TEST_F(CatalogManagerReplSetTest, UpdateDatabaseExceededTimeLimit) {
     future.timed_get(kFutureTimeout);
 }
 
-TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecated) {
+TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessful) {
     configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
     BSONArray updateOps = BSON_ARRAY(BSON("update1"
                                           << "first update")
                                      << BSON("update2"
                                              << "second update"));
     BSONArray preCondition = BSON_ARRAY(BSON("precondition1"
                                              << "first precondition")
                                         << BSON("precondition2"
                                                 << "second precondition"));
+    std::string nss = "config.chunks";
+    ChunkVersion lastChunkVersion(0, 0, OID());
 
-    auto future = launchAsync([this, updateOps, preCondition] {
-        auto status =
-            catalogManager()->applyChunkOpsDeprecated(operationContext(), updateOps, preCondition);
+    auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
+        auto status = catalogManager()->applyChunkOpsDeprecated(
+            operationContext(), updateOps, preCondition, nss, lastChunkVersion);
         ASSERT_OK(status);
     });
 
     onCommand(
-        [updateOps, preCondition](const RemoteCommandRequest& request) {
+        [updateOps, preCondition, nss](const RemoteCommandRequest& request) {
             ASSERT_EQUALS("config", request.dbname);
 
             ASSERT_EQUALS(BSON("w"
                                << "majority"
                                << "wtimeout" << 15000),
                           request.cmdObj["writeConcern"].Obj());
 
-            ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
-
             ASSERT_EQUALS(updateOps, request.cmdObj["applyOps"].Obj());
             ASSERT_EQUALS(preCondition, request.cmdObj["preCondition"].Obj());
 
@@ -1535,7 +1535,7 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecated) {
     future.timed_get(kFutureTimeout);
 }
 
-TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedCommandFailed) {
+TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessfulWithCheck) {
    configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
 
     BSONArray updateOps = BSON_ARRAY(BSON("update1"
                                           << "first update")
                                      << BSON("update2"
                                              << "second update"));
     BSONArray preCondition = BSON_ARRAY(BSON("precondition1"
                                              << "first precondition")
                                         << BSON("precondition2"
                                                 << "second precondition"));
+    std::string nss = "config.chunks";
+    ChunkVersion lastChunkVersion(0, 0, OID());
 
-    auto future = launchAsync([this, updateOps, preCondition] {
-        auto status =
-            catalogManager()->applyChunkOpsDeprecated(operationContext(), updateOps, preCondition);
-        ASSERT_EQUALS(ErrorCodes::BadValue, status);
+    auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
+        auto status = catalogManager()->applyChunkOpsDeprecated(
+            operationContext(), updateOps, preCondition, nss, lastChunkVersion);
+        ASSERT_OK(status);
     });
 
-    onCommand(
-        [updateOps, preCondition](const RemoteCommandRequest& request) {
-            ASSERT_EQUALS("config", request.dbname);
-            ASSERT_EQUALS(BSON("w"
-                               << "majority"
-                               << "wtimeout" << 15000),
-                          request.cmdObj["writeConcern"].Obj());
-            ASSERT_EQUALS(updateOps, request.cmdObj["applyOps"].Obj());
-            ASSERT_EQUALS(preCondition, request.cmdObj["preCondition"].Obj());
+    onCommand([&](const RemoteCommandRequest& request) {
+        BSONObjBuilder responseBuilder;
+        Command::appendCommandStatus(responseBuilder,
+                                     Status(ErrorCodes::DuplicateKey, "precondition failed"));
+        return responseBuilder.obj();
+    });
 
-            ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+    onFindCommand([this](const RemoteCommandRequest& request) {
+        OID oid = OID::gen();
+        ChunkType chunk;
+        chunk.setName("chunk0000");
+        chunk.setNS("TestDB.TestColl");
+        chunk.setMin(BSON("a" << 1));
+        chunk.setMax(BSON("a" << 100));
+        chunk.setVersion({1, 2, oid});
+        chunk.setShard("shard0000");
+        return vector<BSONObj>{chunk.toBSON()};
+    });
 
-            BSONObjBuilder responseBuilder;
-            Command::appendCommandStatus(responseBuilder,
-                                         Status(ErrorCodes::BadValue, "precondition failed"));
-            return responseBuilder.obj();
-        });
+    // Now wait for the applyChunkOpsDeprecated call to return
+    future.timed_get(kFutureTimeout);
+}
+
+TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedFailedWithCheck) {
+    configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
+
+    BSONArray updateOps = BSON_ARRAY(BSON("update1"
+                                          << "first update")
+                                     << BSON("update2"
+                                             << "second update"));
+    BSONArray preCondition = BSON_ARRAY(BSON("precondition1"
+                                             << "first precondition")
+                                        << BSON("precondition2"
+                                                << "second precondition"));
+    std::string nss = "config.chunks";
+    ChunkVersion lastChunkVersion(0, 0, OID());
+
+    auto future = launchAsync([this, updateOps, preCondition, nss, lastChunkVersion] {
+        auto status = catalogManager()->applyChunkOpsDeprecated(
+            operationContext(), updateOps, preCondition, nss, lastChunkVersion);
+        ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, status);
+    });
+
+    onCommand([&](const RemoteCommandRequest& request) {
+        BSONObjBuilder responseBuilder;
+        Command::appendCommandStatus(responseBuilder,
+                                     Status(ErrorCodes::NoMatchingDocument, "some error"));
+        return responseBuilder.obj();
+    });
+
+    onFindCommand([this](const RemoteCommandRequest& request) { return vector<BSONObj>{}; });
 
     // Now wait for the applyChunkOpsDeprecated call to return
     future.timed_get(kFutureTimeout);
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index c4844c964fa..07aabc79a46 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -354,6 +354,8 @@ Status runApplyOpsCmd(OperationContext* txn,
     }
 
     BSONArray preCond = buildOpPrecond(firstChunk.getNS(), firstChunk.getShard(), currShardVersion);
-    return grid.catalogManager(txn)->applyChunkOpsDeprecated(txn, updatesB.arr(), preCond);
+
+    return grid.catalogManager(txn)->applyChunkOpsDeprecated(
+        txn, updatesB.arr(), preCond, firstChunk.getNS(), newMergedVersion);
 }
 }
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 2ba4b36d8bd..3b72bbca20c 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -811,8 +811,9 @@ public:
         //
        // 4. apply the batch of updates to remote and local metadata
        //
-        Status applyOpsStatus =
-            grid.catalogManager(txn)->applyChunkOpsDeprecated(txn, updates.arr(), preCond.arr());
+
+        Status applyOpsStatus = grid.catalogManager(txn)->applyChunkOpsDeprecated(
+            txn, updates.arr(), preCond.arr(), nss.ns(), nextChunkVersion);
         if (!applyOpsStatus.isOK()) {
             return appendCommandStatus(result, applyOpsStatus);
         }