author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-03-22 14:21:30 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-03-23 09:10:57 -0400
commit    d9a1dd9dbd4349b18151a3223a718cf1642cd5b6 (patch)
tree      938a3a971582ee8eca9b5969be1a2b0e1ee12a10
parent    4deaa5883717145719caaca5002aa3ce4bf1fc76 (diff)
SERVER-23296 Move pending chunk management under MigrationDestinationManager
Moves the code which manipulates the pending chunk data to reside under the migration destination manager instead of the global sharding state, and makes it use the collection sharding state instead of the global map.
-rw-r--r--  jstests/sharding/balancer_window.js (renamed from jstests/noPassthrough/balancer_window.js)    0
-rw-r--r--  src/mongo/db/s/collection_metadata.cpp              84
-rw-r--r--  src/mongo/db/s/collection_metadata.h                10
-rw-r--r--  src/mongo/db/s/collection_metadata_test.cpp        169
-rw-r--r--  src/mongo/db/s/metadata_loader_fixture.h            68
-rw-r--r--  src/mongo/db/s/metadata_loader_test.cpp            226
-rw-r--r--  src/mongo/db/s/migration_destination_manager.cpp   125
-rw-r--r--  src/mongo/db/s/migration_destination_manager.h      37
-rw-r--r--  src/mongo/db/s/sharding_state.cpp                   87
-rw-r--r--  src/mongo/db/s/sharding_state.h                     37
10 files changed, 289 insertions(+), 554 deletions(-)
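
Before the first hunk, it may help to see the shape of the API change in isolation: the pending-chunk clone methods previously returned a raw pointer and reported failure through an optional errMsg out-parameter; after this commit they return std::unique_ptr and treat an illegal call as a programmer error. A minimal standalone sketch of the two conventions (Chunk and Metadata are simplified stand-ins for ChunkType/CollectionMetadata, not the real types):

    #include <cassert>
    #include <memory>

    // Hypothetical stand-ins; only the ownership and error-handling shape of
    // the refactoring is modeled here.
    struct Chunk {
        int min;
        int max;
    };

    struct Metadata {
        bool hasPending = false;

        // New convention: the precondition is asserted (the real code uses
        // invariant(!rangeMapOverlaps(...))) and ownership is explicit in the
        // return type, instead of a raw pointer plus an errMsg out-parameter.
        std::unique_ptr<Metadata> clonePlusPending(const Chunk& chunk) const {
            assert(!hasPending);
            auto cloned = std::make_unique<Metadata>(*this);
            cloned->hasPending = true;
            return cloned;
        }

        std::unique_ptr<Metadata> cloneMinusPending(const Chunk& chunk) const {
            assert(hasPending);
            auto cloned = std::make_unique<Metadata>(*this);
            cloned->hasPending = false;
            return cloned;
        }
    };

    int main() {
        Metadata base;
        auto plus = base.clonePlusPending(Chunk{10, 20});
        auto minus = plus->cloneMinusPending(Chunk{10, 20});
        assert(plus->hasPending && !minus->hasPending);
        return 0;
    }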
diff --git a/jstests/noPassthrough/balancer_window.js b/jstests/sharding/balancer_window.js
index 9f100846f1e..9f100846f1e 100644
--- a/jstests/noPassthrough/balancer_window.js
+++ b/jstests/sharding/balancer_window.js
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index 787b201b53b..d2ce3b91895 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -122,73 +122,37 @@ CollectionMetadata* CollectionMetadata::clonePlusChunk(const ChunkType& chunk,
return metadata.release();
}
-CollectionMetadata* CollectionMetadata::cloneMinusPending(const ChunkType& pending,
- string* errMsg) const {
- // The error message string is optional.
- string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- // Check that we have the exact chunk that will be subtracted.
- if (!rangeMapContains(_pendingMap, pending.getMin(), pending.getMax())) {
- *errMsg = stream() << "cannot remove pending chunk "
- << rangeToString(pending.getMin(), pending.getMax())
- << ", this shard does not contain the chunk";
-
- if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
- RangeVector overlap;
- getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &overlap);
+std::unique_ptr<CollectionMetadata> CollectionMetadata::cloneMinusPending(
+ const ChunkType& chunk) const {
+ invariant(rangeMapContains(_pendingMap, chunk.getMin(), chunk.getMax()));
- *errMsg += stream() << " and it overlaps " << overlapToString(overlap);
- }
-
- warning() << *errMsg;
- return NULL;
- }
-
- unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
- metadata->_keyPattern = this->_keyPattern;
+ unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
+ metadata->_keyPattern = _keyPattern;
metadata->_keyPattern.getOwned();
metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_pendingMap.erase(pending.getMin());
- metadata->_chunksMap = this->_chunksMap;
- metadata->_rangesMap = this->_rangesMap;
+ metadata->_pendingMap = _pendingMap;
+ metadata->_pendingMap.erase(chunk.getMin());
+
+ metadata->_chunksMap = _chunksMap;
+ metadata->_rangesMap = _rangesMap;
metadata->_shardVersion = _shardVersion;
metadata->_collVersion = _collVersion;
invariant(metadata->isValid());
- return metadata.release();
+ return metadata;
}
-CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pending,
- string* errMsg) const {
- // The error message string is optional.
- string dummy;
- if (errMsg == NULL) {
- errMsg = &dummy;
- }
-
- if (rangeMapOverlaps(_chunksMap, pending.getMin(), pending.getMax())) {
- RangeVector overlap;
- getRangeMapOverlap(_chunksMap, pending.getMin(), pending.getMax(), &overlap);
-
- *errMsg = stream() << "cannot add pending chunk "
- << rangeToString(pending.getMin(), pending.getMax())
- << " because the chunk overlaps " << overlapToString(overlap);
-
- warning() << *errMsg;
- return NULL;
- }
+std::unique_ptr<CollectionMetadata> CollectionMetadata::clonePlusPending(
+ const ChunkType& chunk) const {
+ invariant(!rangeMapOverlaps(_chunksMap, chunk.getMin(), chunk.getMax()));
- unique_ptr<CollectionMetadata> metadata(new CollectionMetadata);
- metadata->_keyPattern = this->_keyPattern;
+ unique_ptr<CollectionMetadata> metadata(stdx::make_unique<CollectionMetadata>());
+ metadata->_keyPattern = _keyPattern;
metadata->_keyPattern.getOwned();
metadata->fillKeyPatternFields();
- metadata->_pendingMap = this->_pendingMap;
- metadata->_chunksMap = this->_chunksMap;
- metadata->_rangesMap = this->_rangesMap;
+ metadata->_pendingMap = _pendingMap;
+ metadata->_chunksMap = _chunksMap;
+ metadata->_rangesMap = _rangesMap;
metadata->_shardVersion = _shardVersion;
metadata->_collVersion = _collVersion;
@@ -198,11 +162,11 @@ CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pendin
// We remove any chunks we overlap, the remote request starting a chunk migration must have
// been authoritative.
- if (rangeMapOverlaps(_pendingMap, pending.getMin(), pending.getMax())) {
+ if (rangeMapOverlaps(_pendingMap, chunk.getMin(), chunk.getMax())) {
RangeVector pendingOverlap;
- getRangeMapOverlap(_pendingMap, pending.getMin(), pending.getMax(), &pendingOverlap);
+ getRangeMapOverlap(_pendingMap, chunk.getMin(), chunk.getMax(), &pendingOverlap);
- warning() << "new pending chunk " << rangeToString(pending.getMin(), pending.getMax())
+ warning() << "new pending chunk " << rangeToString(chunk.getMin(), chunk.getMax())
<< " overlaps existing pending chunks " << overlapToString(pendingOverlap)
<< ", a migration may not have completed";
@@ -211,10 +175,10 @@ CollectionMetadata* CollectionMetadata::clonePlusPending(const ChunkType& pendin
}
}
- metadata->_pendingMap.insert(make_pair(pending.getMin(), pending.getMax()));
+ metadata->_pendingMap.insert(make_pair(chunk.getMin(), chunk.getMax()));
invariant(metadata->isValid());
- return metadata.release();
+ return metadata;
}
CollectionMetadata* CollectionMetadata::cloneSplit(const ChunkType& chunk,
diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h
index 6d9a1063b0c..d11b0ca2fff 100644
--- a/src/mongo/db/s/collection_metadata.h
+++ b/src/mongo/db/s/collection_metadata.h
@@ -71,22 +71,16 @@ public:
*
* The shard and collection version of the new metadata are unaffected. The caller owns the
* new metadata.
- *
- * If a new metadata can't be created, returns NULL and fills in 'errMsg', if it was
- * provided.
*/
- CollectionMetadata* cloneMinusPending(const ChunkType& pending, std::string* errMsg) const;
+ std::unique_ptr<CollectionMetadata> cloneMinusPending(const ChunkType& chunk) const;
/**
* Returns a new metadata's instance based on 'this's state by adding a 'pending' chunk.
*
* The shard and collection version of the new metadata are unaffected. The caller owns the
* new metadata.
- *
- * If a new metadata can't be created, returns NULL and fills in 'errMsg', if it was
- * provided.
*/
- CollectionMetadata* clonePlusPending(const ChunkType& pending, std::string* errMsg) const;
+ std::unique_ptr<CollectionMetadata> clonePlusPending(const ChunkType& chunk) const;
/**
* Returns a new metadata's instance based on 'this's state by removing 'chunk'. When cloning
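At call sites the change reads as below; the test rewrites in the next file follow exactly this pattern. This is a sketch assembled from the hunks above, not additional committed code:

    // Before: reset a unique_ptr from a raw pointer, then check the out-parameter.
    //     cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
    //     ASSERT_EQUALS(errMsg, "");
    //     ASSERT(cloned != NULL);
    //
    // After: ownership transfers directly, and chained clones are plain moves.
    //     unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
    //     cloned = cloned->clonePlusPending(chunk);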
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index 473d3977c2f..ffd3904bdfc 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -26,6 +26,8 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/base/status.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
@@ -36,7 +38,6 @@
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/write_ops/batched_command_response.h"
namespace mongo {
namespace {
@@ -45,8 +46,6 @@ using std::string;
using std::unique_ptr;
using std::vector;
-using executor::RemoteCommandResponse;
-
class NoChunkFixture : public CatalogManagerReplSetTestFixture {
protected:
void setUp() {
@@ -165,18 +164,11 @@ TEST_F(NoChunkFixture, NoPendingChunks) {
}
TEST_F(NoChunkFixture, FirstPendingChunk) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
ASSERT(cloned->keyIsPending(BSON("a" << 15)));
ASSERT(!cloned->keyIsPending(BSON("a" << 25)));
ASSERT(cloned->keyIsPending(BSON("a" << 10)));
@@ -184,26 +176,16 @@ TEST_F(NoChunkFixture, FirstPendingChunk) {
}
TEST_F(NoChunkFixture, EmptyMultiPendingChunk) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
chunk.setMin(BSON("a" << 40));
chunk.setMax(BSON("a" << 50));
- cloned.reset(cloned->clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
+ cloned = cloned->clonePlusPending(chunk);
ASSERT(cloned->keyIsPending(BSON("a" << 15)));
ASSERT(!cloned->keyIsPending(BSON("a" << 25)));
ASSERT(cloned->keyIsPending(BSON("a" << 45)));
@@ -211,48 +193,28 @@ TEST_F(NoChunkFixture, EmptyMultiPendingChunk) {
}
TEST_F(NoChunkFixture, MinusPendingChunk) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
- cloned.reset(cloned->cloneMinusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
+ cloned = cloned->cloneMinusPending(chunk);
ASSERT(!cloned->keyIsPending(BSON("a" << 15)));
ASSERT(!cloned->keyIsPending(BSON("a" << 25)));
}
TEST_F(NoChunkFixture, OverlappingPendingChunk) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 30));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 40));
- cloned.reset(cloned->clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
+ cloned = cloned->clonePlusPending(chunk);
ASSERT(!cloned->keyIsPending(BSON("a" << 15)));
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
ASSERT(cloned->keyIsPending(BSON("a" << 35)));
@@ -260,33 +222,21 @@ TEST_F(NoChunkFixture, OverlappingPendingChunk) {
}
TEST_F(NoChunkFixture, OverlappingPendingChunks) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 30));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
chunk.setMin(BSON("a" << 30));
chunk.setMax(BSON("a" << 50));
- cloned.reset(cloned->clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ cloned = cloned->clonePlusPending(chunk);
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 40));
- cloned.reset(cloned->clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ cloned = cloned->clonePlusPending(chunk);
ASSERT(!cloned->keyIsPending(BSON("a" << 15)));
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
@@ -294,61 +244,19 @@ TEST_F(NoChunkFixture, OverlappingPendingChunks) {
ASSERT(!cloned->keyIsPending(BSON("a" << 45)));
}
-TEST_F(NoChunkFixture, MinusInvalidPendingChunk) {
- string errMsg;
- ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
- chunk.setMin(BSON("a" << 10));
- chunk.setMax(BSON("a" << 30));
-
- cloned.reset(getCollMetadata().cloneMinusPending(chunk, &errMsg));
-
- ASSERT_NOT_EQUALS(errMsg, "");
- ASSERT(cloned == NULL);
-}
-
-TEST_F(NoChunkFixture, MinusOverlappingPendingChunk) {
- string errMsg;
- ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
- chunk.setMin(BSON("a" << 10));
- chunk.setMax(BSON("a" << 30));
-
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
- chunk.setMin(BSON("a" << 15));
- chunk.setMax(BSON("a" << 35));
-
- cloned.reset(cloned->cloneMinusPending(chunk, &errMsg));
-
- ASSERT_NOT_EQUALS(errMsg, "");
- ASSERT(cloned == NULL);
-}
-
TEST_F(NoChunkFixture, PlusChunkWithPending) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
ASSERT(cloned->keyIsPending(BSON("a" << 15)));
ASSERT(!cloned->keyIsPending(BSON("a" << 25)));
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 30));
+ std::string errMsg;
cloned.reset(cloned->clonePlusChunk(
chunk, ChunkVersion(1, 0, cloned->getCollVersion().epoch()), &errMsg));
@@ -407,16 +315,11 @@ TEST_F(NoChunkFixture, OrphanedDataRangeEnd) {
}
TEST_F(NoChunkFixture, PendingOrphanedDataRanges) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 10));
chunk.setMax(BSON("a" << 20));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
- ASSERT_EQUALS(errMsg, string(""));
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
KeyRange keyRange;
ASSERT(cloned->getNextOrphanRange(cloned->getMinKey(), &keyRange));
@@ -537,17 +440,11 @@ TEST_F(SingleChunkFixture, DonateLastChunk) {
}
TEST_F(SingleChunkFixture, PlusPendingChunk) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 30));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
ASSERT(cloned->keyBelongsToMe(BSON("a" << 15)));
ASSERT(!cloned->keyBelongsToMe(BSON("a" << 25)));
@@ -555,27 +452,12 @@ TEST_F(SingleChunkFixture, PlusPendingChunk) {
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
}
-TEST_F(SingleChunkFixture, PlusOverlapPendingChunk) {
- string errMsg;
- ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
- chunk.setMin(BSON("a" << 10));
- chunk.setMax(BSON("a" << 20));
-
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_NOT_EQUALS(errMsg, "");
- ASSERT(cloned == NULL);
-}
-
TEST_F(SingleChunkFixture, MinusChunkWithPending) {
ChunkType chunk;
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 30));
- string errMsg;
- unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk, &errMsg));
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
ASSERT(!cloned->keyIsPending(BSON("a" << 35)));
@@ -665,18 +547,11 @@ TEST_F(SingleChunkFixture, MultiSplit) {
}
TEST_F(SingleChunkFixture, SplitChunkWithPending) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 20));
chunk.setMax(BSON("a" << 30));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
-
- ASSERT_EQUALS(errMsg, "");
- ASSERT(cloned != NULL);
-
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
ASSERT(cloned->keyIsPending(BSON("a" << 25)));
ASSERT(!cloned->keyIsPending(BSON("a" << 35)));
@@ -687,6 +562,7 @@ TEST_F(SingleChunkFixture, SplitChunkWithPending) {
splitPoints.push_back(BSON("a" << 14));
splitPoints.push_back(BSON("a" << 16));
+ std::string errMsg;
cloned.reset(cloned->cloneSplit(chunk,
splitPoints,
ChunkVersion(cloned->getCollVersion().majorVersion() + 1,
@@ -1001,16 +877,11 @@ TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapOrphanedDataRanges) {
}
TEST_F(TwoChunksWithGapCompoundKeyFixture, ChunkGapAndPendingOrphanedDataRanges) {
- string errMsg;
ChunkType chunk;
- unique_ptr<CollectionMetadata> cloned;
-
chunk.setMin(BSON("a" << 20 << "b" << 0));
chunk.setMax(BSON("a" << 30 << "b" << 0));
- cloned.reset(getCollMetadata().clonePlusPending(chunk, &errMsg));
- ASSERT_EQUALS(errMsg, string(""));
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(getCollMetadata().clonePlusPending(chunk));
KeyRange keyRange;
ASSERT(cloned->getNextOrphanRange(cloned->getMinKey(), &keyRange));
diff --git a/src/mongo/db/s/metadata_loader_fixture.h b/src/mongo/db/s/metadata_loader_fixture.h
deleted file mode 100644
index 1f12caa90fe..00000000000
--- a/src/mongo/db/s/metadata_loader_fixture.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Copyright (C) 2015 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/s/metadata_loader.h"
-#include "mongo/platform/basic.h"
-#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
-#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_collection.h"
-
-namespace mongo {
-
-using std::unique_ptr;
-
-class MetadataLoaderFixture : public CatalogManagerReplSetTestFixture {
-public:
- MetadataLoaderFixture();
- ~MetadataLoaderFixture();
-
-protected:
- void setUp() override;
-
- void expectFindOnConfigSendCollectionDefault();
- void expectFindOnConfigSendChunksDefault();
-
- MetadataLoader& loader() const;
-
- void getMetadataFor(const OwnedPointerVector<ChunkType>& chunks, CollectionMetadata* metadata);
-
- ChunkVersion getMaxCollVersion() const;
- ChunkVersion getMaxShardVersion() const;
-
- OID _epoch;
-
-private:
- ChunkVersion _maxCollVersion;
- unique_ptr<MetadataLoader> _loader;
- const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
-};
-
-} // namespace mongo
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index 0d5b93cc7fc..1e01a516802 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -26,114 +26,124 @@
* then also delete it in the license file.
*/
+#include "mongo/platform/basic.h"
+
#include "mongo/base/status.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
-#include "mongo/db/commands.h"
#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/metadata_loader_fixture.h"
-#include "mongo/s/write_ops/batched_command_response.h"
+#include "mongo/db/s/metadata_loader.h"
+#include "mongo/s/catalog/replset/catalog_manager_replica_set_test_fixture.h"
+#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/catalog/type_collection.h"
namespace mongo {
+namespace {
using std::string;
using std::unique_ptr;
using std::vector;
-using executor::RemoteCommandRequest;
+class MetadataLoaderFixture : public CatalogManagerReplSetTestFixture {
+public:
+ MetadataLoaderFixture() = default;
+ ~MetadataLoaderFixture() = default;
+
+protected:
+ void setUp() override {
+ CatalogManagerReplSetTestFixture::setUp();
+ getMessagingPort()->setRemote(HostAndPort("FakeRemoteClient:34567"));
+ configTargeter()->setFindHostReturnValue(configHost);
+ _maxCollVersion = ChunkVersion(1, 0, OID::gen());
+ _loader.reset(new MetadataLoader);
+ }
-MetadataLoaderFixture::MetadataLoaderFixture() = default;
-MetadataLoaderFixture::~MetadataLoaderFixture() = default;
+ void expectFindOnConfigSendCollectionDefault() {
+ CollectionType collType;
+ collType.setNs(NamespaceString{"test.foo"});
+ collType.setKeyPattern(BSON("a" << 1));
+ collType.setUnique(false);
+ collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
+ collType.setEpoch(_maxCollVersion.epoch());
+ ASSERT_OK(collType.validate());
+ expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
+ }
-void MetadataLoaderFixture::setUp() {
- CatalogManagerReplSetTestFixture::setUp();
- getMessagingPort()->setRemote(HostAndPort("FakeRemoteClient:34567"));
- configTargeter()->setFindHostReturnValue(configHost);
- _epoch = OID::gen();
- _maxCollVersion = ChunkVersion(1, 0, _epoch);
- _loader.reset(new MetadataLoader);
-}
+ void expectFindOnConfigSendChunksDefault() {
+ BSONObj chunk = BSON(
+ ChunkType::name("test.foo-a_MinKey")
+ << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY))
+ << ChunkType::max(BSON("a" << MAXKEY))
+ << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
+ << ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
+ << ChunkType::shard("shard0000"));
+ expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunk});
+ }
-void MetadataLoaderFixture::expectFindOnConfigSendCollectionDefault() {
- CollectionType collType;
- collType.setNs(NamespaceString{"test.foo"});
- collType.setKeyPattern(BSON("a" << 1));
- collType.setUnique(false);
- collType.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- collType.setEpoch(_epoch);
- ASSERT_OK(collType.validate());
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{collType.toBSON()});
-}
+ MetadataLoader& loader() const {
+ return *_loader;
+ }
-void MetadataLoaderFixture::expectFindOnConfigSendChunksDefault() {
- ChunkVersion _maxCollVersion = ChunkVersion(1, 0, _epoch);
- BSONObj chunk = BSON(
- ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY))
- << ChunkType::max(BSON("a" << MAXKEY))
- << ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(_epoch) << ChunkType::shard("shard0000"));
- expectFindOnConfigSendBSONObjVector(std::vector<BSONObj>{chunk});
-}
+ void getMetadataFor(const OwnedPointerVector<ChunkType>& chunks, CollectionMetadata* metadata) {
+ // Infer namespace, shard, epoch, keypattern from first chunk
+ const ChunkType* firstChunk = *(chunks.vector().begin());
+ const string ns = firstChunk->getNS();
+ const string shardName = firstChunk->getShard();
+ const OID epoch = firstChunk->getVersion().epoch();
+
+ CollectionType coll;
+ coll.setNs(NamespaceString{ns});
+ coll.setKeyPattern(BSON("a" << 1));
+ coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
+ coll.setEpoch(epoch);
+ ASSERT_OK(coll.validate());
+ std::vector<BSONObj> collToSend{coll.toBSON()};
+
+ ChunkVersion version(1, 0, epoch);
+ std::vector<BSONObj> chunksToSend;
+ for (const auto chunkVal : chunks.vector()) {
+ ChunkType chunk(*chunkVal);
+
+ chunk.setName(OID::gen().toString());
+ if (!chunk.isVersionSet()) {
+ chunk.setVersion(version);
+ version.incMajor();
+ }
+
+ ASSERT(chunk.validate().isOK());
+ chunksToSend.push_back(chunk.toBSON());
+ }
-MetadataLoader& MetadataLoaderFixture::loader() const {
- return *_loader;
-}
+ auto future = launchAsync([this, ns, shardName, metadata] {
+ auto status = loader().makeCollectionMetadata(operationContext(),
+ catalogManager(),
+ ns,
+ shardName,
+ NULL, /* no old metadata */
+ metadata);
+ ASSERT_OK(status);
+ });
-void MetadataLoaderFixture::getMetadataFor(const OwnedPointerVector<ChunkType>& chunks,
- CollectionMetadata* metadata) {
- // Infer namespace, shard, epoch, keypattern from first chunk
- const ChunkType* firstChunk = *(chunks.vector().begin());
- const string ns = firstChunk->getNS();
- const string shardName = firstChunk->getShard();
- const OID epoch = firstChunk->getVersion().epoch();
-
- CollectionType coll;
- coll.setNs(NamespaceString{ns});
- coll.setKeyPattern(BSON("a" << 1));
- coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
- coll.setEpoch(epoch);
- ASSERT_OK(coll.validate());
- std::vector<BSONObj> collToSend{coll.toBSON()};
-
- ChunkVersion version(1, 0, epoch);
- std::vector<BSONObj> chunksToSend;
- for (const auto chunkVal : chunks.vector()) {
- ChunkType chunk(*chunkVal);
-
- chunk.setName(OID::gen().toString());
- if (!chunk.isVersionSet()) {
- chunk.setVersion(version);
- version.incMajor();
- }
+ expectFindOnConfigSendBSONObjVector(collToSend);
+ expectFindOnConfigSendBSONObjVector(chunksToSend);
- ASSERT(chunk.validate().isOK());
- chunksToSend.push_back(chunk.toBSON());
+ future.timed_get(kFutureTimeout);
}
- auto future = launchAsync([this, ns, shardName, metadata] {
- auto status = this->loader().makeCollectionMetadata(operationContext(),
- catalogManager(),
- ns,
- shardName,
- NULL, /* no old metadata */
- metadata);
- ASSERT(status.isOK());
- });
-
- expectFindOnConfigSendBSONObjVector(collToSend);
- expectFindOnConfigSendBSONObjVector(chunksToSend);
+ ChunkVersion getMaxCollVersion() const {
+ return _maxCollVersion;
+ }
- future.timed_get(kFutureTimeout);
-}
+ ChunkVersion getMaxShardVersion() const {
+ return _maxCollVersion;
+ }
-ChunkVersion MetadataLoaderFixture::getMaxCollVersion() const {
- return _maxCollVersion;
-}
+private:
+ const HostAndPort configHost{HostAndPort(CONFIG_HOST_PORT)};
-ChunkVersion MetadataLoaderFixture::getMaxShardVersion() const {
- return _maxCollVersion;
-}
+ unique_ptr<MetadataLoader> _loader;
+ ChunkVersion _maxCollVersion;
+};
// TODO: Test config server down
// TODO: Test read of chunks with new epoch
@@ -284,7 +294,7 @@ TEST_F(MetadataLoaderFixture, CheckNumChunk) {
NULL, /* no old metadata */
&metadata);
std::cout << "status: " << status << std::endl;
- ASSERT(status.isOK());
+ ASSERT_OK(status);
ASSERT_EQUALS(0U, metadata.getNumChunks());
ASSERT_EQUALS(1, metadata.getCollVersion().majorVersion());
ASSERT_EQUALS(0, metadata.getShardVersion().majorVersion());
@@ -308,7 +318,7 @@ TEST_F(MetadataLoaderFixture, SingleChunkCheckNumChunk) {
"shard0000",
NULL, /* no old metadata */
&metadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
ASSERT_EQUALS(1U, metadata.getNumChunks());
});
@@ -434,18 +444,15 @@ TEST_F(MetadataLoaderFixture, PromotePendingNA) {
getMetadataFor(chunks, &remoteMetadata);
Status status = loader().promotePendingChunks(&afterMetadata, &remoteMetadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
- string errMsg;
ChunkType pending;
pending.setMin(BSON("x" << 0));
pending.setMax(BSON("x" << 10));
- unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
-
+ unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending));
status = loader().promotePendingChunks(cloned.get(), &remoteMetadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
ASSERT_EQUALS(remoteMetadata.getNumPending(), 0u);
}
@@ -472,18 +479,15 @@ TEST_F(MetadataLoaderFixture, PromotePendingNAVersion) {
getMetadataFor(chunks, &remoteMetadata);
Status status = loader().promotePendingChunks(&afterMetadata, &remoteMetadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
- string errMsg;
ChunkType pending;
pending.setMin(BSON("x" << 0));
pending.setMax(BSON("x" << 10));
- unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
-
+ unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending));
status = loader().promotePendingChunks(cloned.get(), &remoteMetadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
ASSERT_EQUALS(remoteMetadata.getNumPending(), 0u);
}
@@ -539,34 +543,29 @@ TEST_F(MetadataLoaderFixture, PromotePendingGoodOverlap) {
CollectionMetadata afterMetadata;
getMetadataFor(chunks, &afterMetadata);
- string errMsg;
ChunkType pending;
pending.setMin(BSON("x" << MINKEY));
pending.setMax(BSON("x" << 0));
- unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending));
pending.setMin(BSON("x" << 10));
pending.setMax(BSON("x" << 20));
- cloned.reset(cloned->clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
+ cloned = cloned->clonePlusPending(pending);
pending.setMin(BSON("x" << 20));
pending.setMax(BSON("x" << 30));
- cloned.reset(cloned->clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
+ cloned = cloned->clonePlusPending(pending);
pending.setMin(BSON("x" << 30));
pending.setMax(BSON("x" << MAXKEY));
- cloned.reset(cloned->clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
+ cloned = cloned->clonePlusPending(pending);
Status status = loader().promotePendingChunks(cloned.get(), &remoteMetadata);
- ASSERT(status.isOK());
+ ASSERT_OK(status);
ASSERT_EQUALS(remoteMetadata.getNumPending(), 1u);
ASSERT(remoteMetadata.keyIsPending(BSON("x" << 25)));
@@ -611,19 +610,16 @@ TEST_F(MetadataLoaderFixture, PromotePendingBadOverlap) {
CollectionMetadata afterMetadata;
getMetadataFor(chunks, &afterMetadata);
- string errMsg;
ChunkType pending;
pending.setMin(BSON("x" << MINKEY));
pending.setMax(BSON("x" << 1));
- unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
-
- cloned.reset(cloned->clonePlusPending(pending, &errMsg));
- ASSERT(cloned != NULL);
+ unique_ptr<CollectionMetadata> cloned(afterMetadata.clonePlusPending(pending));
+ cloned = cloned->clonePlusPending(pending);
Status status = loader().promotePendingChunks(cloned.get(), &remoteMetadata);
ASSERT_EQUALS(status.code(), ErrorCodes::RemoteChangeDetected);
}
+} // namespace
} // namespace mongo
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index deefb866bdb..d01d2b87d38 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -48,17 +48,19 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/operation_context_impl.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logger/ramlog.h"
#include "mongo/s/catalog/catalog_manager.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/stdx/chrono.h"
@@ -187,10 +189,6 @@ BSONObj createTransferModsRequest(const MigrationSessionId& sessionId) {
return builder.obj();
}
-MONGO_FP_DECLARE(failMigrationReceivedOutOfRangeOperation);
-
-} // namespace
-
// Enabling / disabling these fail points pauses / resumes MigrateStatus::_go(), the thread which
// receives a chunk migration from the donor.
MONGO_FP_DECLARE(migrateThreadHangAtStep1);
@@ -200,6 +198,9 @@ MONGO_FP_DECLARE(migrateThreadHangAtStep4);
MONGO_FP_DECLARE(migrateThreadHangAtStep5);
MONGO_FP_DECLARE(migrateThreadHangAtStep6);
+MONGO_FP_DECLARE(failMigrationReceivedOutOfRangeOperation);
+
+} // namespace
MigrationDestinationManager::MigrationDestinationManager() = default;
@@ -275,6 +276,8 @@ Status MigrationDestinationManager::start(const string& ns,
_max = max;
_shardKeyPattern = shardKeyPattern;
+ _chunkMarkedPending = false;
+
_numCloned = 0;
_clonedBytes = 0;
_numCatchup = 0;
@@ -353,17 +356,16 @@ void MigrationDestinationManager::_migrateThread(std::string ns,
OID epoch,
WriteConcernOptions writeConcern) {
Client::initThread("migrateThread");
-
- OperationContextImpl txn;
+ auto opCtx = getGlobalServiceContext()->makeOperationContext(&cc());
if (getGlobalAuthorizationManager()->isAuthEnabled()) {
ShardedConnectionInfo::addHook();
- AuthorizationSession::get(txn.getClient())->grantInternalAuthorization();
+ AuthorizationSession::get(opCtx->getClient())->grantInternalAuthorization();
}
try {
_migrateDriver(
- &txn, ns, sessionId, min, max, shardKeyPattern, fromShard, epoch, writeConcern);
+ opCtx.get(), ns, sessionId, min, max, shardKeyPattern, fromShard, epoch, writeConcern);
} catch (std::exception& e) {
{
stdx::lock_guard<stdx::mutex> sl(_mutex);
@@ -384,13 +386,9 @@ void MigrationDestinationManager::_migrateThread(std::string ns,
if (getState() != DONE) {
// Unprotect the range if needed/possible on unsuccessful TO migration
- ScopedTransaction transaction(&txn, MODE_IX);
- Lock::DBLock dbLock(txn.lockState(), nsToDatabaseSubstring(ns), MODE_IX);
- Lock::CollectionLock collLock(txn.lockState(), ns, MODE_X);
-
- string errMsg;
- if (!ShardingState::get(&txn)->forgetPending(&txn, ns, min, max, epoch, &errMsg)) {
- warning() << errMsg;
+ Status status = _forgetPending(opCtx.get(), NamespaceString(ns), min, max, epoch);
+ if (!status.isOK()) {
+ warning() << "Failed to remove pending range" << causedBy(status);
}
}
@@ -475,7 +473,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
indexSpecs.insert(indexSpecs.begin(), indexes.begin(), indexes.end());
}
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction scopedXact(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
OldClientContext ctx(txn, ns);
@@ -567,17 +565,11 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* txn,
return;
}
- {
- // Protect the range by noting that we're now starting a migration to it
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), ns, MODE_X);
-
- if (!ShardingState::get(txn)->notePending(txn, ns, min, max, epoch, &errmsg)) {
- warning() << errmsg;
- setState(FAIL);
- return;
- }
+ Status status = _notePending(txn, NamespaceString(ns), min, max, epoch);
+ if (!status.isOK()) {
+ warning() << errmsg;
+ setState(FAIL);
+ return;
}
timing.done(2);
@@ -861,7 +853,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* txn,
bool didAnything = false;
if (xfer["deleted"].isABSONObj()) {
- ScopedTransaction transaction(txn, MODE_IX);
+ ScopedTransaction scopedXact(txn, MODE_IX);
Lock::DBLock dlk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
Helpers::RemoveSaver rs("moveChunk", ns, "removedDuring");
@@ -959,7 +951,7 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
{
// Get global lock to wait for write to be commited to journal.
- ScopedTransaction transaction(txn, MODE_S);
+ ScopedTransaction scopedXact(txn, MODE_S);
Lock::GlobalRead lk(txn->lockState());
// if durability is on, force a write to journal
@@ -972,6 +964,79 @@ bool MigrationDestinationManager::_flushPendingWrites(OperationContext* txn,
return true;
}
+Status MigrationDestinationManager::_notePending(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& min,
+ const BSONObj& max,
+ const OID& epoch) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetCollection autoColl(txn, nss, MODE_IX, MODE_X);
+
+ auto css = CollectionShardingState::get(txn, nss);
+ auto metadata = css->getMetadata();
+
+ // This can currently happen because drops aren't synchronized with in-migrations. The idea for
+ // checking this here is that in the future we shouldn't have this problem.
+ if (metadata->getCollVersion().epoch() != epoch) {
+ return {ErrorCodes::StaleShardVersion,
+ str::stream() << "could not note chunk "
+ << "[" << min << "," << max << ")"
+ << " as pending because the epoch for " << nss.ns()
+ << " has changed from " << epoch << " to "
+ << metadata->getCollVersion().epoch()};
+ }
+
+ ChunkType chunk;
+ chunk.setMin(min);
+ chunk.setMax(max);
+
+ css->setMetadata(metadata->clonePlusPending(chunk));
+
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
+ invariant(!_chunkMarkedPending);
+ _chunkMarkedPending = true;
+
+ return Status::OK();
+}
+
+Status MigrationDestinationManager::_forgetPending(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& min,
+ const BSONObj& max,
+ const OID& epoch) {
+ {
+ stdx::lock_guard<stdx::mutex> sl(_mutex);
+ if (!_chunkMarkedPending) {
+ return Status::OK();
+ }
+
+ _chunkMarkedPending = false;
+ }
+
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetCollection autoColl(txn, nss, MODE_IX, MODE_X);
+
+ auto css = CollectionShardingState::get(txn, nss);
+ auto metadata = css->getMetadata();
+
+ // This can currently happen because drops aren't synchronized with in-migrations. The idea for
+ // checking this here is that in the future we shouldn't have this problem.
+ if (metadata->getCollVersion().epoch() != epoch) {
+ return {ErrorCodes::StaleShardVersion,
+ str::stream() << "no need to forget pending chunk "
+ << "[" << min << "," << max << ")"
+ << " because the epoch for " << nss.ns() << " has changed from "
+ << epoch << " to " << metadata->getCollVersion().epoch()};
+ }
+
+ ChunkType chunk;
+ chunk.setMin(min);
+ chunk.setMax(max);
+
+ css->setMetadata(metadata->cloneMinusPending(chunk));
+ return Status::OK();
+}
+
MoveTimingHelper::MoveTimingHelper(OperationContext* txn,
const string& where,
const string& ns,
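The two new private helpers bracket the destination side of a migration: _notePending runs once the destination commits to receiving the chunk (step 2 of the migrate driver), and _forgetPending undoes the bookkeeping from _migrateThread if the migration does not reach DONE. A simplified standalone sketch of that bracket, showing how the _chunkMarkedPending guard makes the cleanup a no-op when nothing was noted (all types here are stand-ins; the real helpers also clone collection metadata under collection locks):

    #include <cassert>
    #include <iostream>
    #include <mutex>

    // Stand-in for the manager's pending-chunk bookkeeping.
    class DestinationManager {
    public:
        bool notePending() {
            std::lock_guard<std::mutex> lk(_mutex);
            assert(!_chunkMarkedPending);  // mirrors invariant(!_chunkMarkedPending)
            _chunkMarkedPending = true;
            return true;
        }

        bool forgetPending() {
            {
                std::lock_guard<std::mutex> lk(_mutex);
                if (!_chunkMarkedPending)
                    return true;  // nothing was noted, so nothing to undo
                _chunkMarkedPending = false;
            }
            // ... the real code re-acquires locks and installs cloneMinusPending() ...
            return true;
        }

        void migrateThread(bool reachedDone) {
            if (!notePending())
                return;
            if (!reachedDone && !forgetPending())
                std::cerr << "Failed to remove pending range\n";
        }

    private:
        std::mutex _mutex;
        bool _chunkMarkedPending = false;
    };

    int main() {
        DestinationManager mgr;
        mgr.migrateThread(false);  // failed migration: the pending range is dropped again
        return 0;
    }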
diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h
index 03c3d05d665..305885e8ff8 100644
--- a/src/mongo/db/s/migration_destination_manager.h
+++ b/src/mongo/db/s/migration_destination_manager.h
@@ -42,6 +42,7 @@
namespace mongo {
+class NamespaceString;
class OperationContext;
class Status;
struct WriteConcernOptions;
@@ -126,6 +127,38 @@ private:
const repl::OpTime& lastOpApplied,
const WriteConcernOptions& writeConcern);
+ /**
+ * Remembers a chunk range between 'min' and 'max' as a range which will have data migrated
+ * into it. This data can then be protected against cleanup of orphaned data.
+ *
+ * Overlapping pending ranges will be removed, so it is only safe to use this when you know
+ * your metadata view is definitive, such as at the start of a migration.
+ *
+ * TODO: Because migrations may currently be active when a collection drops, an epoch is
+ * necessary to ensure the pending metadata change is still applicable.
+ */
+ Status _notePending(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& min,
+ const BSONObj& max,
+ const OID& epoch);
+
+ /**
+ * Stops tracking a chunk range between 'min' and 'max' that previously was having data
+ * migrated into it. This data is no longer protected against cleanup of orphaned data.
+ *
+ * To avoid removing pending ranges of other operations, ensure that this is only used when
+ * a migration is still active.
+ *
+ * TODO: Because migrations may currently be active when a collection drops, an epoch is
+ * necessary to ensure the pending metadata change is still applicable.
+ */
+ Status _forgetPending(OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& min,
+ const BSONObj& max,
+ const OID& epoch);
+
// Mutex to guard all fields
mutable stdx::mutex _mutex;
@@ -144,6 +177,10 @@ private:
BSONObj _max;
BSONObj _shardKeyPattern;
+ // Set to true once we have accepted the chunk as pending into our metadata. Used so that on
+ // failure we can perform the appropriate cleanup.
+ bool _chunkMarkedPending{false};
+
long long _numCloned{0};
long long _clonedBytes{0};
long long _numCatchup{0};
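Both helpers take the collection epoch because drops are not synchronized with in-flight migrations: if the collection was dropped and recreated, its epoch changes, and the pending-metadata change must be rejected rather than applied to the new incarnation. A minimal sketch of that guard (OID and Metadata are simplified stand-ins):

    #include <cassert>
    #include <string>

    // An epoch is regenerated whenever a collection is dropped and recreated,
    // so comparing it detects a drop that raced with the migration.
    using OID = std::string;

    struct Metadata {
        OID epoch;
        bool pending = false;
    };

    bool notePending(Metadata& metadata, const OID& migrationEpoch) {
        if (metadata.epoch != migrationEpoch) {
            // Stale: the collection changed under us; refuse to touch its metadata.
            return false;
        }
        metadata.pending = true;
        return true;
    }

    int main() {
        Metadata current{"epoch-A"};
        assert(notePending(current, "epoch-A"));     // same epoch: change applies

        Metadata recreated{"epoch-B"};
        assert(!notePending(recreated, "epoch-A"));  // dropped/recreated: rejected
        return 0;
    }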
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 9e93ef5e2b2..9acfe275af5 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -284,93 +284,6 @@ void ShardingState::undoDonateChunk(OperationContext* txn,
it->second->setMetadata(std::move(prevMetadata));
}
-bool ShardingState::notePending(OperationContext* txn,
- const string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const OID& epoch,
- string* errMsg) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- CollectionShardingStateMap::const_iterator it = _collections.find(ns);
- if (it == _collections.end()) {
- *errMsg = str::stream() << "could not note chunk "
- << "[" << min << "," << max << ")"
- << " as pending because the local metadata for " << ns
- << " has changed";
- return false;
- }
-
- shared_ptr<CollectionMetadata> metadata = it->second->getMetadata();
-
- // This can currently happen because drops aren't synchronized with in-migrations
- // The idea for checking this here is that in the future we shouldn't have this problem
- if (metadata->getCollVersion().epoch() != epoch) {
- *errMsg = str::stream() << "could not note chunk "
- << "[" << min << "," << max << ")"
- << " as pending because the epoch for " << ns
- << " has changed from " << epoch << " to "
- << metadata->getCollVersion().epoch();
-
- return false;
- }
-
- ChunkType chunk;
- chunk.setMin(min);
- chunk.setMax(max);
-
- std::unique_ptr<CollectionMetadata> cloned(metadata->clonePlusPending(chunk, errMsg));
- if (!cloned)
- return false;
-
- it->second->setMetadata(std::move(cloned));
- return true;
-}
-
-bool ShardingState::forgetPending(OperationContext* txn,
- const string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const OID& epoch,
- string* errMsg) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- CollectionShardingStateMap::const_iterator it = _collections.find(ns);
- if (it == _collections.end()) {
- *errMsg = str::stream() << "no need to forget pending chunk "
- << "[" << min << "," << max << ")"
- << " because the local metadata for " << ns << " has changed";
-
- return false;
- }
-
- shared_ptr<CollectionMetadata> metadata = it->second->getMetadata();
-
- // This can currently happen because drops aren't synchronized with in-migrations
- // The idea for checking this here is that in the future we shouldn't have this problem
- if (metadata->getCollVersion().epoch() != epoch) {
- *errMsg = str::stream() << "no need to forget pending chunk "
- << "[" << min << "," << max << ")"
- << " because the epoch for " << ns << " has changed from " << epoch
- << " to " << metadata->getCollVersion().epoch();
-
- return false;
- }
-
- ChunkType chunk;
- chunk.setMin(min);
- chunk.setMax(max);
-
- std::unique_ptr<CollectionMetadata> cloned(metadata->cloneMinusPending(chunk, errMsg));
- if (!cloned)
- return false;
-
- it->second->setMetadata(std::move(cloned));
- return true;
-}
-
void ShardingState::splitChunk(OperationContext* txn,
const string& ns,
const BSONObj& min,
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index 80bc441a30c..18aee698093 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -196,8 +196,6 @@ public:
std::shared_ptr<CollectionMetadata> getCollectionMetadata(const std::string& ns);
- // chunk migrate and split support
-
/**
* Creates and installs a new chunk metadata for a given collection by "forgetting" about
* one of its chunks. The new metadata uses the provided version, which has to be higher
@@ -240,41 +238,6 @@ public:
std::shared_ptr<CollectionMetadata> prevMetadata);
/**
- * Remembers a chunk range between 'min' and 'max' as a range which will have data migrated
- * into it. This data can then be protected against cleanup of orphaned data.
- *
- * Overlapping pending ranges will be removed, so it is only safe to use this when you know
- * your metadata view is definitive, such as at the start of a migration.
- *
- * @return false with errMsg if the range is owned by this shard
- */
- bool notePending(OperationContext* txn,
- const std::string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const OID& epoch,
- std::string* errMsg);
-
- /**
- * Stops tracking a chunk range between 'min' and 'max' that previously was having data
- * migrated into it. This data is no longer protected against cleanup of orphaned data.
- *
- * To avoid removing pending ranges of other operations, ensure that this is only used when
- * a migration is still active.
- * TODO: Because migrations may currently be active when a collection drops, an epoch is
- * necessary to ensure the pending metadata change is still applicable.
- *
- * @return false with errMsg if the range is owned by the shard or the epoch of the metadata
- * has changed
- */
- bool forgetPending(OperationContext* txn,
- const std::string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const OID& epoch,
- std::string* errMsg);
-
- /**
* Creates and installs a new chunk metadata for a given collection by splitting one of its
* chunks in two or more. The version for the first split chunk should be provided. The
* subsequent chunks' version would be the latter with the minor portion incremented.