author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-03-30 10:53:40 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>   2016-03-30 16:47:01 -0400
commit     e47ce9f899d09eba1cbdb45cc433786f5510e41a (patch)
tree       6370d81081db5258300401e53ace8b58edb6ff7a
parent     2b56c5ce4527403329bc60ee406a0f1a7de3f10a (diff)
download   mongo-e47ce9f899d09eba1cbdb45cc433786f5510e41a.tar.gz
SERVER-23377 Make sure _migrateClone always advances to next document
-rw-r--r--   src/mongo/db/s/SConscript                                 4
-rw-r--r--   src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp  26
2 files changed, 15 insertions, 15 deletions
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 83ab621ca27..85a559085ab 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -82,6 +82,7 @@ env.Library(
         'set_shard_version_command.cpp',
     ],
     LIBDEPS=[
+        '$BUILD_DIR/mongo/db/commands',
         '$BUILD_DIR/mongo/db/repl/repl_coordinator_global',
         '$BUILD_DIR/mongo/s/serveronly',
         'metadata',
@@ -99,7 +100,7 @@ env.Library(
         'migration_chunk_cloner_source.cpp',
     ],
     LIBDEPS=[
-
+        '$BUILD_DIR/mongo/base',
     ],
 )
 
@@ -110,6 +111,7 @@ env.Library(
         'migration_chunk_cloner_source_legacy_commands.cpp',
     ],
     LIBDEPS=[
+        '$BUILD_DIR/mongo/db/commands',
         'migration_chunk_cloner',
     ],
     LIBDEPS_TAGS=[
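
Build note, inferred from the hunks above rather than stated in the commit: the two command libraries now list '$BUILD_DIR/mongo/db/commands' explicitly in LIBDEPS, and 'migration_chunk_cloner_source', whose LIBDEPS list was previously empty, now depends on '$BUILD_DIR/mongo/base'. Presumably this just makes the libraries declare the dependencies they actually use instead of relying on transitive linkage.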
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 1cb8c5c3bda..a9b324eaaad 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -402,24 +402,22 @@ Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* txn,
     std::set<RecordId>::iterator it;
 
     for (it = _cloneLocs.begin(); it != _cloneLocs.end(); ++it) {
-        Snapshotted<BSONObj> doc;
-        if (!collection->findDoc(txn, *it, &doc)) {
-            // Document must have been deleted
-            continue;
-        }
-
-        // Use the builder size instead of accumulating the document sizes directly so that we take
-        // into consideration the overhead of BSONArray indices, and *always* append at least one
-        // document.
-        if (arrBuilder->arrSize() &&
-            (arrBuilder->len() + doc.value().objsize() + 1024) > BSONObjMaxUserSize) {
+        // We must always make progress in this method by at least one document because empty return
+        // indicates there is no more initial clone data.
+        if (arrBuilder->arrSize() && tracker.intervalHasElapsed()) {
             break;
         }
 
-        arrBuilder->append(doc.value());
+        Snapshotted<BSONObj> doc;
+        if (collection->findDoc(txn, *it, &doc)) {
+            // Use the builder size instead of accumulating the document sizes directly so that we
+            // take into consideration the overhead of BSONArray indices.
+            if (arrBuilder->arrSize() &&
+                (arrBuilder->len() + doc.value().objsize() + 1024) > BSONObjMaxUserSize) {
+                break;
+            }
 
-        if (tracker.intervalHasElapsed()) {
-            break;
+            arrBuilder->append(doc.value());
         }
     }
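
For readers who prefer not to trace the hunk line by line, here is a standalone sketch of the batching invariant the rewritten loop relies on. This is not MongoDB code: fillBatch, pending, and maxBatchBytes are hypothetical names, and a simple byte budget stands in for both the BSONObjMaxUserSize check and the ElapsedTracker. Only the control flow mirrors the change: limits are checked only once the batch is non-empty, so an empty batch can only mean there is nothing left to clone.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Sketch only: the byte budget is enforced only once the batch already holds
// a document, so an empty batch can only mean there was nothing left to append.
std::size_t fillBatch(const std::vector<std::string>& pending,
                      std::size_t cursor,
                      std::size_t maxBatchBytes,
                      std::vector<std::string>* batch) {
    std::size_t batchBytes = 0;
    for (; cursor != pending.size(); ++cursor) {
        const std::string& doc = pending[cursor];
        if (!batch->empty() && batchBytes + doc.size() > maxBatchBytes) {
            break;  // budget spent, but at least one document already made it in
        }
        batch->push_back(doc);
        batchBytes += doc.size();
    }
    return cursor;  // resume point; equals pending.size() once cloning is done
}

int main() {
    const std::vector<std::string> pending = {"aaaaaaaa", "bbbb", "cc"};
    std::vector<std::string> batch;
    const std::size_t next = fillBatch(pending, 0, /*maxBatchBytes=*/4, &batch);
    // Even with a 4-byte budget the 8-byte first document is returned, so the
    // caller keeps requesting batches until one comes back empty.
    std::cout << batch.size() << " document(s) in batch, next cursor " << next << "\n";
    return 0;
}

The practical consequence, also stated in the new comment in the hunk above, is that the size and time limits act as early-exit hints rather than hard gates, which is what lets an empty _migrateClone response serve as the end-of-initial-clone signal.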