Diffstat (limited to 'src/mongo/dbtests/sharding.cpp')
-rw-r--r--  src/mongo/dbtests/sharding.cpp | 25
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
index e9be534f995..4b15938625a 100644
--- a/src/mongo/dbtests/sharding.cpp
+++ b/src/mongo/dbtests/sharding.cpp
@@ -168,9 +168,9 @@ public:
};
//
-// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards is not
-// tested here since there are unresolved race conditions there and probably should be avoided if at all
-// possible.
+// Tests creating a new chunk manager with random split points. Creating chunks on multiple shards
+// is not tested here since there are unresolved race conditions there and probably should be
+// avoided if at all possible.
//
class ChunkManagerCreateFullTest : public ChunkManagerTest {
public:
@@ -212,7 +212,8 @@ public:
set<int> minorVersions;
OID epoch;
- // Check that all chunks were created with version 1|x with consistent epoch and unique minor versions
+ // Check that all chunks were created with version 1|x with consistent epoch and unique
+ // minor versions
while (cursor->more()) {
BSONObj chunk = cursor->next();
@@ -234,8 +235,8 @@ public:
};
//
-// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can be reloaded
-// on top of an old chunk manager with changes.
+// Tests that chunks are loaded correctly from the db with no a-priori info and also that they can
+// be reloaded on top of an old chunk manager with changes.
//
class ChunkManagerLoadBasicTest : public ChunkManagerCreateFullTest {
public:
@@ -325,7 +326,8 @@ public:
}
};
- // Allow validating with and without ranges (b/c our splits won't actually be updated by the diffs)
+ // Allow validating with and without ranges (b/c our splits won't actually be updated by the
+ // diffs)
void validate(const std::vector<ChunkType>& chunks,
ChunkVersion maxVersion,
const VersionMap& maxShardVersions) {
@@ -354,7 +356,8 @@ public:
for (const ChunkType& chunk : chunks) {
if (ranges != NULL) {
- // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;
+ // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs
+ // " << chunkCount << endl;
RangeMap::iterator chunkRange =
ranges->find(_inverse ? chunk.getMax() : chunk.getMin());
@@ -563,7 +566,8 @@ public:
chunk[ChunkType::shard()].String())
break;
- // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
+ // log() << "... appending chunk from diff shard: " << prevShardChunk <<
+ // endl;
newChunksB.append(prevShardChunk);
prevShardChunk = BSONObj();
@@ -599,7 +603,8 @@ public:
BSONObj newShard = newShardB.obj();
BSONObj prevShard = prevShardB.obj();
- // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;
+ // log() << " ... migrated to " << newShard << " and updated " << prevShard
+ // << endl;
newChunksB.append(newShard);
newChunksB.append(prevShard);