summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-09-26 14:07:14 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-09-26 16:57:01 -0400
commit    3ab7ab1ccb9bf89ecfdd793cc0d2e4acb8e334d4 (patch)
tree      ce95b8b2373bef79a0c601be7c3ae1f287b859dd
parent    e786f06d2f38f50ced6f8c4750166990ed080897 (diff)
download  mongo-3ab7ab1ccb9bf89ecfdd793cc0d2e4acb8e334d4.tar.gz
SERVER-26327 Extend diagnostics in bulk_shard_insert.js
-rw-r--r--  jstests/sharding/bulk_shard_insert.js  | 73
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp    |  6
2 files changed, 50 insertions(+), 29 deletions(-)
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index ae4626b8fdb..6d36e5fda93 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -3,20 +3,15 @@
(function() {
'use strict';
- // Setup randomized test
- var seed = new Date().getTime();
- // seed = 0
-
- Random.srand(seed);
- print("Seeded with " + seed);
-
var st = new ShardingTest({shards: 4, chunkSize: 1});
- // Setup sharded collection
- var mongos = st.s0;
- var db = mongos.getDB(jsTestName());
- var coll = db.coll;
- st.shardColl(coll, {_id: 1}, false);
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+ st.ensurePrimaryShard('TestDB', st.shard0.shardName);
+ assert.commandWorked(
+ st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
+
+ var db = st.s0.getDB('TestDB');
+ var coll = db.TestColl;
// Insert lots of bulk documents
var numDocs = 1000000;
@@ -35,38 +30,64 @@
var docsInserted = 0;
var balancerOn = false;
+ /**
+ * Ensures that the just inserted documents can be found.
+ */
+ function checkDocuments() {
+ var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
+ var count = coll.find().count();
+
+ if (docsFound.length != docsInserted) {
+ print("Inserted " + docsInserted + " count : " + count + " doc count : " +
+ docsFound.length);
+
+ var allFoundDocsSorted = docsFound.sort(function(a, b) {
+ return a.Counter - b.Counter;
+ });
+
+ var missingValueInfo;
+
+ for (var i = 0; i < docsInserted; i++) {
+ if (i != allFoundDocsSorted[i].Counter) {
+ missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
+ break;
+ }
+ }
+
+ st.printShardingStatus();
+
+ assert(false,
+ 'Inserted number of documents does not match the actual: ' +
+ tojson(missingValueInfo));
+ }
+ }
+
while (docsInserted < numDocs) {
var currBulkSize =
(numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
var bulk = [];
for (var i = 0; i < currBulkSize; i++) {
- bulk.push({hi: "there", at: docsInserted, i: i, x: data});
+ bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
+ docsInserted++;
}
assert.writeOK(coll.insert(bulk));
- if (Math.floor(docsInserted / 10000) != Math.floor((docsInserted + currBulkSize) / 10000)) {
- print("Inserted " + (docsInserted + currBulkSize) + " documents.");
+ if (docsInserted % 10000 == 0) {
+ print("Inserted " + docsInserted + " documents.");
+ checkDocuments();
st.printShardingStatus();
}
- docsInserted += currBulkSize;
-
- if (docsInserted > numDocs / 2 && !balancerOn) {
- print("Turning on balancer after half documents inserted.");
+ if (docsInserted > numDocs / 3 && !balancerOn) {
+ print('Turning on balancer after ' + docsInserted + ' documents inserted.');
st.startBalancer();
balancerOn = true;
}
}
- // Check we inserted all the documents
- st.printShardingStatus();
-
- var count = coll.find().count();
- var itcount = coll.find().itcount();
- print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
- assert.eq(docsInserted, itcount);
+ checkDocuments();
st.stop();
})();
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index 590ec64cd4c..e4271070a5d 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -128,9 +128,9 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
return;
}
- LOG(1) << "Refreshing metadata for collection " << _nss.ns() << " from "
- << _activeMetadataTracker->metadata->toStringBasic() << " to "
- << remoteMetadata->toStringBasic();
+ log() << "Refreshing metadata for collection " << _nss.ns() << " from "
+ << _activeMetadataTracker->metadata->toStringBasic() << " to "
+ << remoteMetadata->toStringBasic();
// Resolve any receiving chunks, which might have completed by now
for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {