author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-09-26 14:07:01 -0400
committer Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-09-26 16:57:01 -0400
commit    e786f06d2f38f50ced6f8c4750166990ed080897
tree      33e8b5aedd5d8bb7f385a05a9186e298c08cf735
parent    a0e21dec26e64d359c0093959cf3897c53893c8e
Revert "SERVER-26327 Extend diagnostics in bulk_shard_insert.js"
This reverts commit e2b8d3da53a06617fa9c0251070bfc31fe38f154.
-rw-r--r--  jstests/sharding/bulk_shard_insert.js | 72
-rw-r--r--  src/mongo/db/s/metadata_manager.cpp   |  6
2 files changed, 29 insertions(+), 49 deletions(-)
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 81c9fa25dd1..ae4626b8fdb 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -3,15 +3,20 @@
 (function() {
     'use strict';
 
-    var st = new ShardingTest({shards: 4, chunkSize: 1});
+    // Setup randomized test
+    var seed = new Date().getTime();
+    // seed = 0
+
+    Random.srand(seed);
+    print("Seeded with " + seed);
 
-    assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
-    st.ensurePrimaryShard('TestDB', st.shard0.shardName);
-    assert.commandWorked(
-        st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));
+    var st = new ShardingTest({shards: 4, chunkSize: 1});
 
-    var db = st.s0.getDB('TestDB');
-    var coll = db.TestColl;
+    // Setup sharded collection
+    var mongos = st.s0;
+    var db = mongos.getDB(jsTestName());
+    var coll = db.coll;
+    st.shardColl(coll, {_id: 1}, false);
 
     // Insert lots of bulk documents
     var numDocs = 1000000;
@@ -30,63 +35,38 @@
     var docsInserted = 0;
     var balancerOn = false;
 
-    /**
-     * Ensures that the just inserted documents can be found.
-     */
-    function checkDocuments() {
-        var count = coll.find().count();
-        var itcount = coll.find().itcount();
-
-        if (itcount != docsInserted) {
-            print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-
-            var allFoundDocs = coll.find({}, {_id: 0, Counter: 1}).toArray().sort(function(a, b) {
-                return a.Counter - b.Counter;
-            });
-
-            var missingValueInfo;
-
-            for (var i = 0; i < docsInserted; i++) {
-                if (i != allFoundDocs[i].Counter) {
-                    missingValueInfo = {expected: i, actual: allFoundDocs[i].Counter};
-                    break;
-                }
-            }
-
-            st.printShardingStatus();
-
-            assert(false,
-                   'Inserted number of documents does not match the actual: ' +
-                       tojson(missingValueInfo));
-        }
-    }
-
     while (docsInserted < numDocs) {
         var currBulkSize =
             (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
 
         var bulk = [];
         for (var i = 0; i < currBulkSize; i++) {
-            bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
-            docsInserted++;
+            bulk.push({hi: "there", at: docsInserted, i: i, x: data});
         }
 
         assert.writeOK(coll.insert(bulk));
 
-        if (docsInserted % 10000 == 0) {
-            print("Inserted " + docsInserted + " documents.");
-            checkDocuments();
+        if (Math.floor(docsInserted / 10000) != Math.floor((docsInserted + currBulkSize) / 10000)) {
+            print("Inserted " + (docsInserted + currBulkSize) + " documents.");
             st.printShardingStatus();
         }
 
-        if (docsInserted > numDocs / 3 && !balancerOn) {
-            print('Turning on balancer after ' + docsInserted + ' documents inserted.');
+        docsInserted += currBulkSize;
+
+        if (docsInserted > numDocs / 2 && !balancerOn) {
+            print("Turning on balancer after half documents inserted.");
             st.startBalancer();
             balancerOn = true;
         }
     }
 
-    checkDocuments();
+    // Check we inserted all the documents
+    st.printShardingStatus();
+
+    var count = coll.find().count();
+    var itcount = coll.find().itcount();
+    print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
+    assert.eq(docsInserted, itcount);
 
     st.stop();
 })();
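Note on the test change above: the restored loop reports progress with a floor-bucket comparison instead of the exact-multiple test used by the reverted diagnostics version, so it prints whenever a batch crosses a 10,000-document boundary even if the batch size does not divide 10,000 evenly. A minimal mongo shell sketch of that check in isolation (the batch size of 4097 and the 50,000 cap are made-up values for illustration, not from the test):

    var bucket = 10000;
    var inserted = 0;
    var batch = 4097;  // hypothetical batch size that never lands exactly on a multiple of 10000
    while (inserted < 50000) {
        var curr = Math.min(batch, 50000 - inserted);
        // Fires whenever this batch crosses a 10,000-document boundary
        if (Math.floor(inserted / bucket) != Math.floor((inserted + curr) / bucket)) {
            print("Crossed a 10k boundary at " + (inserted + curr) + " documents");
        }
        inserted += curr;
    }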
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index e4271070a5d..590ec64cd4c 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -128,9 +128,9 @@ void MetadataManager::refreshActiveMetadata(std::unique_ptr<CollectionMetadata>
         return;
     }
 
-    log() << "Refreshing metadata for collection " << _nss.ns() << " from "
-          << _activeMetadataTracker->metadata->toStringBasic() << " to "
-          << remoteMetadata->toStringBasic();
+    LOG(1) << "Refreshing metadata for collection " << _nss.ns() << " from "
+           << _activeMetadataTracker->metadata->toStringBasic() << " to "
+           << remoteMetadata->toStringBasic();
 
     // Resolve any receiving chunks, which might have completed by now
     for (auto it = _receivingChunks.begin(); it != _receivingChunks.end();) {
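Note on the metadata_manager.cpp change above: it demotes the refresh message from the default log level to debug level 1, so the line is only emitted when verbosity is raised. A minimal mongo shell sketch of raising that verbosity, run against the shard's mongod (db.setLogLevel and the logComponentVerbosity parameter are standard MongoDB facilities; treating "sharding" as the relevant log component is an assumption here):

    // Raise the sharding component's verbosity to 1 so LOG(1) messages appear
    db.setLogLevel(1, "sharding");

    // Equivalent form via setParameter
    db.adminCommand({setParameter: 1, logComponentVerbosity: {sharding: {verbosity: 1}}});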