summaryrefslogtreecommitdiff
path: root/jstests/sharding/move_chunk_open_cursors.js
diff options
context:
space:
mode:
author	clang-format-7.0.1 <adam.martin@10gen.com>	2019-07-26 18:20:35 -0400
committer	ADAM David Alan Martin <adam.martin@10gen.com>	2019-07-27 11:02:23 -0400
commit	134a4083953270e8a11430395357fb70a29047ad (patch)
tree	dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/sharding/move_chunk_open_cursors.js
parent	1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
download	mongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz
SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'jstests/sharding/move_chunk_open_cursors.js')
-rw-r--r--	jstests/sharding/move_chunk_open_cursors.js	| 80
1 file changed, 39 insertions(+), 41 deletions(-)
diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js
index fe0942f0558..1b15fb198cf 100644
--- a/jstests/sharding/move_chunk_open_cursors.js
+++ b/jstests/sharding/move_chunk_open_cursors.js
@@ -3,52 +3,50 @@
* migration.
*/
(function() {
- "use strict";
- const dbName = "test";
- const collName = jsTest.name();
- const testNs = dbName + "." + collName;
+"use strict";
+const dbName = "test";
+const collName = jsTest.name();
+const testNs = dbName + "." + collName;
- const nDocs = 1000 * 10;
- const st = new ShardingTest({shards: 2});
- const coll = st.s0.getDB(dbName)[collName];
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+const nDocs = 1000 * 10;
+const st = new ShardingTest({shards: 2});
+const coll = st.s0.getDB(dbName)[collName];
+let bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- // Make sure we know which shard will host the data to begin.
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
- assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
+// Make sure we know which shard will host the data to begin.
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.admin.runCommand({enableSharding: dbName}));
+assert.commandWorked(st.admin.runCommand({shardCollection: testNs, key: {_id: 1}}));
- // Open some cursors before migrating data.
- // Ensure the cursor stage at the front of the pipeline does not buffer any data.
- assert.commandWorked(
- st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
- const getMoreBatchSize = 100;
- const aggResponse = assert.commandWorked(
- coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
- const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
+// Open some cursors before migrating data.
+// Ensure the cursor stage at the front of the pipeline does not buffer any data.
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
+const getMoreBatchSize = 100;
+const aggResponse = assert.commandWorked(
+ coll.runCommand({aggregate: collName, pipeline: [], cursor: {batchSize: 0}}));
+const aggCursor = new DBCommandCursor(coll.getDB(), aggResponse, getMoreBatchSize);
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 2}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: nDocs - 1}, to: st.shard1.shardName}));
- assert.eq(
- aggCursor.itcount(),
- nDocs,
- "expected agg cursor to return all matching documents, even though some have migrated");
+assert.eq(aggCursor.itcount(),
+ nDocs,
+ "expected agg cursor to return all matching documents, even though some have migrated");
- // Test the same behavior with the find command.
- const findResponse = assert.commandWorked(
- coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
- const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
- assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
- assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
+// Test the same behavior with the find command.
+const findResponse = assert.commandWorked(
+ coll.runCommand({find: collName, filter: {}, batchSize: getMoreBatchSize}));
+const findCursor = new DBCommandCursor(coll.getDB(), findResponse, getMoreBatchSize);
+assert(st.adminCommand({split: testNs, middle: {_id: nDocs / 4}}));
+assert(st.adminCommand({moveChunk: testNs, find: {_id: 0}, to: st.shard1.shardName}));
- assert.eq(
- findCursor.itcount(),
- nDocs,
- "expected find cursor to return all matching documents, even though some have migrated");
- st.stop();
+assert.eq(findCursor.itcount(),
+ nDocs,
+ "expected find cursor to return all matching documents, even though some have migrated");
+st.stop();
}());