Diffstat (limited to 'jstests/core'): 496 files changed, 37553 insertions, 37847 deletions
diff --git a/jstests/core/SERVER-23626.js b/jstests/core/SERVER-23626.js index f4f01495269..9a25bda2291 100644 --- a/jstests/core/SERVER-23626.js +++ b/jstests/core/SERVER-23626.js @@ -1,18 +1,17 @@ (function() { - "use strict"; - var t = db.jstests_server23626; +"use strict"; +var t = db.jstests_server23626; - t.mycoll.drop(); - assert.writeOK(t.mycoll.insert({_id: 0, a: Date.prototype})); - assert.eq(1, t.mycoll.find({a: {$type: 'date'}}).itcount()); +t.mycoll.drop(); +assert.writeOK(t.mycoll.insert({_id: 0, a: Date.prototype})); +assert.eq(1, t.mycoll.find({a: {$type: 'date'}}).itcount()); - t.mycoll.drop(); - assert.writeOK(t.mycoll.insert({_id: 0, a: Function.prototype})); - assert.eq(1, t.mycoll.find({a: {$type: 'javascript'}}).itcount()); - - t.mycoll.drop(); - assert.writeOK(t.mycoll.insert({_id: 0, a: RegExp.prototype})); - assert.eq(1, t.mycoll.find({a: {$type: 'regex'}}).itcount()); +t.mycoll.drop(); +assert.writeOK(t.mycoll.insert({_id: 0, a: Function.prototype})); +assert.eq(1, t.mycoll.find({a: {$type: 'javascript'}}).itcount()); +t.mycoll.drop(); +assert.writeOK(t.mycoll.insert({_id: 0, a: RegExp.prototype})); +assert.eq(1, t.mycoll.find({a: {$type: 'regex'}}).itcount()); }());
\ No newline at end of file diff --git a/jstests/core/add_skip_stage_before_fetch.js b/jstests/core/add_skip_stage_before_fetch.js index 3f907b5e49c..aaad7bb5db3 100644 --- a/jstests/core/add_skip_stage_before_fetch.js +++ b/jstests/core/add_skip_stage_before_fetch.js @@ -6,58 +6,61 @@ // @tags: [assumes_unsharded_collection, operations_longer_than_stepdown_interval_in_txns] (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.add_skip_stage_before_fetch; - - coll.drop(); - const testIndex = {a: 1, b: 1, c: 1}; - assert.commandWorked(coll.createIndex(testIndex)); - - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 10000; i++) { - bulk.insert({ - a: i % 2, - b: i % 4, - c: Math.floor(Math.random() * 1000), - d: Math.floor(Math.random() * 1000) - }); - } - assert.writeOK(bulk.execute()); - - // The {a: 0, b: 2} query will match exactly one quarter of the documents in the collection: - // 2500 in total. In the test queries below, we skip the first 2400, returning exactly 100 - // documents. - - // This find can be computed using the index, so we should only need to fetch the 100 documents - // that get returned to the client after skipping the first 2400. - let explainResult = - coll.find({a: 0, b: 2}).hint(testIndex).skip(2400).explain("executionStats"); - assert.gte(explainResult.executionStats.totalKeysExamined, 2500); - assert.eq(explainResult.executionStats.totalDocsExamined, 100); - - // This sort can also be computed using the index. - explainResult = - coll.find({a: 0, b: 2}).hint(testIndex).sort({c: 1}).skip(2400).explain("executionStats"); - assert.gte(explainResult.executionStats.totalKeysExamined, 2500); - assert.eq(explainResult.executionStats.totalDocsExamined, 100); - - // This query is covered by the index, so there should be no fetch at all. - explainResult = coll.find({a: 0, b: 2}, {_id: 0, a: 1}) - .hint(testIndex) - .sort({c: 1}) - .skip(2400) - .explain("executionStats"); - assert.gte(explainResult.executionStats.totalKeysExamined, 2500); - assert.eq(explainResult.executionStats.totalDocsExamined, 0); - assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan)); - - // This sort requires a field that is not in the index, so we should be fetching all 2500 - // documents that match the find predicate. - explainResult = - coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats"); - assert.gte(explainResult.executionStats.totalKeysExamined, 2500); - assert.eq(explainResult.executionStats.totalDocsExamined, 2500); +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const coll = db.add_skip_stage_before_fetch; + +coll.drop(); +const testIndex = { + a: 1, + b: 1, + c: 1 +}; +assert.commandWorked(coll.createIndex(testIndex)); + +const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 10000; i++) { + bulk.insert({ + a: i % 2, + b: i % 4, + c: Math.floor(Math.random() * 1000), + d: Math.floor(Math.random() * 1000) + }); +} +assert.writeOK(bulk.execute()); + +// The {a: 0, b: 2} query will match exactly one quarter of the documents in the collection: +// 2500 in total. In the test queries below, we skip the first 2400, returning exactly 100 +// documents. + +// This find can be computed using the index, so we should only need to fetch the 100 documents +// that get returned to the client after skipping the first 2400. 
+let explainResult = coll.find({a: 0, b: 2}).hint(testIndex).skip(2400).explain("executionStats"); +assert.gte(explainResult.executionStats.totalKeysExamined, 2500); +assert.eq(explainResult.executionStats.totalDocsExamined, 100); + +// This sort can also be computed using the index. +explainResult = + coll.find({a: 0, b: 2}).hint(testIndex).sort({c: 1}).skip(2400).explain("executionStats"); +assert.gte(explainResult.executionStats.totalKeysExamined, 2500); +assert.eq(explainResult.executionStats.totalDocsExamined, 100); + +// This query is covered by the index, so there should be no fetch at all. +explainResult = coll.find({a: 0, b: 2}, {_id: 0, a: 1}) + .hint(testIndex) + .sort({c: 1}) + .skip(2400) + .explain("executionStats"); +assert.gte(explainResult.executionStats.totalKeysExamined, 2500); +assert.eq(explainResult.executionStats.totalDocsExamined, 0); +assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan)); + +// This sort requires a field that is not in the index, so we should be fetching all 2500 +// documents that match the find predicate. +explainResult = + coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats"); +assert.gte(explainResult.executionStats.totalKeysExamined, 2500); +assert.eq(explainResult.executionStats.totalDocsExamined, 2500); })(); diff --git a/jstests/core/agg_hint.js b/jstests/core/agg_hint.js index 2d088daaf7b..899bbd2217a 100644 --- a/jstests/core/agg_hint.js +++ b/jstests/core/agg_hint.js @@ -7,255 +7,254 @@ // command against views, which is converted to a hinted aggregation on execution. (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For getAggPlanStage. +load("jstests/libs/analyze_plan.js"); // For getAggPlanStage. - const testDB = db.getSiblingDB("agg_hint"); - assert.commandWorked(testDB.dropDatabase()); - const coll = testDB.getCollection("test"); - const view = testDB.getCollection("view"); +const testDB = db.getSiblingDB("agg_hint"); +assert.commandWorked(testDB.dropDatabase()); +const coll = testDB.getCollection("test"); +const view = testDB.getCollection("view"); - function confirmWinningPlanUsesExpectedIndex( - explainResult, expectedKeyPattern, stageName, pipelineOptimizedAway) { - const planStage = pipelineOptimizedAway ? getPlanStage(explainResult, stageName) - : getAggPlanStage(explainResult, stageName); - assert.neq(null, planStage); +function confirmWinningPlanUsesExpectedIndex( + explainResult, expectedKeyPattern, stageName, pipelineOptimizedAway) { + const planStage = pipelineOptimizedAway ? getPlanStage(explainResult, stageName) + : getAggPlanStage(explainResult, stageName); + assert.neq(null, planStage); - assert.eq(planStage.keyPattern, expectedKeyPattern, tojson(planStage)); - } + assert.eq(planStage.keyPattern, expectedKeyPattern, tojson(planStage)); +} - // Runs explain on 'command', with the hint specified by 'hintKeyPattern' when not null. - // Confirms that the winning query plan uses the index specified by 'expectedKeyPattern'. - // If 'pipelineOptimizedAway' is set to true, then we expect the pipeline to be entirely - // optimized away from the plan and replaced with a query tier. 
- function confirmCommandUsesIndex({command = null, - hintKeyPattern = null, - expectedKeyPattern = null, - stageName = "IXSCAN", - pipelineOptimizedAway = false} = {}) { - if (hintKeyPattern) { - command["hint"] = hintKeyPattern; - } - const res = - assert.commandWorked(testDB.runCommand({explain: command, verbosity: "queryPlanner"})); - confirmWinningPlanUsesExpectedIndex( - res, expectedKeyPattern, stageName, pipelineOptimizedAway); +// Runs explain on 'command', with the hint specified by 'hintKeyPattern' when not null. +// Confirms that the winning query plan uses the index specified by 'expectedKeyPattern'. +// If 'pipelineOptimizedAway' is set to true, then we expect the pipeline to be entirely +// optimized away from the plan and replaced with a query tier. +function confirmCommandUsesIndex({ + command = null, + hintKeyPattern = null, + expectedKeyPattern = null, + stageName = "IXSCAN", + pipelineOptimizedAway = false +} = {}) { + if (hintKeyPattern) { + command["hint"] = hintKeyPattern; } + const res = + assert.commandWorked(testDB.runCommand({explain: command, verbosity: "queryPlanner"})); + confirmWinningPlanUsesExpectedIndex(res, expectedKeyPattern, stageName, pipelineOptimizedAway); +} - // Runs explain on an aggregation with a pipeline specified by 'aggPipeline' and a hint - // specified by 'hintKeyPattern' if not null. Confirms that the winning query plan uses the - // index specified by 'expectedKeyPattern'. If 'pipelineOptimizedAway' is set to true, then - // we expect the pipeline to be entirely optimized away from the plan and replaced with a - // query tier. - // - // This method exists because the explain command does not support the aggregation command. - function confirmAggUsesIndex({collName = null, - aggPipeline = [], - hintKeyPattern = null, - expectedKeyPattern = null, - stageName = "IXSCAN", - pipelineOptimizedAway = false} = {}) { - let options = {}; +// Runs explain on an aggregation with a pipeline specified by 'aggPipeline' and a hint +// specified by 'hintKeyPattern' if not null. Confirms that the winning query plan uses the +// index specified by 'expectedKeyPattern'. If 'pipelineOptimizedAway' is set to true, then +// we expect the pipeline to be entirely optimized away from the plan and replaced with a +// query tier. +// +// This method exists because the explain command does not support the aggregation command. +function confirmAggUsesIndex({ + collName = null, + aggPipeline = [], + hintKeyPattern = null, + expectedKeyPattern = null, + stageName = "IXSCAN", + pipelineOptimizedAway = false +} = {}) { + let options = {}; - if (hintKeyPattern) { - options = {hint: hintKeyPattern}; - } - const res = assert.commandWorked( - testDB.getCollection(collName).explain().aggregate(aggPipeline, options)); - confirmWinningPlanUsesExpectedIndex( - res, expectedKeyPattern, stageName, pipelineOptimizedAway); + if (hintKeyPattern) { + options = {hint: hintKeyPattern}; } + const res = assert.commandWorked( + testDB.getCollection(collName).explain().aggregate(aggPipeline, options)); + confirmWinningPlanUsesExpectedIndex(res, expectedKeyPattern, stageName, pipelineOptimizedAway); +} - // Specify hint as a string, representing index name. - assert.commandWorked(coll.createIndex({x: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i})); - } +// Specify hint as a string, representing index name. 
+assert.commandWorked(coll.createIndex({x: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i})); +} - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: 3}}], - hintKeyPattern: "x_1", - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: 3}}], + hintKeyPattern: "x_1", + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); - // - // For each of the following tests we confirm: - // * That the expected index is chosen by the query planner when no hint is provided. - // * That the expected index is chosen when hinted. - // * That an index other than the one expected is chosen when hinted. - // +// +// For each of the following tests we confirm: +// * That the expected index is chosen by the query planner when no hint is provided. +// * That the expected index is chosen when hinted. +// * That an index other than the one expected is chosen when hinted. +// - // Hint on poor index choice should force use of the hinted index over one more optimal. - coll.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i})); - } +// Hint on poor index choice should force use of the hinted index over one more optimal. +coll.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i})); +} - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: 3}}], - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: 3}}], - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: 3}}], - hintKeyPattern: {_id: 1}, - expectedKeyPattern: {_id: 1}, - pipelineOptimizedAway: true - }); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: 3}}], + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: 3}}], + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: 3}}], + hintKeyPattern: {_id: 1}, + expectedKeyPattern: {_id: 1}, + pipelineOptimizedAway: true +}); - // With no hint specified, aggregation will always prefer an index that provides sort order over - // one that requires a blocking sort. A hinted aggregation should allow for choice of an index - // that provides blocking sort. - coll.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - assert.commandWorked(coll.createIndex({y: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i, y: i})); - } +// With no hint specified, aggregation will always prefer an index that provides sort order over +// one that requires a blocking sort. A hinted aggregation should allow for choice of an index +// that provides blocking sort. 
+coll.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +assert.commandWorked(coll.createIndex({y: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i, y: i})); +} - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], - expectedKeyPattern: {y: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], - hintKeyPattern: {y: 1}, - expectedKeyPattern: {y: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1} - }); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], + expectedKeyPattern: {y: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], + hintKeyPattern: {y: 1}, + expectedKeyPattern: {y: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$sort: {y: 1}}], + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1} +}); - // With no hint specified, aggregation will always prefer an index that provides a covered - // projection over one that does not. A hinted aggregation should allow for choice of an index - // that does not cover. - coll.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - assert.commandWorked(coll.createIndex({x: 1, y: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i, y: i})); - } +// With no hint specified, aggregation will always prefer an index that provides a covered +// projection over one that does not. A hinted aggregation should allow for choice of an index +// that does not cover. +coll.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +assert.commandWorked(coll.createIndex({x: 1, y: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i, y: i})); +} - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], - expectedKeyPattern: {x: 1, y: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], - hintKeyPattern: {x: 1, y: 1}, - expectedKeyPattern: {x: 1, y: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "test", - aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1} - }); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], + expectedKeyPattern: {x: 1, y: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], + hintKeyPattern: {x: 1, y: 1}, + expectedKeyPattern: {x: 1, y: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "test", + aggPipeline: [{$match: {x: {$gte: 0}}}, {$project: {x: 1, y: 1, _id: 0}}], + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1} +}); - // Confirm that a hinted agg can be executed against a view. 
- coll.drop(); - view.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i})); - } - assert.commandWorked(testDB.createView("view", "test", [{$match: {x: {$gte: 0}}}])); +// Confirm that a hinted agg can be executed against a view. +coll.drop(); +view.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i})); +} +assert.commandWorked(testDB.createView("view", "test", [{$match: {x: {$gte: 0}}}])); - confirmAggUsesIndex({ - collName: "view", - aggPipeline: [{$match: {x: 3}}], - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "view", - aggPipeline: [{$match: {x: 3}}], - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmAggUsesIndex({ - collName: "view", - aggPipeline: [{$match: {x: 3}}], - hintKeyPattern: {_id: 1}, - expectedKeyPattern: {_id: 1}, - pipelineOptimizedAway: true - }); +confirmAggUsesIndex({ + collName: "view", + aggPipeline: [{$match: {x: 3}}], + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "view", + aggPipeline: [{$match: {x: 3}}], + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmAggUsesIndex({ + collName: "view", + aggPipeline: [{$match: {x: 3}}], + hintKeyPattern: {_id: 1}, + expectedKeyPattern: {_id: 1}, + pipelineOptimizedAway: true +}); - // Confirm that a hinted find can be executed against a view. - coll.drop(); - view.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i})); - } - assert.commandWorked(testDB.createView("view", "test", [])); +// Confirm that a hinted find can be executed against a view. +coll.drop(); +view.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i})); +} +assert.commandWorked(testDB.createView("view", "test", [])); - confirmCommandUsesIndex({ - command: {find: "view", filter: {x: 3}}, - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmCommandUsesIndex({ - command: {find: "view", filter: {x: 3}}, - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1}, - pipelineOptimizedAway: true - }); - confirmCommandUsesIndex({ - command: {find: "view", filter: {x: 3}}, - hintKeyPattern: {_id: 1}, - expectedKeyPattern: {_id: 1}, - pipelineOptimizedAway: true - }); +confirmCommandUsesIndex({ + command: {find: "view", filter: {x: 3}}, + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmCommandUsesIndex({ + command: {find: "view", filter: {x: 3}}, + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1}, + pipelineOptimizedAway: true +}); +confirmCommandUsesIndex({ + command: {find: "view", filter: {x: 3}}, + hintKeyPattern: {_id: 1}, + expectedKeyPattern: {_id: 1}, + pipelineOptimizedAway: true +}); - // Confirm that a hinted count can be executed against a view. - coll.drop(); - view.drop(); - assert.commandWorked(coll.createIndex({x: 1})); - for (let i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({x: i})); - } - assert.commandWorked(testDB.createView("view", "test", [])); +// Confirm that a hinted count can be executed against a view. 
+coll.drop(); +view.drop(); +assert.commandWorked(coll.createIndex({x: 1})); +for (let i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({x: i})); +} +assert.commandWorked(testDB.createView("view", "test", [])); - confirmCommandUsesIndex({ - command: {count: "view", query: {x: 3}}, - expectedKeyPattern: {x: 1}, - stageName: "COUNT_SCAN" - }); - confirmCommandUsesIndex({ - command: {count: "view", query: {x: 3}}, - hintKeyPattern: {x: 1}, - expectedKeyPattern: {x: 1}, - stageName: "COUNT_SCAN" - }); - confirmCommandUsesIndex({ - command: {count: "view", query: {x: 3}}, - hintKeyPattern: {_id: 1}, - expectedKeyPattern: {_id: 1} - }); +confirmCommandUsesIndex( + {command: {count: "view", query: {x: 3}}, expectedKeyPattern: {x: 1}, stageName: "COUNT_SCAN"}); +confirmCommandUsesIndex({ + command: {count: "view", query: {x: 3}}, + hintKeyPattern: {x: 1}, + expectedKeyPattern: {x: 1}, + stageName: "COUNT_SCAN" +}); +confirmCommandUsesIndex({ + command: {count: "view", query: {x: 3}}, + hintKeyPattern: {_id: 1}, + expectedKeyPattern: {_id: 1} +}); })(); diff --git a/jstests/core/aggregation_accepts_write_concern.js b/jstests/core/aggregation_accepts_write_concern.js index 6db86a31411..2c764414a1d 100644 --- a/jstests/core/aggregation_accepts_write_concern.js +++ b/jstests/core/aggregation_accepts_write_concern.js @@ -4,28 +4,28 @@ * @tags: [assumes_write_concern_unchanged, does_not_support_stepdowns] */ (function() { - "use strict"; +"use strict"; - const testDB = db.getSiblingDB("aggregation_accepts_write_concern"); - assert.commandWorked(testDB.dropDatabase()); - const collName = "test"; +const testDB = db.getSiblingDB("aggregation_accepts_write_concern"); +assert.commandWorked(testDB.dropDatabase()); +const collName = "test"; - assert.commandWorked(testDB.runCommand( - {insert: collName, documents: [{_id: 1}], writeConcern: {w: "majority"}})); +assert.commandWorked( + testDB.runCommand({insert: collName, documents: [{_id: 1}], writeConcern: {w: "majority"}})); - // A read-only aggregation accepts writeConcern. - assert.commandWorked(testDB.runCommand({ - aggregate: collName, - pipeline: [{$match: {_id: 1}}], - cursor: {}, - writeConcern: {w: "majority"} - })); +// A read-only aggregation accepts writeConcern. +assert.commandWorked(testDB.runCommand({ + aggregate: collName, + pipeline: [{$match: {_id: 1}}], + cursor: {}, + writeConcern: {w: "majority"} +})); - // An aggregation pipeline that writes accepts writeConcern. - assert.commandWorked(testDB.runCommand({ - aggregate: collName, - pipeline: [{$match: {_id: 1}}, {$out: collName + "_out"}], - cursor: {}, - writeConcern: {w: "majority"} - })); +// An aggregation pipeline that writes accepts writeConcern. +assert.commandWorked(testDB.runCommand({ + aggregate: collName, + pipeline: [{$match: {_id: 1}}, {$out: collName + "_out"}], + cursor: {}, + writeConcern: {w: "majority"} +})); })(); diff --git a/jstests/core/aggregation_getmore_batchsize.js b/jstests/core/aggregation_getmore_batchsize.js index c0e12cfced3..c723d2ca45d 100644 --- a/jstests/core/aggregation_getmore_batchsize.js +++ b/jstests/core/aggregation_getmore_batchsize.js @@ -4,37 +4,37 @@ // from the aggregate sell helper (function() { - 'use strict'; - - db.getMongo().forceReadMode("commands"); - var coll = db["aggregation_getmore_batchsize"]; - - // Insert some data to query for - assert.writeOK(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}])); - - // Create a cursor with a batch size of 2 (should require three full batches to return all - // documents). 
- var cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 2}}); - var curCount = 2; - - // Check that each batch has only two documents in it. - for (var i = 0; i < 6; i++) { - print(tojson(cursor.next())); - jsTestLog("Expecting " + (curCount - 1)); - assert.eq(cursor.objsLeftInBatch(), --curCount); - if (curCount == 0) - curCount = 2; - } - - // Create a cursor with a batch size of 0 (should only return one full batch of documents). - // {batchSize: 0} is a special case where the server will return a cursor ID immediately, or - // an error, but the first document result will be fetched by a getMore. - cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 0}}); - assert.eq(cursor.objsLeftInBatch(), 0); - print(tojson(cursor.next())); - assert.eq(cursor.objsLeftInBatch(), 5); +'use strict'; + +db.getMongo().forceReadMode("commands"); +var coll = db["aggregation_getmore_batchsize"]; + +// Insert some data to query for +assert.writeOK(coll.insert([{a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}, {a: 1}])); - // Check that the default cursor behavior works if you specify a cursor but no batch size. - cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {}}); - assert.eq(cursor.objsLeftInBatch(), 6); +// Create a cursor with a batch size of 2 (should require three full batches to return all +// documents). +var cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 2}}); +var curCount = 2; + +// Check that each batch has only two documents in it. +for (var i = 0; i < 6; i++) { + print(tojson(cursor.next())); + jsTestLog("Expecting " + (curCount - 1)); + assert.eq(cursor.objsLeftInBatch(), --curCount); + if (curCount == 0) + curCount = 2; +} + +// Create a cursor with a batch size of 0 (should only return one full batch of documents). +// {batchSize: 0} is a special case where the server will return a cursor ID immediately, or +// an error, but the first document result will be fetched by a getMore. +cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {batchSize: 0}}); +assert.eq(cursor.objsLeftInBatch(), 0); +print(tojson(cursor.next())); +assert.eq(cursor.objsLeftInBatch(), 5); + +// Check that the default cursor behavior works if you specify a cursor but no batch size. +cursor = coll.aggregate([{$match: {a: 1}}, {$limit: 6}], {cursor: {}}); +assert.eq(cursor.objsLeftInBatch(), 6); })(); diff --git a/jstests/core/all.js b/jstests/core/all.js index e77c0279215..9d142e6e6c4 100644 --- a/jstests/core/all.js +++ b/jstests/core/all.js @@ -2,7 +2,6 @@ t = db.jstests_all; t.drop(); doTest = function() { - assert.commandWorked(t.save({a: [1, 2, 3]})); assert.commandWorked(t.save({a: [1, 2, 4]})); assert.commandWorked(t.save({a: [1, 8, 5]})); @@ -36,7 +35,6 @@ doTest = function() { assert.eq(5, t.find({a: {$all: [1]}}).count(), "E1"); assert.eq(0, t.find({a: {$all: [19]}}).count(), "E2"); assert.eq(0, t.find({a: {$all: []}}).count(), "E3"); - }; doTest(); diff --git a/jstests/core/andor.js b/jstests/core/andor.js index c574ab261a4..fb1ee98a448 100644 --- a/jstests/core/andor.js +++ b/jstests/core/andor.js @@ -11,7 +11,6 @@ function ok(q) { t.save({a: 1}); test = function() { - ok({a: 1}); ok({$and: [{a: 1}]}); @@ -45,7 +44,6 @@ test = function() { ok({$nor: [{$and: [{$and: [{a: 2}]}]}]}); ok({$nor: [{$and: [{$nor: [{a: 1}]}]}]}); - }; test(); @@ -55,7 +53,6 @@ test(); // Test an inequality base match. 
test = function() { - ok({a: {$ne: 2}}); ok({$and: [{a: {$ne: 2}}]}); @@ -89,7 +86,6 @@ test = function() { ok({$nor: [{$and: [{$and: [{a: {$ne: 1}}]}]}]}); ok({$nor: [{$and: [{$nor: [{a: {$ne: 2}}]}]}]}); - }; t.drop(); diff --git a/jstests/core/apitest_db_profile_level.js b/jstests/core/apitest_db_profile_level.js index 2172b4ed1cb..adfc2b0ee43 100644 --- a/jstests/core/apitest_db_profile_level.js +++ b/jstests/core/apitest_db_profile_level.js @@ -4,37 +4,37 @@ */ (function() { - 'use strict'; - - /* - * be sure the public collection API is complete - */ - assert(db.getProfilingLevel, "getProfilingLevel"); - assert(db.setProfilingLevel, "setProfilingLevel"); - - // A test-specific database is used for profiler testing so as not to interfere with - // other tests that modify profiler level, when run in parallel. - var profileLevelDB = db.getSiblingDB("apitest_db_profile_level"); - - profileLevelDB.setProfilingLevel(0); - assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0"); - - profileLevelDB.setProfilingLevel(1); - assert(profileLevelDB.getProfilingLevel() == 1, "p1"); - - profileLevelDB.setProfilingLevel(2); - assert(profileLevelDB.getProfilingLevel() == 2, "p2"); - - profileLevelDB.setProfilingLevel(0); - assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0"); - - var asserted = false; - try { - profileLevelDB.setProfilingLevel(10); - assert(false); - } catch (e) { - asserted = true; - assert(e.dbSetProfilingException); - } - assert(asserted, "should have asserted"); +'use strict'; + +/* + * be sure the public collection API is complete + */ +assert(db.getProfilingLevel, "getProfilingLevel"); +assert(db.setProfilingLevel, "setProfilingLevel"); + +// A test-specific database is used for profiler testing so as not to interfere with +// other tests that modify profiler level, when run in parallel. +var profileLevelDB = db.getSiblingDB("apitest_db_profile_level"); + +profileLevelDB.setProfilingLevel(0); +assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0"); + +profileLevelDB.setProfilingLevel(1); +assert(profileLevelDB.getProfilingLevel() == 1, "p1"); + +profileLevelDB.setProfilingLevel(2); +assert(profileLevelDB.getProfilingLevel() == 2, "p2"); + +profileLevelDB.setProfilingLevel(0); +assert(profileLevelDB.getProfilingLevel() == 0, "prof level 0"); + +var asserted = false; +try { + profileLevelDB.setProfilingLevel(10); + assert(false); +} catch (e) { + asserted = true; + assert(e.dbSetProfilingException); +} +assert(asserted, "should have asserted"); })(); diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js index 3f348bd6963..dd79ac99552 100644 --- a/jstests/core/apitest_dbcollection.js +++ b/jstests/core/apitest_dbcollection.js @@ -47,44 +47,44 @@ for (i = 0; i < 100; i++) { } (function() { - var validateResult = assert.commandWorked(db.getCollection("test_db").validate()); - // Extract validation results from mongos output if running in a sharded context. - var isShardedNS = validateResult.hasOwnProperty('raw'); - - if (isShardedNS) { - // Sample mongos format: - // { - // raw: { - // "localhost:30000": { - // "ns" : "test.test_db", - // ... - // "valid": true, - // ... - // "ok": 1 - // } - // }, - // "valid": true, - // ... 
- // "ok": 1 - // } - - var numFields = 0; - var result = null; - for (var field in validateResult.raw) { - result = validateResult.raw[field]; - numFields++; - } - - assert.eq(1, numFields); - assert.neq(null, result); - validateResult = result; +var validateResult = assert.commandWorked(db.getCollection("test_db").validate()); +// Extract validation results from mongos output if running in a sharded context. +var isShardedNS = validateResult.hasOwnProperty('raw'); + +if (isShardedNS) { + // Sample mongos format: + // { + // raw: { + // "localhost:30000": { + // "ns" : "test.test_db", + // ... + // "valid": true, + // ... + // "ok": 1 + // } + // }, + // "valid": true, + // ... + // "ok": 1 + // } + + var numFields = 0; + var result = null; + for (var field in validateResult.raw) { + result = validateResult.raw[field]; + numFields++; } - assert.eq('test.test_db', - validateResult.ns, - 'incorrect namespace in db.collection.validate() result: ' + tojson(validateResult)); - assert(validateResult.valid, 'collection validation failed'); - assert.eq(100, validateResult.nrecords, "11"); + assert.eq(1, numFields); + assert.neq(null, result); + validateResult = result; +} + +assert.eq('test.test_db', + validateResult.ns, + 'incorrect namespace in db.collection.validate() result: ' + tojson(validateResult)); +assert(validateResult.valid, 'collection validation failed'); +assert.eq(100, validateResult.nrecords, "11"); }()); /* @@ -149,133 +149,127 @@ assert.eq(0, db.getCollection("test_db").getIndexes().length, "24"); */ (function() { - var t = db.apttest_dbcollection; - - // Non-existent collection. - t.drop(); - var noCollStats = assert.commandWorked( - t.stats(), 'db.collection.stats() should work on non-existent collection'); - assert.eq(0, noCollStats.size, "All properties should be 0 on nonexistant collections"); - assert.eq(0, noCollStats.count, "All properties should be 0 on nonexistant collections"); - assert.eq(0, noCollStats.storageSize, "All properties should be 0 on nonexistant collections"); - assert.eq(0, noCollStats.nindexes, "All properties should be 0 on nonexistant collections"); - assert.eq( - 0, noCollStats.totalIndexSize, "All properties should be 0 on nonexistant collections"); - assert.eq(0, noCollStats.totalSize, "All properties should be 0 on nonexistant collections"); - - // scale - passed to stats() as sole numerical argument or part of an options object. - t.drop(); - assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10 * 1024 * 1024})); - var collectionStats = assert.commandWorked(t.stats(1024 * 1024)); - assert.eq(10, - collectionStats.maxSize, - 'db.collection.stats(scale) - capped collection size scaled incorrectly: ' + - tojson(collectionStats)); - var collectionStats = assert.commandWorked(t.stats({scale: 1024 * 1024})); - assert.eq(10, - collectionStats.maxSize, - 'db.collection.stats({scale: N}) - capped collection size scaled incorrectly: ' + - tojson(collectionStats)); - - // indexDetails - If true, includes 'indexDetails' field in results. Default: false. 
- t.drop(); - t.save({a: 1}); - t.ensureIndex({a: 1}); - collectionStats = assert.commandWorked(t.stats()); - assert(!collectionStats.hasOwnProperty('indexDetails'), - 'unexpected indexDetails found in db.collection.stats() result: ' + - tojson(collectionStats)); - collectionStats = assert.commandWorked(t.stats({indexDetails: false})); - assert(!collectionStats.hasOwnProperty('indexDetails'), - 'unexpected indexDetails found in db.collection.stats({indexDetails: true}) result: ' + - tojson(collectionStats)); - collectionStats = assert.commandWorked(t.stats({indexDetails: true})); +var t = db.apttest_dbcollection; + +// Non-existent collection. +t.drop(); +var noCollStats = + assert.commandWorked(t.stats(), 'db.collection.stats() should work on non-existent collection'); +assert.eq(0, noCollStats.size, "All properties should be 0 on nonexistant collections"); +assert.eq(0, noCollStats.count, "All properties should be 0 on nonexistant collections"); +assert.eq(0, noCollStats.storageSize, "All properties should be 0 on nonexistant collections"); +assert.eq(0, noCollStats.nindexes, "All properties should be 0 on nonexistant collections"); +assert.eq(0, noCollStats.totalIndexSize, "All properties should be 0 on nonexistant collections"); +assert.eq(0, noCollStats.totalSize, "All properties should be 0 on nonexistant collections"); + +// scale - passed to stats() as sole numerical argument or part of an options object. +t.drop(); +assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10 * 1024 * 1024})); +var collectionStats = assert.commandWorked(t.stats(1024 * 1024)); +assert.eq(10, + collectionStats.maxSize, + 'db.collection.stats(scale) - capped collection size scaled incorrectly: ' + + tojson(collectionStats)); +var collectionStats = assert.commandWorked(t.stats({scale: 1024 * 1024})); +assert.eq(10, + collectionStats.maxSize, + 'db.collection.stats({scale: N}) - capped collection size scaled incorrectly: ' + + tojson(collectionStats)); + +// indexDetails - If true, includes 'indexDetails' field in results. Default: false. +t.drop(); +t.save({a: 1}); +t.ensureIndex({a: 1}); +collectionStats = assert.commandWorked(t.stats()); +assert(!collectionStats.hasOwnProperty('indexDetails'), + 'unexpected indexDetails found in db.collection.stats() result: ' + tojson(collectionStats)); +collectionStats = assert.commandWorked(t.stats({indexDetails: false})); +assert(!collectionStats.hasOwnProperty('indexDetails'), + 'unexpected indexDetails found in db.collection.stats({indexDetails: true}) result: ' + + tojson(collectionStats)); +collectionStats = assert.commandWorked(t.stats({indexDetails: true})); +assert(collectionStats.hasOwnProperty('indexDetails'), + 'indexDetails missing from db.collection.stats({indexDetails: true}) result: ' + + tojson(collectionStats)); + +// Returns index name. +function getIndexName(indexKey) { + var indexes = t.getIndexes().filter(function(doc) { + return friendlyEqual(doc.key, indexKey); + }); + assert.eq(1, + indexes.length, + tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes())); + return indexes[0].name; +} + +function checkIndexDetails(options, indexName) { + var collectionStats = assert.commandWorked(t.stats(options)); assert(collectionStats.hasOwnProperty('indexDetails'), - 'indexDetails missing from db.collection.stats({indexDetails: true}) result: ' + - tojson(collectionStats)); - - // Returns index name. 
- function getIndexName(indexKey) { - var indexes = t.getIndexes().filter(function(doc) { - return friendlyEqual(doc.key, indexKey); - }); - assert.eq( - 1, - indexes.length, - tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes())); - return indexes[0].name; + 'indexDetails missing from ' + + 'db.collection.stats(' + tojson(options) + ') result: ' + tojson(collectionStats)); + // Currently, indexDetails is only supported with WiredTiger. + var storageEngine = jsTest.options().storageEngine; + if (storageEngine && storageEngine !== 'wiredTiger') { + return; } - - function checkIndexDetails(options, indexName) { - var collectionStats = assert.commandWorked(t.stats(options)); - assert(collectionStats.hasOwnProperty('indexDetails'), - 'indexDetails missing from ' + - 'db.collection.stats(' + tojson(options) + ') result: ' + + assert.eq(1, + Object.keys(collectionStats.indexDetails).length, + 'indexDetails must have exactly one entry'); + assert(collectionStats.indexDetails[indexName], + indexName + ' missing from indexDetails: ' + tojson(collectionStats.indexDetails)); + assert.neq(0, + Object.keys(collectionStats.indexDetails[indexName]).length, + indexName + ' exists in indexDetails but contains no information: ' + tojson(collectionStats)); - // Currently, indexDetails is only supported with WiredTiger. - var storageEngine = jsTest.options().storageEngine; - if (storageEngine && storageEngine !== 'wiredTiger') { - return; - } - assert.eq(1, - Object.keys(collectionStats.indexDetails).length, - 'indexDetails must have exactly one entry'); - assert(collectionStats.indexDetails[indexName], - indexName + ' missing from indexDetails: ' + tojson(collectionStats.indexDetails)); - assert.neq(0, - Object.keys(collectionStats.indexDetails[indexName]).length, - indexName + ' exists in indexDetails but contains no information: ' + - tojson(collectionStats)); - } +} - // indexDetailsKey - show indexDetails results for this index key only. - var indexKey = {a: 1}; - var indexName = getIndexName(indexKey); - checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName); +// indexDetailsKey - show indexDetails results for this index key only. +var indexKey = {a: 1}; +var indexName = getIndexName(indexKey); +checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName); - // indexDetailsName - show indexDetails results for this index name only. - checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName); +// indexDetailsName - show indexDetails results for this index name only. +checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName); - // Cannot specify both indexDetailsKey and indexDetailsName. - var error = assert.throws(function() { - t.stats({indexDetails: true, indexDetailsKey: indexKey, indexDetailsName: indexName}); - }, [], 'indexDetailsKey and indexDetailsName cannot be used at the same time'); - assert.eq(Error, - error.constructor, - 'db.collection.stats() failed when both indexDetailsKey and indexDetailsName ' + - 'are used but with incorrect error type'); +// Cannot specify both indexDetailsKey and indexDetailsName. 
+var error = assert.throws(function() { + t.stats({indexDetails: true, indexDetailsKey: indexKey, indexDetailsName: indexName}); +}, [], 'indexDetailsKey and indexDetailsName cannot be used at the same time'); +assert.eq(Error, + error.constructor, + 'db.collection.stats() failed when both indexDetailsKey and indexDetailsName ' + + 'are used but with incorrect error type'); - t.drop(); +t.drop(); }()); /* * test db.collection.totalSize() */ (function() { - 'use strict'; - - var t = db.apitest_dbcollection; - - t.drop(); - var emptyStats = assert.commandWorked(t.stats()); - assert.eq(emptyStats.storageSize, 0); - assert.eq(emptyStats.totalIndexSize, 0); - - assert.eq( - 0, t.storageSize(), 'db.collection.storageSize() on empty collection should return 0'); - assert.eq(0, - t.totalIndexSize(), - 'db.collection.totalIndexSize() on empty collection should return 0'); - assert.eq(0, t.totalSize(), 'db.collection.totalSize() on empty collection should return 0'); - - t.save({a: 1}); - var stats = assert.commandWorked(t.stats()); - assert.neq(undefined, - t.storageSize(), - 'db.collection.storageSize() cannot be undefined on a non-empty collection'); - assert.neq(undefined, - t.totalIndexSize(), - 'db.collection.totalIndexSize() cannot be undefined on a non-empty collection'); - - t.drop(); +'use strict'; + +var t = db.apitest_dbcollection; + +t.drop(); +var emptyStats = assert.commandWorked(t.stats()); +assert.eq(emptyStats.storageSize, 0); +assert.eq(emptyStats.totalIndexSize, 0); + +assert.eq(0, t.storageSize(), 'db.collection.storageSize() on empty collection should return 0'); +assert.eq( + 0, t.totalIndexSize(), 'db.collection.totalIndexSize() on empty collection should return 0'); +assert.eq(0, t.totalSize(), 'db.collection.totalSize() on empty collection should return 0'); + +t.save({a: 1}); +var stats = assert.commandWorked(t.stats()); +assert.neq(undefined, + t.storageSize(), + 'db.collection.storageSize() cannot be undefined on a non-empty collection'); +assert.neq(undefined, + t.totalIndexSize(), + 'db.collection.totalIndexSize() cannot be undefined on a non-empty collection'); + +t.drop(); }()); diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js index dd2e4ceb79b..29961fab45e 100644 --- a/jstests/core/apply_ops1.js +++ b/jstests/core/apply_ops1.js @@ -7,438 +7,421 @@ // ] (function() { - "use strict"; - - load("jstests/libs/get_index_helpers.js"); - - var t = db.apply_ops1; - t.drop(); - - // - // Input validation tests - // - - // Empty array of operations. - assert.commandWorked(db.adminCommand({applyOps: []}), - 'applyOps should not fail on empty array of operations'); - - // Non-array type for operations. - assert.commandFailed(db.adminCommand({applyOps: "not an array"}), - 'applyOps should fail on non-array type for operations'); - - // Missing 'op' field in an operation. - assert.commandFailed(db.adminCommand({applyOps: [{ns: t.getFullName()}]}), - 'applyOps should fail on operation without "op" field'); - - // Non-string 'op' field in an operation. - assert.commandFailed(db.adminCommand({applyOps: [{op: 12345, ns: t.getFullName()}]}), - 'applyOps should fail on operation with non-string "op" field'); - - // Empty 'op' field value in an operation. - assert.commandFailed(db.adminCommand({applyOps: [{op: '', ns: t.getFullName()}]}), - 'applyOps should fail on operation with empty "op" field value'); - - // Missing 'ns' field in an operation. 
- assert.commandFailed(db.adminCommand({applyOps: [{op: 'c'}]}), - 'applyOps should fail on operation without "ns" field'); - - // Non-string 'ns' field in an operation. - assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: 12345}]}), - 'applyOps should fail on operation with non-string "ns" field'); - - // Empty 'ns' field value in an operation of type 'n' (noop). - assert.commandWorked(db.adminCommand({applyOps: [{op: 'n', ns: ''}]}), - 'applyOps should work on no op operation with empty "ns" field value'); - - // Missing dbname in 'ns' field. - assert.commandFailed(db.adminCommand({applyOps: [{op: 'd', ns: t.getName(), o: {_id: 1}}]})); - - // Missing 'o' field value in an operation of type 'c' (command). - assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName()}]}), - 'applyOps should fail on command operation without "o" field'); - - // Non-object 'o' field value in an operation of type 'c' (command). - assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: 'bar'}]}), - 'applyOps should fail on command operation with non-object "o" field'); - - // Empty object 'o' field value in an operation of type 'c' (command). - assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {}}]}), - 'applyOps should fail on command operation with empty object "o" field'); - - // Unknown key in 'o' field value in an operation of type 'c' (command). - assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {a: 1}}]}), - 'applyOps should fail on command operation on unknown key in "o" field'); - - // Empty 'ns' field value in operation type other than 'n'. - assert.commandFailed( - db.adminCommand({applyOps: [{op: 'c', ns: ''}]}), - 'applyOps should fail on non-"n" operation type with empty "ns" field value'); - - // Excessively nested applyOps commands gracefully fail. 
- assert.commandFailed(db.adminCommand({ - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": - {"$timestamp": {"t": 1, "i": 100}}, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": { - "$timestamp": - {"t": 1, "i": 100} - }, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": { - "$timestamp": { - "t": 1, - "i": 100 - } - }, - "h": 0, - "v": 2, - "op": "c", - "ns": "test.$cmd", - "o": { - "applyOps": [{ - "ts": { - "$timestamp": - { - "t": - 1, - "i": - 100 - } - }, - "h": 0, - "v": 2, - "op": "c", - "ns": - "test.$cmd", - "o": { - "applyOps": [ - { - "ts": { - "$timestamp": { - "t": - 1, - "i": - 100 - } - }, - "h": - 0, - "v": - 2, - "op": - "c", - "ns": - "test.$cmd", - "o": { - "applyOps": - [ - ] - } - } - ] +"use strict"; + +load("jstests/libs/get_index_helpers.js"); + +var t = db.apply_ops1; +t.drop(); + +// +// Input validation tests +// + +// Empty array of operations. +assert.commandWorked(db.adminCommand({applyOps: []}), + 'applyOps should not fail on empty array of operations'); + +// Non-array type for operations. +assert.commandFailed(db.adminCommand({applyOps: "not an array"}), + 'applyOps should fail on non-array type for operations'); + +// Missing 'op' field in an operation. +assert.commandFailed(db.adminCommand({applyOps: [{ns: t.getFullName()}]}), + 'applyOps should fail on operation without "op" field'); + +// Non-string 'op' field in an operation. +assert.commandFailed(db.adminCommand({applyOps: [{op: 12345, ns: t.getFullName()}]}), + 'applyOps should fail on operation with non-string "op" field'); + +// Empty 'op' field value in an operation. +assert.commandFailed(db.adminCommand({applyOps: [{op: '', ns: t.getFullName()}]}), + 'applyOps should fail on operation with empty "op" field value'); + +// Missing 'ns' field in an operation. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c'}]}), + 'applyOps should fail on operation without "ns" field'); + +// Non-string 'ns' field in an operation. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: 12345}]}), + 'applyOps should fail on operation with non-string "ns" field'); + +// Empty 'ns' field value in an operation of type 'n' (noop). +assert.commandWorked(db.adminCommand({applyOps: [{op: 'n', ns: ''}]}), + 'applyOps should work on no op operation with empty "ns" field value'); + +// Missing dbname in 'ns' field. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'd', ns: t.getName(), o: {_id: 1}}]})); + +// Missing 'o' field value in an operation of type 'c' (command). +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName()}]}), + 'applyOps should fail on command operation without "o" field'); + +// Non-object 'o' field value in an operation of type 'c' (command). 
+assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: 'bar'}]}), + 'applyOps should fail on command operation with non-object "o" field'); + +// Empty object 'o' field value in an operation of type 'c' (command). +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {}}]}), + 'applyOps should fail on command operation with empty object "o" field'); + +// Unknown key in 'o' field value in an operation of type 'c' (command). +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: t.getFullName(), o: {a: 1}}]}), + 'applyOps should fail on command operation on unknown key in "o" field'); + +// Empty 'ns' field value in operation type other than 'n'. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'c', ns: ''}]}), + 'applyOps should fail on non-"n" operation type with empty "ns" field value'); + +// Excessively nested applyOps commands gracefully fail. +assert.commandFailed(db.adminCommand({ + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": {"$timestamp": {"t": 1, "i": 100}}, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": { + "$timestamp": {"t": 1, "i": 100} + }, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": { + "$timestamp": + {"t": 1, "i": 100} + }, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": { + "$timestamp": { + "t": 1, + "i": 100 } - }] - } - }] - } - }] - } - }] - } - }] - } - }] - } - }] - } - }] - } - }] - } - }] - }), - "Excessively nested applyOps should be rejected"); - - // Valid 'ns' field value in unknown operation type 'x'. - assert.commandFailed( - db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}), - 'applyOps should fail on unknown operation type "x" with valid "ns" value'); - - assert.eq(0, t.find().count(), "Non-zero amount of documents in collection to start"); - - /** - * Test function for running CRUD operations on non-existent namespaces using various - * combinations of invalid namespaces (collection/database), allowAtomic and alwaysUpsert, - * and nesting. - * - * Leave 'expectedErrorCode' undefined if this command is expected to run successfully. - */ - function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) { - expectedErrorCode = expectedErrorCode || ErrorCodes.OK; - const t2 = db.getSiblingDB('apply_ops1_no_such_db').getCollection('t'); - [t, t2].forEach(coll => { - const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2}; - [false, true].forEach(nested => { - const opToRun = - nested ? 
{op: 'c', ns: 'test.$cmd', o: {applyOps: [op]}, o2: {}} : op; - [false, true].forEach(allowAtomic => { - [false, true].forEach(alwaysUpsert => { - const cmd = { - applyOps: [opToRun], - allowAtomic: allowAtomic, - alwaysUpsert: alwaysUpsert - }; - jsTestLog('Testing applyOps on non-existent namespace: ' + tojson(cmd)); - if (expectedErrorCode === ErrorCodes.OK) { - assert.commandWorked(db.adminCommand(cmd)); - } else { - assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode); + }, + "h": 0, + "v": 2, + "op": "c", + "ns": "test.$cmd", + "o": { + "applyOps": [{ + "ts": { + "$timestamp": { + "t": + 1, + "i": + 100 + } + }, + "h": 0, + "v": 2, + "op": "c", + "ns": + "test.$cmd", + "o": { + "applyOps": + [] + } + }] + } + }] + } + }] + } + }] + } + }] + } + }] + } + }] + } + }] } - }); + }] + } + }] + } + }] +}), + "Excessively nested applyOps should be rejected"); + +// Valid 'ns' field value in unknown operation type 'x'. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}), + 'applyOps should fail on unknown operation type "x" with valid "ns" value'); + +assert.eq(0, t.find().count(), "Non-zero amount of documents in collection to start"); + +/** + * Test function for running CRUD operations on non-existent namespaces using various + * combinations of invalid namespaces (collection/database), allowAtomic and alwaysUpsert, + * and nesting. + * + * Leave 'expectedErrorCode' undefined if this command is expected to run successfully. + */ +function testCrudOperationOnNonExistentNamespace(optype, o, o2, expectedErrorCode) { + expectedErrorCode = expectedErrorCode || ErrorCodes.OK; + const t2 = db.getSiblingDB('apply_ops1_no_such_db').getCollection('t'); + [t, t2].forEach(coll => { + const op = {op: optype, ns: coll.getFullName(), o: o, o2: o2}; + [false, true].forEach(nested => { + const opToRun = nested ? {op: 'c', ns: 'test.$cmd', o: {applyOps: [op]}, o2: {}} : op; + [false, true].forEach(allowAtomic => { + [false, true].forEach(alwaysUpsert => { + const cmd = { + applyOps: [opToRun], + allowAtomic: allowAtomic, + alwaysUpsert: alwaysUpsert + }; + jsTestLog('Testing applyOps on non-existent namespace: ' + tojson(cmd)); + if (expectedErrorCode === ErrorCodes.OK) { + assert.commandWorked(db.adminCommand(cmd)); + } else { + assert.commandFailedWithCode(db.adminCommand(cmd), expectedErrorCode); + } }); }); }); - } - - // Insert and update operations on non-existent collections/databases should return - // NamespaceNotFound. - testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound); - testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound); - - // Delete operations on non-existent collections/databases should return OK for idempotency - // reasons. 
- testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {}); - - assert.commandWorked(db.createCollection(t.getName())); - var a = assert.commandWorked( - db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]})); - assert.eq(1, t.find().count(), "Valid insert failed"); - assert.eq(true, a.results[0], "Bad result value for valid insert"); - - a = assert.commandWorked( - db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]})); - assert.eq(1, t.find().count(), "Duplicate insert failed"); - assert.eq(true, a.results[0], "Bad result value for duplicate insert"); - - var o = {_id: 5, x: 17}; - assert.eq(o, t.findOne(), "Mismatching document inserted."); - - // 'o' field is an empty array. - assert.commandFailed(db.adminCommand({applyOps: [{op: 'i', ns: t.getFullName(), o: []}]}), - 'applyOps should fail on insert of object with empty array element'); - - var res = assert.commandWorked(db.runCommand({ - applyOps: [ - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}}, - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}} - ] - })); - - o.x++; - o.x++; - - assert.eq(1, t.find().count(), "Updates increased number of documents"); - assert.eq(o, t.findOne(), "Document doesn't match expected"); - assert.eq(true, res.results[0], "Bad result value for valid update"); - assert.eq(true, res.results[1], "Bad result value for valid update"); - - // preCondition fully matches - res = db.runCommand({ - applyOps: [ - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}}, - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}} - ], - preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}] }); - - // The use of preCondition requires applyOps to run atomically. Therefore, it is incompatible - // with {allowAtomic: false}. - assert.commandFailedWithCode( - db.runCommand({ - applyOps: [{op: 'u', ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}], - preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}], - allowAtomic: false, - }), - ErrorCodes.InvalidOptions, - 'applyOps should fail when preCondition is present and atomicAllowed is false.'); - - // The use of preCondition is also incompatible with operations that include commands. 
- assert.commandFailedWithCode( - db.runCommand({ - applyOps: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}], - preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}], - }), - ErrorCodes.InvalidOptions, - 'applyOps should fail when preCondition is present and operations includes commands.'); - - o.x++; - o.x++; - - assert.eq(1, t.find().count(), "Updates increased number of documents"); - assert.eq(o, t.findOne(), "Document doesn't match expected"); - assert.eq(true, res.results[0], "Bad result value for valid update"); - assert.eq(true, res.results[1], "Bad result value for valid update"); - - // preCondition doesn't match ns - res = db.runCommand({ - applyOps: [ - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}} - ], - preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}] - }); - - assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied"); - - // preCondition doesn't match query - res = db.runCommand({ - applyOps: [ - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}} - ], - preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}] - }); - - assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied"); - - res = db.runCommand({ - applyOps: [ - {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, - {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}} - ] - }); - - assert.eq(true, res.results[0], "Valid update failed"); - assert.eq(true, res.results[1], "Valid update failed"); - - // Ops with transaction numbers are valid. - const lsid = { - "id": UUID("3eea4a58-6018-40b6-8743-6a55783bf902"), - "uid": BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=") - }; - res = db.runCommand({ - applyOps: [ - { - op: "i", - ns: t.getFullName(), - o: {_id: 7, x: 24}, - lsid: lsid, - txnNumber: NumberLong(1), - stmtId: NumberInt(0) - }, - { - op: "u", - ns: t.getFullName(), - o2: {_id: 8}, - o: {$set: {x: 25}}, - lsid: lsid, - txnNumber: NumberLong(1), - stmtId: NumberInt(1) - }, - { - op: "d", - ns: t.getFullName(), - o: {_id: 7}, - lsid: lsid, - txnNumber: NumberLong(2), - stmtId: NumberInt(0) - }, - ] - }); - - assert.eq(true, res.results[0], "Valid insert with transaction number failed"); - assert.eq(true, res.results[1], "Valid update with transaction number failed"); - assert.eq(true, res.results[2], "Valid delete with transaction number failed"); - - // When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set - // operations add new fields in lexicographic order. - res = assert.commandWorked(db.adminCommand({ - applyOps: [ - {"op": "i", "ns": t.getFullName(), "o": {_id: 6}}, - {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}} - ] - })); - assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted. - - // 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail. - res = assert.commandFailed(db.adminCommand({ - applyOps: [ - {"op": "i", "ns": t.getFullName(), "o": {_id: 7}}, - { - "op": "u", - "ns": t.getFullName(), - "o2": {_id: 7}, - "o": {$v: NumberLong(0), $set: {z: 1, a: 2}} - } - ] - })); - assert.eq(res.code, 40682); - - // When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set - // operations get performed in lexicographic order. 
- res = assert.commandWorked(db.adminCommand({ - applyOps: [ - {"op": "i", "ns": t.getFullName(), "o": {_id: 8}}, - { - "op": "u", - "ns": t.getFullName(), - "o2": {_id: 8}, - "o": {$v: NumberLong(1), $set: {z: 1, a: 2}} - } - ] - })); - assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted. +} + +// Insert and update operations on non-existent collections/databases should return +// NamespaceNotFound. +testCrudOperationOnNonExistentNamespace('i', {_id: 0}, {}, ErrorCodes.NamespaceNotFound); +testCrudOperationOnNonExistentNamespace('u', {x: 0}, {_id: 0}, ErrorCodes.NamespaceNotFound); + +// Delete operations on non-existent collections/databases should return OK for idempotency +// reasons. +testCrudOperationOnNonExistentNamespace('d', {_id: 0}, {}); + +assert.commandWorked(db.createCollection(t.getName())); +var a = assert.commandWorked( + db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]})); +assert.eq(1, t.find().count(), "Valid insert failed"); +assert.eq(true, a.results[0], "Bad result value for valid insert"); + +a = assert.commandWorked( + db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]})); +assert.eq(1, t.find().count(), "Duplicate insert failed"); +assert.eq(true, a.results[0], "Bad result value for duplicate insert"); + +var o = {_id: 5, x: 17}; +assert.eq(o, t.findOne(), "Mismatching document inserted."); + +// 'o' field is an empty array. +assert.commandFailed(db.adminCommand({applyOps: [{op: 'i', ns: t.getFullName(), o: []}]}), + 'applyOps should fail on insert of object with empty array element'); + +var res = assert.commandWorked(db.runCommand({ + applyOps: [ + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 18}}}, + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 19}}} + ] +})); + +o.x++; +o.x++; + +assert.eq(1, t.find().count(), "Updates increased number of documents"); +assert.eq(o, t.findOne(), "Document doesn't match expected"); +assert.eq(true, res.results[0], "Bad result value for valid update"); +assert.eq(true, res.results[1], "Bad result value for valid update"); + +// preCondition fully matches +res = db.runCommand({ + applyOps: [ + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 20}}}, + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 21}}} + ], + preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}] +}); + +// The use of preCondition requires applyOps to run atomically. Therefore, it is incompatible +// with {allowAtomic: false}. +assert.commandFailedWithCode( + db.runCommand({ + applyOps: [{op: 'u', ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}], + preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}], + allowAtomic: false, + }), + ErrorCodes.InvalidOptions, + 'applyOps should fail when preCondition is present and atomicAllowed is false.'); + +// The use of preCondition is also incompatible with operations that include commands. 
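// Illustrative sketch (hypothetical values): when the preCondition does not hold, the whole
// applyOps command is expected to fail and none of the listed ops should be applied.
var preConditionMissRes = db.runCommand({
    applyOps: [{op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 999}}}],
    preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: -1}}]
});
assert.commandFailed(preConditionMissRes);
assert.neq(999, t.findOne({_id: 5}).x, "op applied despite failed preCondition");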
+assert.commandFailedWithCode( + db.runCommand({ + applyOps: [{op: 'c', ns: t.getCollection('$cmd').getFullName(), o: {applyOps: []}}], + preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 21}}], + }), + ErrorCodes.InvalidOptions, + 'applyOps should fail when preCondition is present and operations includes commands.'); + +o.x++; +o.x++; + +assert.eq(1, t.find().count(), "Updates increased number of documents"); +assert.eq(o, t.findOne(), "Document doesn't match expected"); +assert.eq(true, res.results[0], "Bad result value for valid update"); +assert.eq(true, res.results[1], "Bad result value for valid update"); + +// preCondition doesn't match ns +res = db.runCommand({ + applyOps: [ + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}} + ], + preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}] +}); + +assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied"); + +// preCondition doesn't match query +res = db.runCommand({ + applyOps: [ + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 23}}} + ], + preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}] +}); + +assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied"); + +res = db.runCommand({ + applyOps: [ + {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$set: {x: 22}}}, + {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$set: {x: 23}}} + ] +}); + +assert.eq(true, res.results[0], "Valid update failed"); +assert.eq(true, res.results[1], "Valid update failed"); + +// Ops with transaction numbers are valid. +const lsid = { + "id": UUID("3eea4a58-6018-40b6-8743-6a55783bf902"), + "uid": BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=") +}; +res = db.runCommand({ + applyOps: [ + { + op: "i", + ns: t.getFullName(), + o: {_id: 7, x: 24}, + lsid: lsid, + txnNumber: NumberLong(1), + stmtId: NumberInt(0) + }, + { + op: "u", + ns: t.getFullName(), + o2: {_id: 8}, + o: {$set: {x: 25}}, + lsid: lsid, + txnNumber: NumberLong(1), + stmtId: NumberInt(1) + }, + { + op: "d", + ns: t.getFullName(), + o: {_id: 7}, + lsid: lsid, + txnNumber: NumberLong(2), + stmtId: NumberInt(0) + }, + ] +}); + +assert.eq(true, res.results[0], "Valid insert with transaction number failed"); +assert.eq(true, res.results[1], "Valid update with transaction number failed"); +assert.eq(true, res.results[2], "Valid delete with transaction number failed"); + +// When applying a "u" (update) op, we default to 'UpdateNode' update semantics, and $set +// operations add new fields in lexicographic order. +res = assert.commandWorked(db.adminCommand({ + applyOps: [ + {"op": "i", "ns": t.getFullName(), "o": {_id: 6}}, + {"op": "u", "ns": t.getFullName(), "o2": {_id: 6}, "o": {$set: {z: 1, a: 2}}} + ] +})); +assert.eq(t.findOne({_id: 6}), {_id: 6, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted. + +// 'ModifierInterface' semantics are not supported, so an update with {$v: 0} should fail. +res = assert.commandFailed(db.adminCommand({ + applyOps: [ + {"op": "i", "ns": t.getFullName(), "o": {_id: 7}}, + { + "op": "u", + "ns": t.getFullName(), + "o2": {_id: 7}, + "o": {$v: NumberLong(0), $set: {z: 1, a: 2}} + } + ] +})); +assert.eq(res.code, 40682); + +// When we explicitly specify {$v: 1}, we should get 'UpdateNode' update semantics, and $set +// operations get performed in lexicographic order. 
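// Illustrative sketch (hypothetical _id; assumes ordinary updates go through the same
// 'UpdateNode' semantics): an upsert whose $set creates several new fields should also add
// them in lexicographic order.
assert.writeOK(t.update({_id: 100}, {$set: {z: 1, a: 2}}, {upsert: true}));
assert.eq(t.findOne({_id: 100}), {_id: 100, a: 2, z: 1});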
+res = assert.commandWorked(db.adminCommand({ + applyOps: [ + {"op": "i", "ns": t.getFullName(), "o": {_id: 8}}, + { + "op": "u", + "ns": t.getFullName(), + "o2": {_id: 8}, + "o": {$v: NumberLong(1), $set: {z: 1, a: 2}} + } + ] +})); +assert.eq(t.findOne({_id: 8}), {_id: 8, a: 2, z: 1}); // Note: 'a' and 'z' have been sorted. })(); diff --git a/jstests/core/apply_ops2.js b/jstests/core/apply_ops2.js index caf30364c48..690b8545e4a 100644 --- a/jstests/core/apply_ops2.js +++ b/jstests/core/apply_ops2.js @@ -52,12 +52,7 @@ print("Testing applyOps with default alwaysUpsert"); res = db.runCommand({ applyOps: [ {op: "u", ns: t.getFullName(), o2: {_id: 1}, o: {$set: {x: "upsert=default existing"}}}, - { - op: "u", - ns: t.getFullName(), - o2: {_id: 4}, - o: {$set: {x: "upsert=defaults non-existing"}} - } + {op: "u", ns: t.getFullName(), o2: {_id: 4}, o: {$set: {x: "upsert=defaults non-existing"}}} ] }); diff --git a/jstests/core/apply_ops_dups.js b/jstests/core/apply_ops_dups.js index e18cd01d3f6..85bc04437a6 100644 --- a/jstests/core/apply_ops_dups.js +++ b/jstests/core/apply_ops_dups.js @@ -7,33 +7,33 @@ // ] (function() { - "use strict"; - var t = db.apply_ops_dups; - t.drop(); +"use strict"; +var t = db.apply_ops_dups; +t.drop(); - // Check that duplicate _id fields don't cause an error - assert.writeOK(t.insert({_id: 0, x: 1})); - assert.commandWorked(t.createIndex({x: 1}, {unique: true})); - var a = assert.commandWorked(db.adminCommand({ - applyOps: [ - {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}}, - {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}} - ] - })); - printjson(a); - printjson(t.find().toArray()); - assert.eq(2, t.find().count(), "Invalid insert worked"); - assert.eq(true, a.results[0], "Valid insert was rejected"); - assert.eq(true, a.results[1], "Insert should have not failed (but should be ignored"); - printjson(t.find().toArray()); +// Check that duplicate _id fields don't cause an error +assert.writeOK(t.insert({_id: 0, x: 1})); +assert.commandWorked(t.createIndex({x: 1}, {unique: true})); +var a = assert.commandWorked(db.adminCommand({ + applyOps: [ + {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}}, + {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}} + ] +})); +printjson(a); +printjson(t.find().toArray()); +assert.eq(2, t.find().count(), "Invalid insert worked"); +assert.eq(true, a.results[0], "Valid insert was rejected"); +assert.eq(true, a.results[1], "Insert should have not failed (but should be ignored"); +printjson(t.find().toArray()); - // Check that dups on non-id cause errors - var a = assert.commandFailedWithCode(db.adminCommand({ - applyOps: [ - {"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}}, - {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}} - ] - }), - 11000 /*DuplicateKey*/); - assert.eq(2, t.find().count(), "Invalid insert worked"); +// Check that dups on non-id cause errors +var a = assert.commandFailedWithCode(db.adminCommand({ + applyOps: [ + {"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}}, + {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}} + ] +}), + 11000 /*DuplicateKey*/); +assert.eq(2, t.find().count(), "Invalid insert worked"); })(); diff --git a/jstests/core/apply_ops_index_collation.js b/jstests/core/apply_ops_index_collation.js index d58d3659223..2447a32e101 100644 --- a/jstests/core/apply_ops_index_collation.js +++ b/jstests/core/apply_ops_index_collation.js @@ -13,78 +13,78 @@ // ] (function() { - "use strict"; +"use strict"; - load("jstests/libs/get_index_helpers.js"); - 
load('jstests/libs/uuid_util.js'); +load("jstests/libs/get_index_helpers.js"); +load('jstests/libs/uuid_util.js'); - const coll = db.apply_ops_index_collation; - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - const uuid = getUUIDFromListCollections(db, coll.getName()); +const coll = db.apply_ops_index_collation; +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +const uuid = getUUIDFromListCollections(db, coll.getName()); - // An index created using a createIndexes-style oplog entry with a non-simple collation does not - // inherit the collection default collation. - let res = assert.commandWorked(db.adminCommand({ - applyOps: [{ - op: "c", - ns: coll.getFullName(), - ui: uuid, - o: { - createIndexes: coll.getFullName(), - v: 2, - key: {a: 1}, - name: "a_1_en", - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false, - version: "57.1" - } +// An index created using a createIndexes-style oplog entry with a non-simple collation does not +// inherit the collection default collation. +let res = assert.commandWorked(db.adminCommand({ + applyOps: [{ + op: "c", + ns: coll.getFullName(), + ui: uuid, + o: { + createIndexes: coll.getFullName(), + v: 2, + key: {a: 1}, + name: "a_1_en", + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false, + version: "57.1" } - }] - })); - let allIndexes = coll.getIndexes(); - let spec = GetIndexHelpers.findByName(allIndexes, "a_1_en"); - assert.neq(null, spec, "Index 'a_1_en' not found: " + tojson(allIndexes)); - assert.eq(2, spec.v, tojson(spec)); - assert.eq("en_US", spec.collation.locale, tojson(spec)); + } + }] +})); +let allIndexes = coll.getIndexes(); +let spec = GetIndexHelpers.findByName(allIndexes, "a_1_en"); +assert.neq(null, spec, "Index 'a_1_en' not found: " + tojson(allIndexes)); +assert.eq(2, spec.v, tojson(spec)); +assert.eq("en_US", spec.collation.locale, tojson(spec)); - // An index created using a createIndexes-style oplog entry with a simple collation does not - // inherit the collection default collation. - res = assert.commandWorked(db.adminCommand({ - applyOps: [{ - op: "c", - ns: coll.getFullName(), - ui: uuid, - o: {createIndexes: coll.getFullName(), v: 2, key: {a: 1}, name: "a_1"} - }] - })); - allIndexes = coll.getIndexes(); - spec = GetIndexHelpers.findByName(allIndexes, "a_1"); - assert.neq(null, spec, "Index 'a_1' not found: " + tojson(allIndexes)); - assert.eq(2, spec.v, tojson(spec)); - assert(!spec.hasOwnProperty("collation"), tojson(spec)); +// An index created using a createIndexes-style oplog entry with a simple collation does not +// inherit the collection default collation. 
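// Illustrative sketch (hypothetical index on 'c'): by contrast, an index built through the
// ordinary createIndex() path with no explicit collation is expected to inherit the
// collection's "fr_CA" default collation.
assert.commandWorked(coll.createIndex({c: 1}));
let inheritedSpec = GetIndexHelpers.findByName(coll.getIndexes(), "c_1");
assert.neq(null, inheritedSpec, tojson(coll.getIndexes()));
assert.eq("fr_CA", inheritedSpec.collation.locale, tojson(inheritedSpec));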
+res = assert.commandWorked(db.adminCommand({ + applyOps: [{ + op: "c", + ns: coll.getFullName(), + ui: uuid, + o: {createIndexes: coll.getFullName(), v: 2, key: {a: 1}, name: "a_1"} + }] +})); +allIndexes = coll.getIndexes(); +spec = GetIndexHelpers.findByName(allIndexes, "a_1"); +assert.neq(null, spec, "Index 'a_1' not found: " + tojson(allIndexes)); +assert.eq(2, spec.v, tojson(spec)); +assert(!spec.hasOwnProperty("collation"), tojson(spec)); - // A v=1 index created using a createIndexes-style oplog entry does not inherit the collection - // default collation. - res = assert.commandWorked(db.adminCommand({ - applyOps: [{ - op: "c", - ns: coll.getFullName(), - ui: uuid, - o: {createIndexes: coll.getFullName(), v: 1, key: {b: 1}, name: "b_1"} - }] - })); - allIndexes = coll.getIndexes(); - spec = GetIndexHelpers.findByName(allIndexes, "b_1"); - assert.neq(null, spec, "Index 'b_1' not found: " + tojson(allIndexes)); - assert.eq(1, spec.v, tojson(spec)); - assert(!spec.hasOwnProperty("collation"), tojson(spec)); +// A v=1 index created using a createIndexes-style oplog entry does not inherit the collection +// default collation. +res = assert.commandWorked(db.adminCommand({ + applyOps: [{ + op: "c", + ns: coll.getFullName(), + ui: uuid, + o: {createIndexes: coll.getFullName(), v: 1, key: {b: 1}, name: "b_1"} + }] +})); +allIndexes = coll.getIndexes(); +spec = GetIndexHelpers.findByName(allIndexes, "b_1"); +assert.neq(null, spec, "Index 'b_1' not found: " + tojson(allIndexes)); +assert.eq(1, spec.v, tojson(spec)); +assert(!spec.hasOwnProperty("collation"), tojson(spec)); })(); diff --git a/jstests/core/apply_ops_invalid_index_spec.js b/jstests/core/apply_ops_invalid_index_spec.js index 5ed9e6d8ee6..d602cae29c1 100644 --- a/jstests/core/apply_ops_invalid_index_spec.js +++ b/jstests/core/apply_ops_invalid_index_spec.js @@ -15,71 +15,71 @@ */ (function() { - 'use strict'; +'use strict'; - const t = db.apply_ops_invalid_index_spec; - t.drop(); +const t = db.apply_ops_invalid_index_spec; +t.drop(); - const collNs = t.getFullName(); - const cmdNs = db.getName() + '.$cmd'; - const systemIndexesNs = db.getCollection('system.indexes').getFullName(); +const collNs = t.getFullName(); +const cmdNs = db.getName() + '.$cmd'; +const systemIndexesNs = db.getCollection('system.indexes').getFullName(); - assert.commandWorked(db.createCollection(t.getName())); - assert.writeOK(t.save({_id: 100, a: 100})); +assert.commandWorked(db.createCollection(t.getName())); +assert.writeOK(t.save({_id: 100, a: 100})); - // Tests that db.collection.createIndex() fails when given an index spec containing an unknown - // field. - assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 2, name: 'a_1_base_v2', unknown: 1}), - ErrorCodes.InvalidIndexSpecificationOption); - assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 1, name: 'a_1_base_v1', unknown: 1}), - ErrorCodes.InvalidIndexSpecificationOption); +// Tests that db.collection.createIndex() fails when given an index spec containing an unknown +// field. +assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 2, name: 'a_1_base_v2', unknown: 1}), + ErrorCodes.InvalidIndexSpecificationOption); +assert.commandFailedWithCode(t.createIndex({a: 1}, {v: 1, name: 'a_1_base_v1', unknown: 1}), + ErrorCodes.InvalidIndexSpecificationOption); - // A createIndexes command for a v:2 index with an unknown field in the index spec should fail. 
- assert.commandFailedWithCode(db.adminCommand({ - applyOps: [{ - op: 'c', - ns: cmdNs, - o: { - createIndexes: t.getName(), - v: 2, - key: {a: 1}, - name: 'a_1_create_v2', - unknown: 1, - }, - }], - }), - ErrorCodes.InvalidIndexSpecificationOption); +// A createIndexes command for a v:2 index with an unknown field in the index spec should fail. +assert.commandFailedWithCode(db.adminCommand({ + applyOps: [{ + op: 'c', + ns: cmdNs, + o: { + createIndexes: t.getName(), + v: 2, + key: {a: 1}, + name: 'a_1_create_v2', + unknown: 1, + }, + }], +}), + ErrorCodes.InvalidIndexSpecificationOption); - // A createIndexes command for a background index with unknown field in the index spec should - // fail. - assert.commandFailedWithCode(db.adminCommand({ - applyOps: [{ - op: 'c', - ns: cmdNs, - o: { - createIndexes: t.getName(), - v: 2, - key: {a: 1}, - background: true, - name: 'a_1_background', - unknown: 1, - }, - }], - }), - ErrorCodes.InvalidIndexSpecificationOption); +// A createIndexes command for a background index with unknown field in the index spec should +// fail. +assert.commandFailedWithCode(db.adminCommand({ + applyOps: [{ + op: 'c', + ns: cmdNs, + o: { + createIndexes: t.getName(), + v: 2, + key: {a: 1}, + background: true, + name: 'a_1_background', + unknown: 1, + }, + }], +}), + ErrorCodes.InvalidIndexSpecificationOption); - // A createIndexes command for a v:1 index with an unknown field in the index spec should work. - const res1 = assert.commandWorked(db.adminCommand({ - applyOps: [{ - op: 'c', - ns: cmdNs, - o: { - createIndexes: t.getName(), - v: 1, - key: {a: 1}, - name: 'a_1_create_v1', - unknown: 1, - }, - }], - })); +// A createIndexes command for a v:1 index with an unknown field in the index spec should work. +const res1 = assert.commandWorked(db.adminCommand({ + applyOps: [{ + op: 'c', + ns: cmdNs, + o: { + createIndexes: t.getName(), + v: 1, + key: {a: 1}, + name: 'a_1_create_v1', + unknown: 1, + }, + }], +})); })(); diff --git a/jstests/core/apply_ops_without_ns.js b/jstests/core/apply_ops_without_ns.js index 3b488078a31..f23587f4a08 100644 --- a/jstests/core/apply_ops_without_ns.js +++ b/jstests/core/apply_ops_without_ns.js @@ -6,10 +6,9 @@ // ] (function() { - 'use strict'; +'use strict'; - // SERVER-33854: This should fail and not cause any invalid memory access. - assert.commandFailed(db.adminCommand({ - applyOps: [{'op': 'c', 'ns': 'admin.$cmd', 'o': {applyOps: [{'op': 'i', 'o': {x: 1}}]}}] - })); +// SERVER-33854: This should fail and not cause any invalid memory access. +assert.commandFailed(db.adminCommand( + {applyOps: [{'op': 'c', 'ns': 'admin.$cmd', 'o': {applyOps: [{'op': 'i', 'o': {x: 1}}]}}]})); })(); diff --git a/jstests/core/arrayfind8.js b/jstests/core/arrayfind8.js index f9693182a7a..87a3a8d701a 100644 --- a/jstests/core/arrayfind8.js +++ b/jstests/core/arrayfind8.js @@ -3,146 +3,142 @@ * Includes tests for bugs described in SERVER-1264 and SERVER-4180. */ (function() { - "use strict"; +"use strict"; - const coll = db.jstests_arrayfind8; - coll.drop(); - - // May be changed during the test. - let currentIndexSpec = {a: 1}; - - /** - * Check that the query results match the documents in the 'expected' array. - */ - function assertResults(expected, query, context) { - assert.eq(expected.length, coll.count(query), 'unexpected count in ' + context); - const results = coll.find(query).toArray(); - const resultsAOnly = results.map((r) => r.a); - assert.sameMembers(resultsAOnly, expected); - } - - /** - * Check matching for different query types. 
- * @param bothMatch - document matched by both standardQuery and elemMatchQuery - * @param elemMatch - document matched by elemMatchQuery but not standardQuery - * @param notElemMatch - document matched by standardQuery but not elemMatchQuery - */ - function checkMatch( - bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context) { - function mayPush(arr, elt) { - if (elt) { - arr.push(elt); - } - } +const coll = db.jstests_arrayfind8; +coll.drop(); - let expectedStandardQueryResults = []; - mayPush(expectedStandardQueryResults, bothMatch); - mayPush(expectedStandardQueryResults, nonElemMatch); - assertResults(expectedStandardQueryResults, standardQuery, context + ' standard query'); +// May be changed during the test. +let currentIndexSpec = {a: 1}; - let expectedElemMatchQueryResults = []; - mayPush(expectedElemMatchQueryResults, bothMatch); - mayPush(expectedElemMatchQueryResults, elemMatch); - assertResults(expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query'); - } - - /** - * Check matching and for different query types. - * @param subQuery - part of a query, to be provided as is for a standard query and within a - * $elemMatch clause for a $elemMatch query - * @param bothMatch - document matched by both standardQuery and elemMatchQuery - * @param elemMatch - document matched by elemMatchQuery but not standardQuery - * @param notElemMatch - document matched by standardQuery but not elemMatchQuery - * @param additionalConstraints - additional query parameters not generated from @param subQuery - */ - function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalConstraints) { - coll.drop(); - additionalConstraints = additionalConstraints || {}; - - // Construct standard and elemMatch queries from subQuery. - const firstSubQueryKey = Object.keySet(subQuery)[0]; - let standardQuery = null; - if (firstSubQueryKey[0] == '$') { - standardQuery = {$and: [{a: subQuery}, additionalConstraints]}; - } else { - // If the subQuery contains a field rather than operators, append to the 'a' field. - let modifiedSubQuery = {}; - modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey]; - standardQuery = {$and: [modifiedSubQuery, additionalConstraints]}; - } - const elemMatchQuery = {$and: [{a: {$elemMatch: subQuery}}, additionalConstraints]}; +/** + * Check that the query results match the documents in the 'expected' array. + */ +function assertResults(expected, query, context) { + assert.eq(expected.length, coll.count(query), 'unexpected count in ' + context); + const results = coll.find(query).toArray(); + const resultsAOnly = results.map((r) => r.a); + assert.sameMembers(resultsAOnly, expected); +} - function insertValueIfNotNull(val) { - if (val) { - assert.commandWorked(coll.insert({a: val})); - } +/** + * Check matching for different query types. + * @param bothMatch - document matched by both standardQuery and elemMatchQuery + * @param elemMatch - document matched by elemMatchQuery but not standardQuery + * @param notElemMatch - document matched by standardQuery but not elemMatchQuery + */ +function checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context) { + function mayPush(arr, elt) { + if (elt) { + arr.push(elt); } + } - // Save all documents and check matching without indexes. 
- insertValueIfNotNull(bothMatch); - insertValueIfNotNull(elemMatch); - insertValueIfNotNull(nonElemMatch); - - checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed'); - - // Check matching and index bounds for a single key index. + let expectedStandardQueryResults = []; + mayPush(expectedStandardQueryResults, bothMatch); + mayPush(expectedStandardQueryResults, nonElemMatch); + assertResults(expectedStandardQueryResults, standardQuery, context + ' standard query'); - assert.eq(coll.drop(), true); - insertValueIfNotNull(bothMatch); - insertValueIfNotNull(elemMatch); - // The nonElemMatch document is not tested here, as it will often make the index multikey. - assert.commandWorked(coll.createIndex(currentIndexSpec)); - checkMatch(bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index'); + let expectedElemMatchQueryResults = []; + mayPush(expectedElemMatchQueryResults, bothMatch); + mayPush(expectedElemMatchQueryResults, elemMatch); + assertResults(expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query'); +} - // Check matching and index bounds for a multikey index. +/** + * Check matching and for different query types. + * @param subQuery - part of a query, to be provided as is for a standard query and within a + * $elemMatch clause for a $elemMatch query + * @param bothMatch - document matched by both standardQuery and elemMatchQuery + * @param elemMatch - document matched by elemMatchQuery but not standardQuery + * @param notElemMatch - document matched by standardQuery but not elemMatchQuery + * @param additionalConstraints - additional query parameters not generated from @param subQuery + */ +function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalConstraints) { + coll.drop(); + additionalConstraints = additionalConstraints || {}; + + // Construct standard and elemMatch queries from subQuery. + const firstSubQueryKey = Object.keySet(subQuery)[0]; + let standardQuery = null; + if (firstSubQueryKey[0] == '$') { + standardQuery = {$and: [{a: subQuery}, additionalConstraints]}; + } else { + // If the subQuery contains a field rather than operators, append to the 'a' field. + let modifiedSubQuery = {}; + modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey]; + standardQuery = {$and: [modifiedSubQuery, additionalConstraints]}; + } + const elemMatchQuery = {$and: [{a: {$elemMatch: subQuery}}, additionalConstraints]}; - // Now the nonElemMatch document is tested. - insertValueIfNotNull(nonElemMatch); - // Force the index to be multikey. - assert.commandWorked(coll.insert({a: [-1, -2]})); - assert.commandWorked(coll.insert({a: {b: [-1, -2]}})); - checkMatch( - bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'multikey index'); + function insertValueIfNotNull(val) { + if (val) { + assert.commandWorked(coll.insert({a: val})); + } } - // Basic test. - checkQuery({$gt: 4}, [5]); - - // Multiple constraints within a $elemMatch clause. - checkQuery({$gt: 4, $lt: 6}, [5], null, [3, 7]); - checkQuery({$gt: 4, $not: {$gte: 6}}, [5]); - checkQuery({$gt: 4, $not: {$ne: 6}}, [6]); - checkQuery({$gte: 5, $lte: 5}, [5], null, [4, 6]); - checkQuery({$in: [4, 6], $gt: 5}, [6], null, [4, 7]); - checkQuery({$regex: '^a'}, ['a']); - - // Some constraints within a $elemMatch clause and other constraints outside of it. 
- checkQuery({$gt: 4}, [5], null, null, {a: {$lt: 6}}); - checkQuery({$gte: 5}, [5], null, null, {a: {$lte: 5}}); - checkQuery({$in: [4, 6]}, [6], null, null, {a: {$gt: 5}}); - - // Constraints in different $elemMatch clauses. - checkQuery({$gt: 4}, [5], null, null, {a: {$elemMatch: {$lt: 6}}}); - checkQuery({$gt: 4}, [3, 7], null, null, {a: {$elemMatch: {$lt: 6}}}); - checkQuery({$gte: 5}, [5], null, null, {a: {$elemMatch: {$lte: 5}}}); - checkQuery({$in: [4, 6]}, [6], null, null, {a: {$elemMatch: {$gt: 5}}}); - - checkQuery({$elemMatch: {$in: [5]}}, null, [[5]], [5], null); - - currentIndexSpec = {"a.b": 1}; - checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: 1}]], [{b: 1}], null); - checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: [0, 2]}]], [{b: [0, 2]}], null); - - // Constraints for a top level (SERVER-1264 style) $elemMatch nested within a non top level - // $elemMatch. - checkQuery({b: {$elemMatch: {$gte: 1, $lte: 1}}}, [{b: [1]}]); - checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [1]}]); - - checkQuery( - {b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [2]}], null, null, {'a.b': {$in: [2, 5]}}); - checkQuery({b: {$elemMatch: {$in: [1, 2]}, $in: [2, 3]}}, - [{b: [2]}], - null, - [{b: [1]}, {b: [3]}], - null); + // Save all documents and check matching without indexes. + insertValueIfNotNull(bothMatch); + insertValueIfNotNull(elemMatch); + insertValueIfNotNull(nonElemMatch); + + checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed'); + + // Check matching and index bounds for a single key index. + + assert.eq(coll.drop(), true); + insertValueIfNotNull(bothMatch); + insertValueIfNotNull(elemMatch); + // The nonElemMatch document is not tested here, as it will often make the index multikey. + assert.commandWorked(coll.createIndex(currentIndexSpec)); + checkMatch(bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index'); + + // Check matching and index bounds for a multikey index. + + // Now the nonElemMatch document is tested. + insertValueIfNotNull(nonElemMatch); + // Force the index to be multikey. + assert.commandWorked(coll.insert({a: [-1, -2]})); + assert.commandWorked(coll.insert({a: {b: [-1, -2]}})); + checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'multikey index'); +} + +// Basic test. +checkQuery({$gt: 4}, [5]); + +// Multiple constraints within a $elemMatch clause. +checkQuery({$gt: 4, $lt: 6}, [5], null, [3, 7]); +checkQuery({$gt: 4, $not: {$gte: 6}}, [5]); +checkQuery({$gt: 4, $not: {$ne: 6}}, [6]); +checkQuery({$gte: 5, $lte: 5}, [5], null, [4, 6]); +checkQuery({$in: [4, 6], $gt: 5}, [6], null, [4, 7]); +checkQuery({$regex: '^a'}, ['a']); + +// Some constraints within a $elemMatch clause and other constraints outside of it. +checkQuery({$gt: 4}, [5], null, null, {a: {$lt: 6}}); +checkQuery({$gte: 5}, [5], null, null, {a: {$lte: 5}}); +checkQuery({$in: [4, 6]}, [6], null, null, {a: {$gt: 5}}); + +// Constraints in different $elemMatch clauses. 
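// Illustrative sketch (throwaway collection name): the behaviour these helpers encode, shown
// directly. Without $elemMatch, different array elements may satisfy different predicates;
// with $elemMatch, a single element must satisfy them all.
const elemMatchDemo = db.jstests_arrayfind8_demo;
elemMatchDemo.drop();
assert.commandWorked(elemMatchDemo.insert({a: [3, 7]}));
assert.eq(1, elemMatchDemo.find({a: {$gt: 4, $lt: 6}}).itcount());
assert.eq(0, elemMatchDemo.find({a: {$elemMatch: {$gt: 4, $lt: 6}}}).itcount());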
+checkQuery({$gt: 4}, [5], null, null, {a: {$elemMatch: {$lt: 6}}}); +checkQuery({$gt: 4}, [3, 7], null, null, {a: {$elemMatch: {$lt: 6}}}); +checkQuery({$gte: 5}, [5], null, null, {a: {$elemMatch: {$lte: 5}}}); +checkQuery({$in: [4, 6]}, [6], null, null, {a: {$elemMatch: {$gt: 5}}}); + +checkQuery({$elemMatch: {$in: [5]}}, null, [[5]], [5], null); + +currentIndexSpec = { + "a.b": 1 +}; +checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: 1}]], [{b: 1}], null); +checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: [0, 2]}]], [{b: [0, 2]}], null); + +// Constraints for a top level (SERVER-1264 style) $elemMatch nested within a non top level +// $elemMatch. +checkQuery({b: {$elemMatch: {$gte: 1, $lte: 1}}}, [{b: [1]}]); +checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [1]}]); + +checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [2]}], null, null, {'a.b': {$in: [2, 5]}}); +checkQuery( + {b: {$elemMatch: {$in: [1, 2]}, $in: [2, 3]}}, [{b: [2]}], null, [{b: [1]}, {b: [3]}], null); })(); diff --git a/jstests/core/autocomplete.js b/jstests/core/autocomplete.js index 29509b951b9..5e9856cc722 100644 --- a/jstests/core/autocomplete.js +++ b/jstests/core/autocomplete.js @@ -2,54 +2,54 @@ * Validate auto complete works for various javascript types implemented by C++. */ (function() { - 'use strict'; +'use strict'; - function testAutoComplete(prefix) { - // This method updates a global object with an array of strings on success. - shellAutocomplete(prefix); - return __autocomplete__; - } +function testAutoComplete(prefix) { + // This method updates a global object with an array of strings on success. + shellAutocomplete(prefix); + return __autocomplete__; +} - // Create a collection. - db.auto_complete_coll.insert({}); +// Create a collection. +db.auto_complete_coll.insert({}); - // Validate DB auto completion. - const db_stuff = testAutoComplete('db.'); +// Validate DB auto completion. +const db_stuff = testAutoComplete('db.'); - // Verify we enumerate built-in methods. - assert.contains('db.prototype', db_stuff); - assert.contains('db.hasOwnProperty', db_stuff); - assert.contains('db.toString(', db_stuff); +// Verify we enumerate built-in methods. +assert.contains('db.prototype', db_stuff); +assert.contains('db.hasOwnProperty', db_stuff); +assert.contains('db.toString(', db_stuff); - // Verify we have some methods we added. - assert.contains('db.adminCommand(', db_stuff); - assert.contains('db.runCommand(', db_stuff); - assert.contains('db.watch(', db_stuff); +// Verify we have some methods we added. +assert.contains('db.adminCommand(', db_stuff); +assert.contains('db.runCommand(', db_stuff); +assert.contains('db.watch(', db_stuff); - // Verify we enumerate collections. - assert.contains('db.auto_complete_coll', db_stuff); +// Verify we enumerate collections. +assert.contains('db.auto_complete_coll', db_stuff); - // Validate Collection autocompletion. - const coll_stuff = testAutoComplete('db.auto_complete_coll.'); +// Validate Collection autocompletion. +const coll_stuff = testAutoComplete('db.auto_complete_coll.'); - // Verify we enumerate built-in methods. - assert.contains('db.auto_complete_coll.prototype', coll_stuff); - assert.contains('db.auto_complete_coll.hasOwnProperty', coll_stuff); - assert.contains('db.auto_complete_coll.toString(', coll_stuff); +// Verify we enumerate built-in methods. 
+assert.contains('db.auto_complete_coll.prototype', coll_stuff); +assert.contains('db.auto_complete_coll.hasOwnProperty', coll_stuff); +assert.contains('db.auto_complete_coll.toString(', coll_stuff); - // Verify we have some methods we added. - assert.contains('db.auto_complete_coll.aggregate(', coll_stuff); - assert.contains('db.auto_complete_coll.runCommand(', coll_stuff); +// Verify we have some methods we added. +assert.contains('db.auto_complete_coll.aggregate(', coll_stuff); +assert.contains('db.auto_complete_coll.runCommand(', coll_stuff); - // Validate autocompletion when prefix is specified. - const empty_stuff = testAutoComplete(''); +// Validate autocompletion when prefix is specified. +const empty_stuff = testAutoComplete(''); - assert.contains('Array(', empty_stuff); - assert.contains('print(', empty_stuff); - assert.contains('ErrorCodes', empty_stuff); +assert.contains('Array(', empty_stuff); +assert.contains('print(', empty_stuff); +assert.contains('ErrorCodes', empty_stuff); - // Validate autocompletion returns ErrorCodes when ErrorCodes is specified. - const error_codes_autocomplete = testAutoComplete('ErrorCodes.'); +// Validate autocompletion returns ErrorCodes when ErrorCodes is specified. +const error_codes_autocomplete = testAutoComplete('ErrorCodes.'); - assert.contains('ErrorCodes.BadValue', error_codes_autocomplete); +assert.contains('ErrorCodes.BadValue', error_codes_autocomplete); })(); diff --git a/jstests/core/automation_setparameter.js b/jstests/core/automation_setparameter.js index 5e8ea62f338..6482fdebbfb 100644 --- a/jstests/core/automation_setparameter.js +++ b/jstests/core/automation_setparameter.js @@ -8,50 +8,49 @@ (function() { - // Run isMaster, and if it contains an automation service descriptor, save it, so we can restore - // it later. If it wasn't set, original will just be undefined. - var res = assert.commandWorked(db.runCommand({isMaster: 1})); - var original = res.automationServiceDescriptor; - - // Try to set the descriptor to an invalid value: only strings are supported. - assert.commandFailedWithCode(db.adminCommand({setParameter: 1, automationServiceDescriptor: 0}), - ErrorCodes.TypeMismatch); - - // Try to set the descriptor to an invalid value: Only 64 characters are allowed. - assert.commandFailedWithCode(db.adminCommand({ - setParameter: 1, - automationServiceDescriptor: - "1234567812345678123456781234567812345678123456781234567812345678X" - }), - ErrorCodes.Overflow); - - // Short strings are okay. - res = assert.commandWorked( - db.adminCommand({setParameter: 1, automationServiceDescriptor: "some_service"})); - - // Verify that the setParameter 'was' field contains what we expected. - if (original) - assert.eq(original, res.was); - - // Verify that the 'some_service' string is now echoed back to us in isMaster - res = assert.commandWorked(db.runCommand({isMaster: 1})); - assert.eq(res.automationServiceDescriptor, "some_service"); - - // Verify that setting the descriptor to the empty string is ok, and prevents it from being - // echoed back - assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: ""})); - res = assert.commandWorked(db.runCommand({isMaster: 1})); - assert(!res.hasOwnProperty('automationServiceDescriptor')); - - // Verify that the shell has the correct prompt. 
- var originalPrompt = db.getMongo().promptPrefix; - assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: "set"})); - db.getMongo().promptPrefix = undefined; - assert(/\[automated\]/.test(defaultPrompt())); - - // Restore whatever was there originally. - if (!original) - original = ""; - assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: original})); - db.getMongo().promptPrefix = originalPrompt; +// Run isMaster, and if it contains an automation service descriptor, save it, so we can restore +// it later. If it wasn't set, original will just be undefined. +var res = assert.commandWorked(db.runCommand({isMaster: 1})); +var original = res.automationServiceDescriptor; + +// Try to set the descriptor to an invalid value: only strings are supported. +assert.commandFailedWithCode(db.adminCommand({setParameter: 1, automationServiceDescriptor: 0}), + ErrorCodes.TypeMismatch); + +// Try to set the descriptor to an invalid value: Only 64 characters are allowed. +assert.commandFailedWithCode(db.adminCommand({ + setParameter: 1, + automationServiceDescriptor: "1234567812345678123456781234567812345678123456781234567812345678X" +}), + ErrorCodes.Overflow); + +// Short strings are okay. +res = assert.commandWorked( + db.adminCommand({setParameter: 1, automationServiceDescriptor: "some_service"})); + +// Verify that the setParameter 'was' field contains what we expected. +if (original) + assert.eq(original, res.was); + +// Verify that the 'some_service' string is now echoed back to us in isMaster +res = assert.commandWorked(db.runCommand({isMaster: 1})); +assert.eq(res.automationServiceDescriptor, "some_service"); + +// Verify that setting the descriptor to the empty string is ok, and prevents it from being +// echoed back +assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: ""})); +res = assert.commandWorked(db.runCommand({isMaster: 1})); +assert(!res.hasOwnProperty('automationServiceDescriptor')); + +// Verify that the shell has the correct prompt. +var originalPrompt = db.getMongo().promptPrefix; +assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: "set"})); +db.getMongo().promptPrefix = undefined; +assert(/\[automated\]/.test(defaultPrompt())); + +// Restore whatever was there originally. +if (!original) + original = ""; +assert.commandWorked(db.adminCommand({setParameter: 1, automationServiceDescriptor: original})); +db.getMongo().promptPrefix = originalPrompt; }()); diff --git a/jstests/core/awaitdata_getmore_cmd.js b/jstests/core/awaitdata_getmore_cmd.js index 11e178f0136..eb1fb194a32 100644 --- a/jstests/core/awaitdata_getmore_cmd.js +++ b/jstests/core/awaitdata_getmore_cmd.js @@ -11,82 +11,95 @@ // ] (function() { - 'use strict'; - - load("jstests/libs/fixture_helpers.js"); - - var cmdRes; - var cursorId; - var defaultBatchSize = 101; - var collName = 'await_data'; - var coll = db[collName]; - - // Create a non-capped collection with 10 documents. - coll.drop(); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({a: i})); - } - - // Find with tailable flag set should fail for a non-capped collection. - cmdRes = db.runCommand({find: collName, tailable: true}); - assert.commandFailed(cmdRes); - - // Should also fail in the non-capped case if both the tailable and awaitData flags are set. 
- cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true}); - assert.commandFailed(cmdRes); - - // With a non-existent collection, should succeed but return no data and a closed cursor. - coll.drop(); - cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true})); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.firstBatch.length, 0); - - // Create a capped collection with 10 documents. - assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048})); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({a: i})); - } - - // GetMore should succeed if query has awaitData but no maxTimeMS is supplied. - cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 2); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - - // Should also succeed if maxTimeMS is supplied on the original find. - const sixtyMinutes = 60 * 60 * 1000; - cmdRes = db.runCommand( - {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 2); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - - // Check that we can set up a tailable cursor over the capped collection. - cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 5); - - // Check that tailing the capped collection with awaitData eventually ends up returning an empty - // batch after hitting the timeout. - cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 2); - - // Issue getMore until we get an empty batch of results. +'use strict'; + +load("jstests/libs/fixture_helpers.js"); + +var cmdRes; +var cursorId; +var defaultBatchSize = 101; +var collName = 'await_data'; +var coll = db[collName]; + +// Create a non-capped collection with 10 documents. +coll.drop(); +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({a: i})); +} + +// Find with tailable flag set should fail for a non-capped collection. +cmdRes = db.runCommand({find: collName, tailable: true}); +assert.commandFailed(cmdRes); + +// Should also fail in the non-capped case if both the tailable and awaitData flags are set. +cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true}); +assert.commandFailed(cmdRes); + +// With a non-existent collection, should succeed but return no data and a closed cursor. 
+coll.drop(); +cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true})); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.firstBatch.length, 0); + +// Create a capped collection with 10 documents. +assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048})); +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({a: i})); +} + +// GetMore should succeed if query has awaitData but no maxTimeMS is supplied. +cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 2); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); + +// Should also succeed if maxTimeMS is supplied on the original find. +const sixtyMinutes = 60 * 60 * 1000; +cmdRes = db.runCommand( + {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 2); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); + +// Check that we can set up a tailable cursor over the capped collection. +cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 5); + +// Check that tailing the capped collection with awaitData eventually ends up returning an empty +// batch after hitting the timeout. +cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 2); + +// Issue getMore until we get an empty batch of results. +cmdRes = db.runCommand({ + getMore: cmdRes.cursor.id, + collection: coll.getName(), + batchSize: NumberInt(2), + maxTimeMS: 4000 +}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); + +// Keep issuing getMore until we get an empty batch after the timeout expires. +while (cmdRes.cursor.nextBatch.length > 0) { + var now = new Date(); cmdRes = db.runCommand({ getMore: cmdRes.cursor.id, collection: coll.getName(), @@ -96,111 +109,95 @@ assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, coll.getFullName()); +} +assert.gte((new Date()) - now, 2000); - // Keep issuing getMore until we get an empty batch after the timeout expires. 
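// Illustrative sketch (assumes no concurrent writers on the capped collection): a getMore on a
// tailable, awaitData cursor with maxTimeMS should block for up to that long and then return an
// empty batch with the cursor still open, rather than an error.
let quietCmdRes = assert.commandWorked(
    db.runCommand({find: collName, batchSize: 100, awaitData: true, tailable: true}));
quietCmdRes = assert.commandWorked(
    db.runCommand({getMore: quietCmdRes.cursor.id, collection: collName, maxTimeMS: 1000}));
assert.gt(quietCmdRes.cursor.id, NumberLong(0));
assert.eq(quietCmdRes.cursor.nextBatch.length, 0);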
- while (cmdRes.cursor.nextBatch.length > 0) { - var now = new Date(); - cmdRes = db.runCommand({ - getMore: cmdRes.cursor.id, - collection: coll.getName(), - batchSize: NumberInt(2), - maxTimeMS: 4000 - }); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - } - assert.gte((new Date()) - now, 2000); +// Repeat the test, this time tailing the oplog rather than a user-created capped collection. +// The oplog tailing in not possible on mongos. +if (FixtureHelpers.isReplSet(db)) { + var localDB = db.getSiblingDB("local"); + var oplogColl = localDB.oplog.rs; - // Repeat the test, this time tailing the oplog rather than a user-created capped collection. - // The oplog tailing in not possible on mongos. - if (FixtureHelpers.isReplSet(db)) { - var localDB = db.getSiblingDB("local"); - var oplogColl = localDB.oplog.rs; + cmdRes = localDB.runCommand( + {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true}); + assert.commandWorked(cmdRes); + if (cmdRes.cursor.id > NumberLong(0)) { + assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); + assert.eq(cmdRes.cursor.firstBatch.length, 2); cmdRes = localDB.runCommand( - {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true}); + {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000}); assert.commandWorked(cmdRes); - if (cmdRes.cursor.id > NumberLong(0)) { - assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 2); + assert.gt(cmdRes.cursor.id, NumberLong(0)); + assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); + while (cmdRes.cursor.nextBatch.length > 0) { + now = new Date(); cmdRes = localDB.runCommand( - {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000}); + {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000}); assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); - - while (cmdRes.cursor.nextBatch.length > 0) { - now = new Date(); - cmdRes = localDB.runCommand( - {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); - } - assert.gte((new Date()) - now, 2000); } + assert.gte((new Date()) - now, 2000); } - - // Test filtered inserts while writing to a capped collection. - // Find with a filter which doesn't match any documents in the collection. - cmdRes = assert.commandWorked(db.runCommand({ - find: collName, - batchSize: 2, - filter: {x: 1}, - awaitData: true, - tailable: true, - comment: "uniquifier_comment" - })); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 0); - - // Test that a getMore command on a tailable, awaitData cursor does not return a new batch to - // the user if a document was inserted, but it did not match the filter. - let insertshell = startParallelShell(() => { - // Signal to the original shell that the parallel shell has successfully started. - assert.writeOK(db.await_data.insert({_id: "signal parent shell"})); - - // Wait for the parent shell to start watching for the next document. 
- assert.soon(() => db.currentOp({ - op: "getmore", - "cursor.originatingCommand.comment": "uniquifier_comment" - }).inprog.length == 1, - () => tojson(db.currentOp().inprog)); - - // Now write a non-matching document to the collection. - assert.writeOK(db.await_data.insert({_id: "no match", x: 0})); - - // Make sure the getMore has not ended after a while. - sleep(2000); - assert.eq( - db.currentOp({op: "getmore", "cursor.originatingCommand.comment": "uniquifier_comment"}) - .inprog.length, - 1, - tojson(db.currentOp().inprog)); - - // Now write a matching document to wake it up. - assert.writeOK(db.await_data.insert({_id: "match", x: 1})); - }); - - // Wait until we receive confirmation that the parallel shell has started. - assert.soon(() => db.await_data.findOne({_id: "signal parent shell"}) !== null); - - // Now issue a getMore which will match the parallel shell's currentOp filter, signalling it to - // write a non-matching document into the collection. Confirm that we do not receive this - // document and that we subsequently time out. - now = new Date(); - cmdRes = db.runCommand({ - getMore: cmdRes.cursor.id, - collection: collName, - maxTimeMS: ReplSetTest.kDefaultTimeoutMS - }); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 1); - assert.docEq(cmdRes.cursor.nextBatch[0], {_id: "match", x: 1}); - insertshell(); +} + +// Test filtered inserts while writing to a capped collection. +// Find with a filter which doesn't match any documents in the collection. +cmdRes = assert.commandWorked(db.runCommand({ + find: collName, + batchSize: 2, + filter: {x: 1}, + awaitData: true, + tailable: true, + comment: "uniquifier_comment" +})); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 0); + +// Test that a getMore command on a tailable, awaitData cursor does not return a new batch to +// the user if a document was inserted, but it did not match the filter. +let insertshell = startParallelShell(() => { + // Signal to the original shell that the parallel shell has successfully started. + assert.writeOK(db.await_data.insert({_id: "signal parent shell"})); + + // Wait for the parent shell to start watching for the next document. + assert.soon(() => db.currentOp({ + op: "getmore", + "cursor.originatingCommand.comment": "uniquifier_comment" + }).inprog.length == 1, + () => tojson(db.currentOp().inprog)); + + // Now write a non-matching document to the collection. + assert.writeOK(db.await_data.insert({_id: "no match", x: 0})); + + // Make sure the getMore has not ended after a while. + sleep(2000); + assert.eq( + db.currentOp({op: "getmore", "cursor.originatingCommand.comment": "uniquifier_comment"}) + .inprog.length, + 1, + tojson(db.currentOp().inprog)); + + // Now write a matching document to wake it up. + assert.writeOK(db.await_data.insert({_id: "match", x: 1})); +}); + +// Wait until we receive confirmation that the parallel shell has started. +assert.soon(() => db.await_data.findOne({_id: "signal parent shell"}) !== null); + +// Now issue a getMore which will match the parallel shell's currentOp filter, signalling it to +// write a non-matching document into the collection. Confirm that we do not receive this +// document and that we subsequently time out. 
+now = new Date(); +cmdRes = db.runCommand( + {getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: ReplSetTest.kDefaultTimeoutMS}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 1); +assert.docEq(cmdRes.cursor.nextBatch[0], {_id: "match", x: 1}); +insertshell(); })(); diff --git a/jstests/core/background_index_multikey.js b/jstests/core/background_index_multikey.js index 0449beb513b..3db0c2d81f1 100644 --- a/jstests/core/background_index_multikey.js +++ b/jstests/core/background_index_multikey.js @@ -7,68 +7,68 @@ */ (function() { - "use strict"; - function testIndexBuilds(isBackground) { - jsTestLog("Testing " + (isBackground ? "background" : "foreground") + " index builds"); - let coll = db["background_index_multikey_" + isBackground]; - coll.drop(); +"use strict"; +function testIndexBuilds(isBackground) { + jsTestLog("Testing " + (isBackground ? "background" : "foreground") + " index builds"); + let coll = db["background_index_multikey_" + isBackground]; + coll.drop(); - // Build index after multikey document is in the collection. - let doc = {_id: 0, a: [1, 2]}; - assert.writeOK(coll.insert(doc)); - assert.commandWorked(coll.createIndex({a: 1}, {background: isBackground})); - assert.eq(1, coll.count({a: 1})); - assert.eq(doc, coll.findOne({a: 1})); - assert.eq(1, coll.count({a: 2})); - assert.eq(doc, coll.findOne({a: 2})); + // Build index after multikey document is in the collection. + let doc = {_id: 0, a: [1, 2]}; + assert.writeOK(coll.insert(doc)); + assert.commandWorked(coll.createIndex({a: 1}, {background: isBackground})); + assert.eq(1, coll.count({a: 1})); + assert.eq(doc, coll.findOne({a: 1})); + assert.eq(1, coll.count({a: 2})); + assert.eq(doc, coll.findOne({a: 2})); - // Build index where multikey is in an embedded document. - doc = {_id: 1, b: {c: [1, 2]}}; - assert.writeOK(coll.insert(doc)); - assert.commandWorked(coll.createIndex({'b.c': 1}, {background: isBackground})); - assert.eq(1, coll.count({'b.c': 1})); - assert.eq(doc, coll.findOne({'b.c': 1})); - assert.eq(1, coll.count({'b.c': 2})); - assert.eq(doc, coll.findOne({'b.c': 2})); + // Build index where multikey is in an embedded document. + doc = {_id: 1, b: {c: [1, 2]}}; + assert.writeOK(coll.insert(doc)); + assert.commandWorked(coll.createIndex({'b.c': 1}, {background: isBackground})); + assert.eq(1, coll.count({'b.c': 1})); + assert.eq(doc, coll.findOne({'b.c': 1})); + assert.eq(1, coll.count({'b.c': 2})); + assert.eq(doc, coll.findOne({'b.c': 2})); - // Add new multikey path to embedded path. - doc = {_id: 2, b: [1, 2]}; - assert.writeOK(coll.insert(doc)); - assert.eq(1, coll.count({b: 1})); - assert.eq(doc, coll.findOne({b: 1})); - assert.eq(1, coll.count({b: 2})); - assert.eq(doc, coll.findOne({b: 2})); + // Add new multikey path to embedded path. + doc = {_id: 2, b: [1, 2]}; + assert.writeOK(coll.insert(doc)); + assert.eq(1, coll.count({b: 1})); + assert.eq(doc, coll.findOne({b: 1})); + assert.eq(1, coll.count({b: 2})); + assert.eq(doc, coll.findOne({b: 2})); - // Build index on a large collection that is not multikey, and then make it multikey. 
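// Illustrative sketch (assumes an unsharded collection and a simple FETCH/IXSCAN plan): the
// multikey transition can also be observed via explain(), where the IXSCAN stage reports
// isMultiKey: true once an array value has been indexed.
let multikeyExplain = coll.find({a: 1}).hint({a: 1}).explain();
let ixScanStage = multikeyExplain.queryPlanner.winningPlan.inputStage;
assert.eq(true, ixScanStage.isMultiKey, tojson(multikeyExplain));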
- for (let i = 100; i < 1100; i++) { - assert.writeOK(coll.insert({_id: i, d: i})); - } - assert.commandWorked(coll.createIndex({d: 1}, {background: isBackground})); - doc = {_id: 3, d: [1, 2]}; - assert.writeOK(coll.insert(doc)); - assert.eq(1, coll.count({d: 1})); - assert.eq(doc, coll.findOne({d: 1})); - assert.eq(1, coll.count({d: 2})); - assert.eq(doc, coll.findOne({d: 2})); + // Build index on a large collection that is not multikey, and then make it multikey. + for (let i = 100; i < 1100; i++) { + assert.writeOK(coll.insert({_id: i, d: i})); + } + assert.commandWorked(coll.createIndex({d: 1}, {background: isBackground})); + doc = {_id: 3, d: [1, 2]}; + assert.writeOK(coll.insert(doc)); + assert.eq(1, coll.count({d: 1})); + assert.eq(doc, coll.findOne({d: 1})); + assert.eq(1, coll.count({d: 2})); + assert.eq(doc, coll.findOne({d: 2})); - // Build compound multikey index. - doc = {_id: 4, e: [1, 2]}; - assert.writeOK(coll.insert(doc)); - assert.commandWorked(coll.createIndex({'e': 1, 'f': 1}, {background: isBackground})); - assert.eq(1, coll.count({e: 1})); - assert.eq(doc, coll.findOne({e: 1})); - assert.eq(1, coll.count({e: 2})); - assert.eq(doc, coll.findOne({e: 2})); + // Build compound multikey index. + doc = {_id: 4, e: [1, 2]}; + assert.writeOK(coll.insert(doc)); + assert.commandWorked(coll.createIndex({'e': 1, 'f': 1}, {background: isBackground})); + assert.eq(1, coll.count({e: 1})); + assert.eq(doc, coll.findOne({e: 1})); + assert.eq(1, coll.count({e: 2})); + assert.eq(doc, coll.findOne({e: 2})); - // Add new multikey path to compound index. - doc = {_id: 5, f: [1, 2]}; - assert.writeOK(coll.insert(doc)); - assert.eq(1, coll.count({f: 1})); - assert.eq(doc, coll.findOne({f: 1})); - assert.eq(1, coll.count({f: 2})); - assert.eq(doc, coll.findOne({f: 2})); - } + // Add new multikey path to compound index. + doc = {_id: 5, f: [1, 2]}; + assert.writeOK(coll.insert(doc)); + assert.eq(1, coll.count({f: 1})); + assert.eq(doc, coll.findOne({f: 1})); + assert.eq(1, coll.count({f: 2})); + assert.eq(doc, coll.findOne({f: 2})); +} - testIndexBuilds(false); - testIndexBuilds(true); +testIndexBuilds(false); +testIndexBuilds(true); })(); diff --git a/jstests/core/background_unique_indexes.js b/jstests/core/background_unique_indexes.js index 6d8b02d5199..692aaa0f58f 100644 --- a/jstests/core/background_unique_indexes.js +++ b/jstests/core/background_unique_indexes.js @@ -14,57 +14,57 @@ */ (function() { - "use strict"; +"use strict"; - const dbName = "background_unique_indexes"; - const collName = "test"; +const dbName = "background_unique_indexes"; +const collName = "test"; - let testDB = db.getSiblingDB(dbName); +let testDB = db.getSiblingDB(dbName); - // Setup collection. - testDB[collName].drop(); - assert.commandWorked(testDB.runCommand({create: collName})); +// Setup collection. +testDB[collName].drop(); +assert.commandWorked(testDB.runCommand({create: collName})); - // Each iteration increments and decrements a uniquely indexed value, 'x' while creating and - // dropping an index. The goal is that an index build on a secondary might find a case where the - // unique index constraint is temporarily violated, and an index on x maps to two different - // records. - const nOps = 1000; - const nIterations = 15; +// Each iteration increments and decrements a uniquely indexed value, 'x' while creating and +// dropping an index. 
The goal is that an index build on a secondary might find a case where the +// unique index constraint is temporarily violated, and an index on x maps to two different +// records. +const nOps = 1000; +const nIterations = 15; - // Write the initial documents. - let bulk = testDB[collName].initializeUnorderedBulkOp(); +// Write the initial documents. +let bulk = testDB[collName].initializeUnorderedBulkOp(); +for (let i = 0; i < nOps; i++) { + bulk.insert({_id: i, x: i, iter: 0}); +} +assert.commandWorked(bulk.execute()); + +// Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration. +for (let iteration = 0; iteration < nIterations; iteration++) { + // Reset each document. + let updates = []; for (let i = 0; i < nOps; i++) { - bulk.insert({_id: i, x: i, iter: 0}); + updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}}; } - assert.commandWorked(bulk.execute()); - - // Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration. - for (let iteration = 0; iteration < nIterations; iteration++) { - // Reset each document. - let updates = []; - for (let i = 0; i < nOps; i++) { - updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}}; - } - - assert.commandWorked(testDB.runCommand({update: collName, updates: updates})); - // Create a background unique index on the collection. - assert.commandWorked(testDB.runCommand({ - createIndexes: collName, - indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}] - })); + assert.commandWorked(testDB.runCommand({update: collName, updates: updates})); - // Generate updates that increment x on each document backwards by _id to avoid conficts - // when applied in-order. - updates = []; - for (let i = 0; i < nOps; i++) { - // Apply each operation in reverse order. - updates[nOps - i - 1] = {q: {_id: i}, u: {$inc: {x: 1}}}; - } - assert.commandWorked(testDB.runCommand({update: collName, updates: updates})); + // Create a background unique index on the collection. + assert.commandWorked(testDB.runCommand({ + createIndexes: collName, + indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}] + })); - assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: "x_1"})); - print("iteration " + iteration); + // Generate updates that increment x on each document backwards by _id to avoid conficts + // when applied in-order. + updates = []; + for (let i = 0; i < nOps; i++) { + // Apply each operation in reverse order. + updates[nOps - i - 1] = {q: {_id: i}, u: {$inc: {x: 1}}}; } + assert.commandWorked(testDB.runCommand({update: collName, updates: updates})); + + assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: "x_1"})); + print("iteration " + iteration); +} })(); diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js index b280b8ceb77..ac7e1177c41 100644 --- a/jstests/core/batch_size.js +++ b/jstests/core/batch_size.js @@ -3,127 +3,124 @@ // Test subtleties of batchSize and limit. (function() { - "use strict"; - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - var t = db.jstests_batch_size; - t.drop(); - - for (var i = 0; i < 4; i++) { - t.save({_id: i, a: i}); - } - - function runIndexedTests() { - // With limit, indexed. - assert.eq(2, t.find().limit(2).itcount()); - assert.eq(2, t.find().sort({a: 1}).limit(2).itcount()); - - // With batchSize, indexed. - // SERVER-12438: If there is an index that provides the sort, then a plan with an unindexed - // sort should never be used. 
Consequently, batchSize will NOT be a hard limit in this - // case. WARNING: the behavior described above may change in the future. - assert.eq(4, t.find().batchSize(2).itcount()); - assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount()); - } - - // Without batch size or limit, unindexed. - assert.eq(4, t.find().itcount()); - assert.eq(4, t.find().sort({a: 1}).itcount()); - - // With limit, unindexed. +"use strict"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +var t = db.jstests_batch_size; +t.drop(); + +for (var i = 0; i < 4; i++) { + t.save({_id: i, a: i}); +} + +function runIndexedTests() { + // With limit, indexed. assert.eq(2, t.find().limit(2).itcount()); assert.eq(2, t.find().sort({a: 1}).limit(2).itcount()); + // With batchSize, indexed. + // SERVER-12438: If there is an index that provides the sort, then a plan with an unindexed + // sort should never be used. Consequently, batchSize will NOT be a hard limit in this + // case. WARNING: the behavior described above may change in the future. assert.eq(4, t.find().batchSize(2).itcount()); assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount()); - - // With negative batchSize. A negative batchSize value instructs the server - // to return just a single batch of results. - assert.eq(1, t.find().batchSize(-1).itcount()); - assert.eq(2, t.find().batchSize(-2).itcount()); - - // Run the tests with the index twice in order to double check plan caching. - t.ensureIndex({a: 1}); - for (var i = 0; i < 2; i++) { - runIndexedTests(); - } - - // The next tests make sure that we obey limit and batchSize properly when the sort could be - // either indexed or unindexed. - t.drop(); - t.ensureIndex({a: 1}); - t.ensureIndex({b: 1}); - - for (var i = 0; i < 100; i++) { - t.save({_id: i, a: i, b: 1}); - } - - // Without a hint. Do it twice to make sure caching is ok. - for (var i = 0; i < 2; i++) { - assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount()); - assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount()); - } - - // Hinting 'a'. - assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount()); - assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount()); - - // Hinting 'b'. - assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount()); - assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount()); - - // With explain. - var explain = t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain("executionStats"); - assert.eq(15, explain.executionStats.nReturned); - explain = t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain("executionStats"); - if (FixtureHelpers.isMongos(db)) { - // If we're talking to a mongos, we expect at most one batch from each shard. - assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, - explain.executionStats.nReturned); - } else { - assert.eq(6, explain.executionStats.nReturned); - } - - // Double check that we're not scanning more stuff than we have to. In order to get the sort - // using index 'a', we should need to scan about 50 keys and 50 documents. - var explain = - t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain("executionStats"); - assert.lte(explain.executionStats.totalKeysExamined, 60); - assert.lte(explain.executionStats.totalDocsExamined, 60); - if (FixtureHelpers.isMongos(db)) { - // If we're talking to a mongos, we expect at most one batch from each shard. 
- assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, - explain.executionStats.nReturned); - } else { - assert.eq(6, explain.executionStats.nReturned); - } - - // ------- - - // During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering too much - // data in a blocking sort stage during plan ranking. - t.drop(); - - // Generate big string to use in the object - 1MB+ String - var bigStr = "ABCDEFGHIJKLMNBOPQRSTUVWXYZ012345687890"; - while (bigStr.length < 1000000) { - bigStr = bigStr + "::" + bigStr; - } - - // Insert enough documents to exceed the 32 MB in-memory sort limit. - const nDocs = 40 * FixtureHelpers.numberOfShardsForCollection(t); - for (var i = 0; i < nDocs; i++) { - var doc = {x: 1, y: 1, z: i, big: bigStr}; - t.insert(doc); - } - - // Two indices needed in order to trigger plan ranking. Neither index provides the sort order. - t.ensureIndex({x: 1}); - t.ensureIndex({y: 1}); - - // We should only buffer 3 docs in memory. - var cursor = t.find({x: 1, y: 1}).sort({z: -1}).limit(3); - assert.eq(nDocs - 1, cursor.next().z); - assert.eq(nDocs - 2, cursor.next().z); - assert.eq(nDocs - 3, cursor.next().z); - assert(!cursor.hasNext()); +} + +// Without batch size or limit, unindexed. +assert.eq(4, t.find().itcount()); +assert.eq(4, t.find().sort({a: 1}).itcount()); + +// With limit, unindexed. +assert.eq(2, t.find().limit(2).itcount()); +assert.eq(2, t.find().sort({a: 1}).limit(2).itcount()); + +assert.eq(4, t.find().batchSize(2).itcount()); +assert.eq(4, t.find().sort({a: 1}).batchSize(2).itcount()); + +// With negative batchSize. A negative batchSize value instructs the server +// to return just a single batch of results. +assert.eq(1, t.find().batchSize(-1).itcount()); +assert.eq(2, t.find().batchSize(-2).itcount()); + +// Run the tests with the index twice in order to double check plan caching. +t.ensureIndex({a: 1}); +for (var i = 0; i < 2; i++) { + runIndexedTests(); +} + +// The next tests make sure that we obey limit and batchSize properly when the sort could be +// either indexed or unindexed. +t.drop(); +t.ensureIndex({a: 1}); +t.ensureIndex({b: 1}); + +for (var i = 0; i < 100; i++) { + t.save({_id: i, a: i, b: 1}); +} + +// Without a hint. Do it twice to make sure caching is ok. +for (var i = 0; i < 2; i++) { + assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).itcount()); + assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).itcount()); +} + +// Hinting 'a'. +assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).batchSize(2).itcount()); +assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({a: 1}).limit(6).itcount()); + +// Hinting 'b'. +assert.eq(15, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).batchSize(2).itcount()); +assert.eq(6, t.find({a: {$gte: 85}}).sort({b: 1}).hint({b: 1}).limit(6).itcount()); + +// With explain. +var explain = t.find({a: {$gte: 85}}).sort({b: 1}).batchSize(2).explain("executionStats"); +assert.eq(15, explain.executionStats.nReturned); +explain = t.find({a: {$gte: 85}}).sort({b: 1}).limit(6).explain("executionStats"); +if (FixtureHelpers.isMongos(db)) { + // If we're talking to a mongos, we expect at most one batch from each shard. + assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, explain.executionStats.nReturned); +} else { + assert.eq(6, explain.executionStats.nReturned); +} + +// Double check that we're not scanning more stuff than we have to. In order to get the sort +// using index 'a', we should need to scan about 50 keys and 50 documents. 
+var explain = t.find({a: {$gte: 50}}).sort({b: 1}).hint({a: 1}).limit(6).explain("executionStats"); +assert.lte(explain.executionStats.totalKeysExamined, 60); +assert.lte(explain.executionStats.totalDocsExamined, 60); +if (FixtureHelpers.isMongos(db)) { + // If we're talking to a mongos, we expect at most one batch from each shard. + assert.eq(FixtureHelpers.numberOfShardsForCollection(t) * 6, explain.executionStats.nReturned); +} else { + assert.eq(6, explain.executionStats.nReturned); +} + +// ------- + +// During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering too much +// data in a blocking sort stage during plan ranking. +t.drop(); + +// Generate big string to use in the object - 1MB+ String +var bigStr = "ABCDEFGHIJKLMNBOPQRSTUVWXYZ012345687890"; +while (bigStr.length < 1000000) { + bigStr = bigStr + "::" + bigStr; +} + +// Insert enough documents to exceed the 32 MB in-memory sort limit. +const nDocs = 40 * FixtureHelpers.numberOfShardsForCollection(t); +for (var i = 0; i < nDocs; i++) { + var doc = {x: 1, y: 1, z: i, big: bigStr}; + t.insert(doc); +} + +// Two indices needed in order to trigger plan ranking. Neither index provides the sort order. +t.ensureIndex({x: 1}); +t.ensureIndex({y: 1}); + +// We should only buffer 3 docs in memory. +var cursor = t.find({x: 1, y: 1}).sort({z: -1}).limit(3); +assert.eq(nDocs - 1, cursor.next().z); +assert.eq(nDocs - 2, cursor.next().z); +assert.eq(nDocs - 3, cursor.next().z); +assert(!cursor.hasNext()); }()); diff --git a/jstests/core/batch_write_collation_estsize.js b/jstests/core/batch_write_collation_estsize.js index 819060ec37e..d0e4254d6b2 100644 --- a/jstests/core/batch_write_collation_estsize.js +++ b/jstests/core/batch_write_collation_estsize.js @@ -7,186 +7,178 @@ // specification in the write operation document. (function() { - "use strict"; - - // Setup the test collection. - db.batch_write_collation_estsize.drop(); - assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"})); - - if (db.getMongo().writeMode() !== "commands") { - // Cannot use the bulk API to set a collation when using legacy write ops. - let bulk; - - // Test updateOne unordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({ - str: "BAR" - }); - }); - - // Test update unordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"}); - }); - - // Test replaceOne unordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({ - str: "BAR" - }); - }); - - // Test removeOne unordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); - }); - - // Test remove unordered bulk write operation with collation specification. 
- bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); - }); - - // Test updateOne ordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({ - str: "BAR" - }); - }); - - // Test update ordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"}); - }); - - // Test replaceOne ordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({ - str: "BAR" - }); - }); - - // Test removeOne ordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); - }); - - // Test remove ordered bulk write operation with collation specification. - bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); - assert.throws(() => { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); - }); - } else { - // Setup the bulk write response variable. - let res; - - // Test updateOne bulk write operation with collation specification. - res = db.batch_write_collation_estsize.bulkWrite([{ - updateOne: { - filter: {str: "FOO"}, - update: {$set: {str: "BAR"}}, - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false - } +"use strict"; + +// Setup the test collection. +db.batch_write_collation_estsize.drop(); +assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"})); + +if (db.getMongo().writeMode() !== "commands") { + // Cannot use the bulk API to set a collation when using legacy write ops. + let bulk; + + // Test updateOne unordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({str: "BAR"}); + }); + + // Test update unordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"}); + }); + + // Test replaceOne unordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "BAR"}); + }); + + // Test removeOne unordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); + }); + + // Test remove unordered bulk write operation with collation specification. 
+ bulk = db.batch_write_collation_estsize.initializeUnorderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); + }); + + // Test updateOne ordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({str: "BAR"}); + }); + + // Test update ordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({str: "BAR"}); + }); + + // Test replaceOne ordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "BAR"}); + }); + + // Test removeOne ordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); + }); + + // Test remove ordered bulk write operation with collation specification. + bulk = db.batch_write_collation_estsize.initializeOrderedBulkOp(); + assert.throws(() => { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); + }); +} else { + // Setup the bulk write response variable. + let res; + + // Test updateOne bulk write operation with collation specification. + res = db.batch_write_collation_estsize.bulkWrite([{ + updateOne: { + filter: {str: "FOO"}, + update: {$set: {str: "BAR"}}, + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false } - }]); - assert.eq(1, res.matchedCount); - - // Test updateMany bulk write operation with collation specification. - res = db.batch_write_collation_estsize.bulkWrite([{ - updateMany: { - filter: {str: "BAR"}, - update: {$set: {str: "FOO"}}, - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false - } + } + }]); + assert.eq(1, res.matchedCount); + + // Test updateMany bulk write operation with collation specification. + res = db.batch_write_collation_estsize.bulkWrite([{ + updateMany: { + filter: {str: "BAR"}, + update: {$set: {str: "FOO"}}, + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false } - }]); - assert.eq(1, res.matchedCount); - - // Test replaceOne bulk write operation with collation specification. - res = db.batch_write_collation_estsize.bulkWrite([{ - replaceOne: { - filter: {str: "FOO"}, - replacement: {str: "BAR"}, - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false - } + } + }]); + assert.eq(1, res.matchedCount); + + // Test replaceOne bulk write operation with collation specification. 
+ res = db.batch_write_collation_estsize.bulkWrite([{ + replaceOne: { + filter: {str: "FOO"}, + replacement: {str: "BAR"}, + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false } - }]); - assert.eq(1, res.matchedCount); - - // Test deleteMany bulk write operation with collation specification. - res = db.batch_write_collation_estsize.bulkWrite([{ - deleteOne: { - filter: {str: "BAR"}, - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false - } + } + }]); + assert.eq(1, res.matchedCount); + + // Test deleteMany bulk write operation with collation specification. + res = db.batch_write_collation_estsize.bulkWrite([{ + deleteOne: { + filter: {str: "BAR"}, + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false } - }]); - assert.eq(1, res.deletedCount); - - // Reinsert a document to test deleteMany bulk write operation. - assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"})); - - // Test deleteMany bulk write operation with collation specification. - res = db.batch_write_collation_estsize.bulkWrite([{ - deleteMany: { - filter: {str: "FOO"}, - collation: { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false - } + } + }]); + assert.eq(1, res.deletedCount); + + // Reinsert a document to test deleteMany bulk write operation. + assert.writeOK(db.batch_write_collation_estsize.insert({str: "FOO"})); + + // Test deleteMany bulk write operation with collation specification. 
+ res = db.batch_write_collation_estsize.bulkWrite([{ + deleteMany: { + filter: {str: "FOO"}, + collation: { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false } - }]); - assert.eq(1, res.deletedCount); - } + } + }]); + assert.eq(1, res.deletedCount); +} })(); diff --git a/jstests/core/batch_write_command_delete.js b/jstests/core/batch_write_command_delete.js index 48234dbeef5..4004a519412 100644 --- a/jstests/core/batch_write_command_delete.js +++ b/jstests/core/batch_write_command_delete.js @@ -30,7 +30,7 @@ function resultOK(result) { } function resultNOK(result) { - return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string'; + return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string'; } function countEventually(collection, n) { diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js index dcbe065bf19..0480e63f04a 100644 --- a/jstests/core/batch_write_command_insert.js +++ b/jstests/core/batch_write_command_insert.js @@ -33,7 +33,7 @@ function resultOK(result) { } function resultNOK(result) { - return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string'; + return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string'; } function countEventually(collection, n) { diff --git a/jstests/core/batch_write_command_update.js b/jstests/core/batch_write_command_update.js index df1a0ade62a..3d879e0064d 100644 --- a/jstests/core/batch_write_command_update.js +++ b/jstests/core/batch_write_command_update.js @@ -29,7 +29,7 @@ function resultOK(result) { } function resultNOK(result) { - return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string'; + return !result.ok && typeof (result.code) == 'number' && typeof (result.errmsg) == 'string'; } function countEventually(collection, n) { diff --git a/jstests/core/bench_test1.js b/jstests/core/bench_test1.js index 8e316c8b25e..2dd6e36c82a 100644 --- a/jstests/core/bench_test1.js +++ b/jstests/core/bench_test1.js @@ -5,40 +5,45 @@ // uses_multiple_connections, // ] (function() { - "use strict"; - - const t = db.bench_test1; - t.drop(); - - t.insert({_id: 1, x: 1}); - t.insert({_id: 2, x: 1}); - - const ops = [ - {op: "findOne", ns: t.getFullName(), query: {_id: 1}}, - {op: "update", ns: t.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}} - ]; - - const seconds = 10; - - const benchArgs = {ops: ops, parallel: 2, seconds: seconds, host: db.getMongo().host}; - - if (jsTest.options().auth) { - benchArgs['db'] = 'admin'; - benchArgs['username'] = jsTest.options().authUser; - benchArgs['password'] = jsTest.options().authPassword; - } - const res = benchRun(benchArgs); - - assert.lte(seconds * res.update, t.findOne({_id: 1}).x * 1.5, "A1"); - - assert.eq(1, t.getIndexes().length, "B1"); - benchArgs['ops'] = [{op: "createIndex", ns: t.getFullName(), key: {x: 1}}]; - benchArgs['parallel'] = 1; - benchRun(benchArgs); - assert.eq(2, t.getIndexes().length, "B2"); - benchArgs['ops'] = [{op: "dropIndex", ns: t.getFullName(), key: {x: 1}}]; - benchRun(benchArgs); - assert.soon(function() { - return t.getIndexes().length == 1; - }); +"use strict"; + +const t = db.bench_test1; +t.drop(); + +t.insert({_id: 1, x: 1}); +t.insert({_id: 2, x: 1}); + +const ops = [ + {op: "findOne", ns: t.getFullName(), query: {_id: 1}}, + {op: 
"update", ns: t.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}} +]; + +const seconds = 10; + +const benchArgs = { + ops: ops, + parallel: 2, + seconds: seconds, + host: db.getMongo().host +}; + +if (jsTest.options().auth) { + benchArgs['db'] = 'admin'; + benchArgs['username'] = jsTest.options().authUser; + benchArgs['password'] = jsTest.options().authPassword; +} +const res = benchRun(benchArgs); + +assert.lte(seconds * res.update, t.findOne({_id: 1}).x * 1.5, "A1"); + +assert.eq(1, t.getIndexes().length, "B1"); +benchArgs['ops'] = [{op: "createIndex", ns: t.getFullName(), key: {x: 1}}]; +benchArgs['parallel'] = 1; +benchRun(benchArgs); +assert.eq(2, t.getIndexes().length, "B2"); +benchArgs['ops'] = [{op: "dropIndex", ns: t.getFullName(), key: {x: 1}}]; +benchRun(benchArgs); +assert.soon(function() { + return t.getIndexes().length == 1; +}); }()); diff --git a/jstests/core/benchrun_pipeline_updates.js b/jstests/core/benchrun_pipeline_updates.js index 06647c83f06..bf14e51e5ad 100644 --- a/jstests/core/benchrun_pipeline_updates.js +++ b/jstests/core/benchrun_pipeline_updates.js @@ -4,51 +4,51 @@ * @tags: [uses_multiple_connections] */ (function() { - "use strict"; - const coll = db.benchrun_pipeline_updates; - coll.drop(); +"use strict"; +const coll = db.benchrun_pipeline_updates; +coll.drop(); - assert.commandWorked(coll.insert({_id: 0, x: 0})); +assert.commandWorked(coll.insert({_id: 0, x: 0})); - // Test that a basic pipeline can be used by an update op. - let benchArgs = { - ops: [ - { - op: "update", - ns: coll.getFullName(), - query: {_id: 0}, - writeCmd: true, - update: [{$set: {x: {$add: ["$x", 1]}}}] - }, - ], - parallel: 2, - seconds: 1, - host: db.getMongo().host, - }; - if (jsTest.options().auth) { - benchArgs['db'] = 'admin'; - benchArgs['username'] = jsTest.options().authUser; - benchArgs['password'] = jsTest.options().authPassword; - } - let res = benchRun(benchArgs); - assert.eq(res.errCount, 0); - assert.gte( - coll.findOne({_id: 0}).x, 2, "Expected at least one update to succeed and increment 'x'"); +// Test that a basic pipeline can be used by an update op. +let benchArgs = { + ops: [ + { + op: "update", + ns: coll.getFullName(), + query: {_id: 0}, + writeCmd: true, + update: [{$set: {x: {$add: ["$x", 1]}}}] + }, + ], + parallel: 2, + seconds: 1, + host: db.getMongo().host, +}; +if (jsTest.options().auth) { + benchArgs['db'] = 'admin'; + benchArgs['username'] = jsTest.options().authUser; + benchArgs['password'] = jsTest.options().authPassword; +} +let res = benchRun(benchArgs); +assert.eq(res.errCount, 0); +assert.gte( + coll.findOne({_id: 0}).x, 2, "Expected at least one update to succeed and increment 'x'"); - // Now test that the pipeline is still subject to benchRun's keyword replacement. +// Now test that the pipeline is still subject to benchRun's keyword replacement. - // Initialize x to something outside the range we'll expect it to be in below if the updates - // succeed. - assert.commandWorked(coll.updateOne({_id: 0}, {$set: {x: 100}})); - benchArgs.ops = [{ - op: "update", - ns: coll.getFullName(), - query: {_id: 0}, - writeCmd: true, - update: [{$project: {x: {$literal: {"#RAND_INT_PLUS_THREAD": [0, 2]}}}}] - }]; - res = benchRun(benchArgs); - assert.eq(res.errCount, 0); - assert.lte( - coll.findOne({_id: 0}).x, 3, "Expected 'x' to be no more than 3 after randInt replacement"); +// Initialize x to something outside the range we'll expect it to be in below if the updates +// succeed. 
+assert.commandWorked(coll.updateOne({_id: 0}, {$set: {x: 100}})); +benchArgs.ops = [{ + op: "update", + ns: coll.getFullName(), + query: {_id: 0}, + writeCmd: true, + update: [{$project: {x: {$literal: {"#RAND_INT_PLUS_THREAD": [0, 2]}}}}] +}]; +res = benchRun(benchArgs); +assert.eq(res.errCount, 0); +assert.lte( + coll.findOne({_id: 0}).x, 3, "Expected 'x' to be no more than 3 after randInt replacement"); }()); diff --git a/jstests/core/bindata_indexonly.js b/jstests/core/bindata_indexonly.js index 4eb24476010..f215a17e882 100644 --- a/jstests/core/bindata_indexonly.js +++ b/jstests/core/bindata_indexonly.js @@ -4,75 +4,71 @@ * 2) Can perform index-only data access. */ (function() { - 'use strict'; +'use strict'; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - var coll = db.jstests_bindata_indexonly; +var coll = db.jstests_bindata_indexonly; - coll.drop(); - assert.writeOK(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1})); - assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2})); - assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3})); - assert.writeOK(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4})); - assert.commandWorked(coll.createIndex({_id: 1, a: 1})); +coll.drop(); +assert.writeOK(coll.insert({_id: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"), a: 1})); +assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv"), a: 2})); +assert.writeOK(coll.insert({_id: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz"), a: 3})); +assert.writeOK(coll.insert({_id: BinData(0, "////////////////////////////"), a: 4})); +assert.commandWorked(coll.createIndex({_id: 1, a: 1})); - assert.throws(function() { - db.mycoll.insert({_id: 0, a: BinData.prototype}); - }, [], "bindata getter did not fail"); +assert.throws(function() { + db.mycoll.insert({_id: 0, a: BinData.prototype}); +}, [], "bindata getter did not fail"); - function testIndexOnlyBinData(blob) { - var explain = - coll.find({$and: [{_id: {$lte: BinData(0, blob)}}, {_id: {$gte: BinData(0, blob)}}]}, - {_id: 1, a: 1}) - .hint({_id: 1, a: 1}) - .explain("executionStats"); +function testIndexOnlyBinData(blob) { + var explain = + coll.find({$and: [{_id: {$lte: BinData(0, blob)}}, {_id: {$gte: BinData(0, blob)}}]}, + {_id: 1, a: 1}) + .hint({_id: 1, a: 1}) + .explain("executionStats"); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan), - "indexonly.BinData(0, " + blob + ") - must be index-only"); - assert.eq(1, - explain.executionStats.nReturned, - "EXACTone.BinData(0, " + blob + ") - should only return one in unique set"); - } + assert(isIndexOnly(db, explain.queryPlanner.winningPlan), + "indexonly.BinData(0, " + blob + ") - must be index-only"); + assert.eq(1, + explain.executionStats.nReturned, + "EXACTone.BinData(0, " + blob + ") - should only return one in unique set"); +} - testIndexOnlyBinData("AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); - testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhv"); - testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhz"); - testIndexOnlyBinData("////////////////////////////"); +testIndexOnlyBinData("AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); +testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhv"); +testIndexOnlyBinData("AQAAAAEBAAVlbl9VSwAAAAAAAAhz"); +testIndexOnlyBinData("////////////////////////////"); - var explain; +var explain; - explain = coll.find({_id: {$lt: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, {_id: 1, a: 1}) - .hint({_id: 1, a: 1}) - .explain("executionStats"); - 
assert(isIndexOnly(db, explain), "indexonly.$lt.1 - must be index-only"); - assert.eq(0, - explain.executionStats.nReturned, - "correctcount.$lt.1 - not returning correct documents"); +explain = coll.find({_id: {$lt: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, {_id: 1, a: 1}) + .hint({_id: 1, a: 1}) + .explain("executionStats"); +assert(isIndexOnly(db, explain), "indexonly.$lt.1 - must be index-only"); +assert.eq( + 0, explain.executionStats.nReturned, "correctcount.$lt.1 - not returning correct documents"); - explain = coll.find({_id: {$gt: BinData(0, "////////////////////////////")}}, {_id: 1, a: 1}) - .hint({_id: 1, a: 1}) - .explain("executionStats"); - assert(isIndexOnly(db, explain), "indexonly.$gt.2 - must be index-only"); - assert.eq(0, - explain.executionStats.nReturned, - "correctcount.$gt.2 - not returning correct documents"); +explain = coll.find({_id: {$gt: BinData(0, "////////////////////////////")}}, {_id: 1, a: 1}) + .hint({_id: 1, a: 1}) + .explain("executionStats"); +assert(isIndexOnly(db, explain), "indexonly.$gt.2 - must be index-only"); +assert.eq( + 0, explain.executionStats.nReturned, "correctcount.$gt.2 - not returning correct documents"); - explain = coll.find({_id: {$lte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv")}}, {_id: 1, a: 1}) - .hint({_id: 1, a: 1}) - .explain("executionStats"); - assert(isIndexOnly(db, explain), "indexonly.$lte.3 - must be index-only"); - assert.eq(2, - explain.executionStats.nReturned, - "correctcount.$lte.3 - not returning correct documents"); +explain = coll.find({_id: {$lte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv")}}, {_id: 1, a: 1}) + .hint({_id: 1, a: 1}) + .explain("executionStats"); +assert(isIndexOnly(db, explain), "indexonly.$lte.3 - must be index-only"); +assert.eq( + 2, explain.executionStats.nReturned, "correctcount.$lte.3 - not returning correct documents"); - explain = coll.find({_id: {$gte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz")}}, {_id: 1, a: 1}) - .hint({_id: 1, a: 1}) - .explain("executionStats"); - assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only"); - assert.eq(2, - explain.executionStats.nReturned, - "correctcount.$gte.3 - not returning correct documents"); +explain = coll.find({_id: {$gte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz")}}, {_id: 1, a: 1}) + .hint({_id: 1, a: 1}) + .explain("executionStats"); +assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only"); +assert.eq( + 2, explain.executionStats.nReturned, "correctcount.$gte.3 - not returning correct documents"); - coll.drop(); +coll.drop(); })(); diff --git a/jstests/core/bittest.js b/jstests/core/bittest.js index 194bf3df048..00785a5efcf 100644 --- a/jstests/core/bittest.js +++ b/jstests/core/bittest.js @@ -2,155 +2,154 @@ * This test ensures that bit test query operators work. 
*/ (function() { - 'use strict'; - - load("jstests/libs/analyze_plan.js"); - - var coll = db.jstests_bitwise; - - function assertQueryCorrect(query, count) { - var explain = coll.find(query).explain("executionStats"); - assert(isCollscan(db, explain.queryPlanner.winningPlan), - "expected bit test query plan to be COLLSCAN"); - assert.eq(count, - explain.executionStats.nReturned, - "bit test query not returning correct documents"); +'use strict'; + +load("jstests/libs/analyze_plan.js"); + +var coll = db.jstests_bitwise; + +function assertQueryCorrect(query, count) { + var explain = coll.find(query).explain("executionStats"); + assert(isCollscan(db, explain.queryPlanner.winningPlan), + "expected bit test query plan to be COLLSCAN"); + assert.eq( + count, explain.executionStats.nReturned, "bit test query not returning correct documents"); +} + +// Tests on numbers. + +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 54})); +assert.writeOK(coll.insert({a: 88})); +assert.writeOK(coll.insert({a: 255})); +assert.commandWorked(coll.createIndex({a: 1})); + +// Tests with bitmask. +assertQueryCorrect({a: {$bitsAllSet: 0}}, 5); +assertQueryCorrect({a: {$bitsAllSet: 1}}, 2); +assertQueryCorrect({a: {$bitsAllSet: 16}}, 3); +assertQueryCorrect({a: {$bitsAllSet: 54}}, 2); +assertQueryCorrect({a: {$bitsAllSet: 55}}, 1); +assertQueryCorrect({a: {$bitsAllSet: 88}}, 2); +assertQueryCorrect({a: {$bitsAllSet: 255}}, 1); +assertQueryCorrect({a: {$bitsAllClear: 0}}, 5); +assertQueryCorrect({a: {$bitsAllClear: 1}}, 3); +assertQueryCorrect({a: {$bitsAllClear: 16}}, 2); +assertQueryCorrect({a: {$bitsAllClear: 129}}, 3); +assertQueryCorrect({a: {$bitsAllClear: 255}}, 1); +assertQueryCorrect({a: {$bitsAnySet: 0}}, 0); +assertQueryCorrect({a: {$bitsAnySet: 9}}, 3); +assertQueryCorrect({a: {$bitsAnySet: 255}}, 4); +assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0); +assertQueryCorrect({a: {$bitsAnyClear: 18}}, 3); +assertQueryCorrect({a: {$bitsAnyClear: 24}}, 3); +assertQueryCorrect({a: {$bitsAnyClear: 255}}, 4); + +// Tests with array of bit positions. +assertQueryCorrect({a: {$bitsAllSet: []}}, 5); +assertQueryCorrect({a: {$bitsAllSet: [0]}}, 2); +assertQueryCorrect({a: {$bitsAllSet: [4]}}, 3); +assertQueryCorrect({a: {$bitsAllSet: [1, 2, 4, 5]}}, 2); +assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 4, 5]}}, 1); +assertQueryCorrect({a: {$bitsAllSet: [3, 4, 6]}}, 2); +assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1); +assertQueryCorrect({a: {$bitsAllClear: []}}, 5); +assertQueryCorrect({a: {$bitsAllClear: [0]}}, 3); +assertQueryCorrect({a: {$bitsAllClear: [4]}}, 2); +assertQueryCorrect({a: {$bitsAllClear: [1, 7]}}, 3); +assertQueryCorrect({a: {$bitsAllClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1); +assertQueryCorrect({a: {$bitsAnySet: []}}, 0); +assertQueryCorrect({a: {$bitsAnySet: [1, 3]}}, 3); +assertQueryCorrect({a: {$bitsAnySet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4); +assertQueryCorrect({a: {$bitsAnyClear: []}}, 0); +assertQueryCorrect({a: {$bitsAnyClear: [1, 4]}}, 3); +assertQueryCorrect({a: {$bitsAnyClear: [3, 4]}}, 3); +assertQueryCorrect({a: {$bitsAnyClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4); + +// Tests with multiple predicates. +assertQueryCorrect({a: {$bitsAllSet: 54, $bitsAllClear: 201}}, 1); + +// Tests on negative numbers. + +coll.drop(); +assert.writeOK(coll.insert({a: -0})); +assert.writeOK(coll.insert({a: -1})); +assert.writeOK(coll.insert({a: -54})); + +// Tests with bitmask. 
+assertQueryCorrect({a: {$bitsAllSet: 0}}, 3); +assertQueryCorrect({a: {$bitsAllSet: 2}}, 2); +assertQueryCorrect({a: {$bitsAllSet: 127}}, 1); +assertQueryCorrect({a: {$bitsAllSet: 74}}, 2); +assertQueryCorrect({a: {$bitsAllClear: 0}}, 3); +assertQueryCorrect({a: {$bitsAllClear: 53}}, 2); +assertQueryCorrect({a: {$bitsAllClear: 127}}, 1); +assertQueryCorrect({a: {$bitsAnySet: 0}}, 0); +assertQueryCorrect({a: {$bitsAnySet: 2}}, 2); +assertQueryCorrect({a: {$bitsAnySet: 127}}, 2); +assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0); +assertQueryCorrect({a: {$bitsAnyClear: 53}}, 2); +assertQueryCorrect({a: {$bitsAnyClear: 127}}, 2); + +// Tests with array of bit positions. +var allPositions = []; +for (var i = 0; i < 64; i++) { + allPositions.push(i); +} +assertQueryCorrect({a: {$bitsAllSet: []}}, 3); +assertQueryCorrect({a: {$bitsAllSet: [1]}}, 2); +assertQueryCorrect({a: {$bitsAllSet: allPositions}}, 1); +assertQueryCorrect({a: {$bitsAllSet: [1, 7, 6, 3, 100]}}, 2); +assertQueryCorrect({a: {$bitsAllClear: []}}, 3); +assertQueryCorrect({a: {$bitsAllClear: [5, 4, 2, 0]}}, 2); +assertQueryCorrect({a: {$bitsAllClear: allPositions}}, 1); +assertQueryCorrect({a: {$bitsAnySet: []}}, 0); +assertQueryCorrect({a: {$bitsAnySet: [1]}}, 2); +assertQueryCorrect({a: {$bitsAnySet: allPositions}}, 2); +assertQueryCorrect({a: {$bitsAnyClear: []}}, 0); +assertQueryCorrect({a: {$bitsAnyClear: [0, 2, 4, 5, 100]}}, 2); +assertQueryCorrect({a: {$bitsAnyClear: allPositions}}, 2); + +// Tests with multiple predicates. +assertQueryCorrect({a: {$bitsAllSet: 74, $bitsAllClear: 53}}, 1); + +// Tests on BinData. + +coll.drop(); +assert.writeOK(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")})); +assert.writeOK(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")})); +assert.writeOK(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")})); +assert.writeOK(coll.insert({a: BinData(0, "////////////////////////////")})); +assert.commandWorked(coll.createIndex({a: 1})); + +// Tests with binary string bitmask. +assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4); +assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3); +assertQueryCorrect({a: {$bitsAllSet: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2); +assertQueryCorrect({a: {$bitsAllSet: BinData(0, "////////////////////////////")}}, 1); +assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4); +assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3); +assertQueryCorrect({a: {$bitsAllClear: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2); +assertQueryCorrect({a: {$bitsAllClear: BinData(0, "////////////////////////////")}}, 1); +assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0); +assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1); +assertQueryCorrect({a: {$bitsAnySet: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2); +assertQueryCorrect({a: {$bitsAnySet: BinData(0, "////////////////////////////")}}, 3); +assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0); +assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1); +assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2); +assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3); + +// Tests with multiple predicates. 
+assertQueryCorrect({ + a: { + $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"), + $bitsAllClear: BinData(0, "//yf////////////////////////") } +}, + 1); - // Tests on numbers. - - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 54})); - assert.writeOK(coll.insert({a: 88})); - assert.writeOK(coll.insert({a: 255})); - assert.commandWorked(coll.createIndex({a: 1})); - - // Tests with bitmask. - assertQueryCorrect({a: {$bitsAllSet: 0}}, 5); - assertQueryCorrect({a: {$bitsAllSet: 1}}, 2); - assertQueryCorrect({a: {$bitsAllSet: 16}}, 3); - assertQueryCorrect({a: {$bitsAllSet: 54}}, 2); - assertQueryCorrect({a: {$bitsAllSet: 55}}, 1); - assertQueryCorrect({a: {$bitsAllSet: 88}}, 2); - assertQueryCorrect({a: {$bitsAllSet: 255}}, 1); - assertQueryCorrect({a: {$bitsAllClear: 0}}, 5); - assertQueryCorrect({a: {$bitsAllClear: 1}}, 3); - assertQueryCorrect({a: {$bitsAllClear: 16}}, 2); - assertQueryCorrect({a: {$bitsAllClear: 129}}, 3); - assertQueryCorrect({a: {$bitsAllClear: 255}}, 1); - assertQueryCorrect({a: {$bitsAnySet: 0}}, 0); - assertQueryCorrect({a: {$bitsAnySet: 9}}, 3); - assertQueryCorrect({a: {$bitsAnySet: 255}}, 4); - assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0); - assertQueryCorrect({a: {$bitsAnyClear: 18}}, 3); - assertQueryCorrect({a: {$bitsAnyClear: 24}}, 3); - assertQueryCorrect({a: {$bitsAnyClear: 255}}, 4); - - // Tests with array of bit positions. - assertQueryCorrect({a: {$bitsAllSet: []}}, 5); - assertQueryCorrect({a: {$bitsAllSet: [0]}}, 2); - assertQueryCorrect({a: {$bitsAllSet: [4]}}, 3); - assertQueryCorrect({a: {$bitsAllSet: [1, 2, 4, 5]}}, 2); - assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 4, 5]}}, 1); - assertQueryCorrect({a: {$bitsAllSet: [3, 4, 6]}}, 2); - assertQueryCorrect({a: {$bitsAllSet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1); - assertQueryCorrect({a: {$bitsAllClear: []}}, 5); - assertQueryCorrect({a: {$bitsAllClear: [0]}}, 3); - assertQueryCorrect({a: {$bitsAllClear: [4]}}, 2); - assertQueryCorrect({a: {$bitsAllClear: [1, 7]}}, 3); - assertQueryCorrect({a: {$bitsAllClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 1); - assertQueryCorrect({a: {$bitsAnySet: []}}, 0); - assertQueryCorrect({a: {$bitsAnySet: [1, 3]}}, 3); - assertQueryCorrect({a: {$bitsAnySet: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4); - assertQueryCorrect({a: {$bitsAnyClear: []}}, 0); - assertQueryCorrect({a: {$bitsAnyClear: [1, 4]}}, 3); - assertQueryCorrect({a: {$bitsAnyClear: [3, 4]}}, 3); - assertQueryCorrect({a: {$bitsAnyClear: [0, 1, 2, 3, 4, 5, 6, 7]}}, 4); - - // Tests with multiple predicates. - assertQueryCorrect({a: {$bitsAllSet: 54, $bitsAllClear: 201}}, 1); - - // Tests on negative numbers. - - coll.drop(); - assert.writeOK(coll.insert({a: -0})); - assert.writeOK(coll.insert({a: -1})); - assert.writeOK(coll.insert({a: -54})); - - // Tests with bitmask. - assertQueryCorrect({a: {$bitsAllSet: 0}}, 3); - assertQueryCorrect({a: {$bitsAllSet: 2}}, 2); - assertQueryCorrect({a: {$bitsAllSet: 127}}, 1); - assertQueryCorrect({a: {$bitsAllSet: 74}}, 2); - assertQueryCorrect({a: {$bitsAllClear: 0}}, 3); - assertQueryCorrect({a: {$bitsAllClear: 53}}, 2); - assertQueryCorrect({a: {$bitsAllClear: 127}}, 1); - assertQueryCorrect({a: {$bitsAnySet: 0}}, 0); - assertQueryCorrect({a: {$bitsAnySet: 2}}, 2); - assertQueryCorrect({a: {$bitsAnySet: 127}}, 2); - assertQueryCorrect({a: {$bitsAnyClear: 0}}, 0); - assertQueryCorrect({a: {$bitsAnyClear: 53}}, 2); - assertQueryCorrect({a: {$bitsAnyClear: 127}}, 2); - - // Tests with array of bit positions. 
- var allPositions = []; - for (var i = 0; i < 64; i++) { - allPositions.push(i); - } - assertQueryCorrect({a: {$bitsAllSet: []}}, 3); - assertQueryCorrect({a: {$bitsAllSet: [1]}}, 2); - assertQueryCorrect({a: {$bitsAllSet: allPositions}}, 1); - assertQueryCorrect({a: {$bitsAllSet: [1, 7, 6, 3, 100]}}, 2); - assertQueryCorrect({a: {$bitsAllClear: []}}, 3); - assertQueryCorrect({a: {$bitsAllClear: [5, 4, 2, 0]}}, 2); - assertQueryCorrect({a: {$bitsAllClear: allPositions}}, 1); - assertQueryCorrect({a: {$bitsAnySet: []}}, 0); - assertQueryCorrect({a: {$bitsAnySet: [1]}}, 2); - assertQueryCorrect({a: {$bitsAnySet: allPositions}}, 2); - assertQueryCorrect({a: {$bitsAnyClear: []}}, 0); - assertQueryCorrect({a: {$bitsAnyClear: [0, 2, 4, 5, 100]}}, 2); - assertQueryCorrect({a: {$bitsAnyClear: allPositions}}, 2); - - // Tests with multiple predicates. - assertQueryCorrect({a: {$bitsAllSet: 74, $bitsAllClear: 53}}, 1); - - // Tests on BinData. - - coll.drop(); - assert.writeOK(coll.insert({a: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")})); - assert.writeOK(coll.insert({a: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")})); - assert.writeOK(coll.insert({a: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")})); - assert.writeOK(coll.insert({a: BinData(0, "////////////////////////////")})); - assert.commandWorked(coll.createIndex({a: 1})); - - // Tests with binary string bitmask. - assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4); - assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3); - assertQueryCorrect({a: {$bitsAllSet: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2); - assertQueryCorrect({a: {$bitsAllSet: BinData(0, "////////////////////////////")}}, 1); - assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 4); - assertQueryCorrect({a: {$bitsAllClear: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 3); - assertQueryCorrect({a: {$bitsAllClear: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2); - assertQueryCorrect({a: {$bitsAllClear: BinData(0, "////////////////////////////")}}, 1); - assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0); - assertQueryCorrect({a: {$bitsAnySet: BinData(0, "AAyfAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1); - assertQueryCorrect({a: {$bitsAnySet: BinData(0, "JAyfqwetkqwklEWRbWERKKJREtbq")}}, 2); - assertQueryCorrect({a: {$bitsAnySet: BinData(0, "////////////////////////////")}}, 3); - assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}}, 0); - assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA")}}, 1); - assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq")}}, 2); - assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3); - - // Tests with multiple predicates. 
- assertQueryCorrect({ - a: { - $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"), - $bitsAllClear: BinData(0, "//yf////////////////////////") - } - }, - 1); - - coll.drop(); +coll.drop(); })(); diff --git a/jstests/core/bson.js b/jstests/core/bson.js index 6c6d5268bbb..7139c6d3e1e 100644 --- a/jstests/core/bson.js +++ b/jstests/core/bson.js @@ -3,136 +3,131 @@ */ (function() { - 'use strict'; - - var t = db.getCollection("bson"); - t.drop(); - function testObjectsAreEqual(obj1, obj2, equalityFunc, func_name) { - var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2); - assert(equalityFunc(obj1, obj2), assert_msg); - } - - function testObjectsAreNotEqual(obj1, obj2, equalityFunc, func_name) { - var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2); - assert(!equalityFunc(obj1, obj2), assert_msg); - } - - function runTests(func, testFunc) { - // Tests on numbers. - testObjectsAreEqual(0, 0, func, testFunc); - testObjectsAreEqual(-5, -5, func, testFunc); - testObjectsAreEqual(1.1, 1.1, func, testFunc); - testObjectsAreEqual(1, 1, func, testFunc); - testObjectsAreEqual(1.1, 1.10, func, testFunc); - var nl0 = new NumberLong("18014398509481984"); - var nl1 = new NumberLong("18014398509481985"); - testObjectsAreEqual(nl0, nl0, func, testFunc); - testObjectsAreNotEqual(nl0, nl1, func, testFunc); - - // Test on key name. - t.insertMany([{a: 0}, {A: 0}]); - testObjectsAreNotEqual(t.findOne({a: 0}), t.findOne({A: 0}), func, testFunc); - - // Tests on strings. - testObjectsAreEqual("abc", "abc", func, testFunc); - testObjectsAreNotEqual("abc", "aBc", func, testFunc); - - // Tests on boolean. - testObjectsAreEqual(true, true, func, testFunc); - testObjectsAreNotEqual(true, false, func, testFunc); - testObjectsAreEqual(false, false, func, testFunc); - - // Tests on date & timestamp. - var d0 = new Date(0); - var d1 = new Date(1); - var ts0 = new Timestamp(0, 1); - var ts1 = new Timestamp(1, 1); - testObjectsAreEqual(d0, d0, func, testFunc); - testObjectsAreNotEqual(d0, d1, func, testFunc); - testObjectsAreNotEqual(d1, ts1, func, testFunc); - testObjectsAreEqual(ts0, ts0, func, testFunc); - testObjectsAreNotEqual(ts0, ts1, func, testFunc); - - // Tests on regex. - testObjectsAreEqual(/3/, /3/, func, testFunc); - testObjectsAreNotEqual(/3/, /3/i, func, testFunc); - - // Tests on DBPointer. - var dbp0 = new DBPointer("test", new ObjectId()); - var dbp1 = new DBPointer("test", new ObjectId()); - testObjectsAreEqual(dbp0, dbp0, func, testFunc); - testObjectsAreNotEqual(dbp0, dbp1, func, testFunc); - - // Tests on JavaScript. - var js0 = Function.prototype; - var js1 = function() {}; - testObjectsAreEqual(js0, Function.prototype, func, testFunc); - testObjectsAreNotEqual(js0, js1, func, testFunc); - - // Tests on arrays. - testObjectsAreEqual([0, 1], [0, 1], func, testFunc); - testObjectsAreNotEqual([0, 1], [0], func, testFunc); - testObjectsAreNotEqual([1, 0], [0, 1], func, testFunc); - - // Tests on BinData & HexData. 
- testObjectsAreEqual(new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"), - new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"), - func, - testFunc); - testObjectsAreEqual(new BinData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc); - testObjectsAreNotEqual(new BinData(0, "AAaa"), new BinData(0, "aaAA"), func, testFunc); - - testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "AAaa"), func, testFunc); - testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "aaAA"), func, testFunc); - testObjectsAreNotEqual(new HexData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc); - - // Tests on ObjectId - testObjectsAreEqual(new ObjectId("57d1b31cd311a43091fe592f"), - new ObjectId("57d1b31cd311a43091fe592f"), - func, - testFunc); - testObjectsAreNotEqual(new ObjectId("57d1b31cd311a43091fe592f"), - new ObjectId("57d1b31ed311a43091fe5930"), - func, - testFunc); - - // Tests on miscellaneous types. - testObjectsAreEqual(NaN, NaN, func, testFunc); - testObjectsAreEqual(null, null, func, testFunc); - testObjectsAreNotEqual(null, -null, func, testFunc); - testObjectsAreEqual(MinKey, MinKey, func, testFunc); - testObjectsAreEqual(MaxKey, MaxKey, func, testFunc); - testObjectsAreNotEqual(MinKey, MaxKey, func, testFunc); - - // Test on object ordering. - testObjectsAreNotEqual({a: 1, b: 2}, {b: 2, a: 1}, func, testFunc); - } - - // Create wrapper function for bsonWoCompare, such that it returns boolean result. - var bsonWoCompareWrapper = function(obj1, obj2) { - return bsonWoCompare(obj1, obj2) === 0; - }; - - // Run the tests which work the same for both comparators. - runTests(bsonWoCompareWrapper, "bsonWoCompare"); - runTests(bsonBinaryEqual, "bsonBinaryEqual"); - - // Run the tests which differ between comparators. - testObjectsAreEqual(NaN, -NaN, bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual(NaN, -NaN, bsonBinaryEqual, "bsonBinaryEqual"); - testObjectsAreEqual(1, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual(1, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); - testObjectsAreEqual(1.0, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual(1.0, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); - testObjectsAreEqual(NumberInt("1"), NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual(NumberInt("1"), NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); - testObjectsAreEqual( - NumberInt("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual( - NumberInt("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual"); - testObjectsAreEqual( - NumberLong("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare"); - testObjectsAreNotEqual( - NumberLong("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual"); - +'use strict'; + +var t = db.getCollection("bson"); +t.drop(); +function testObjectsAreEqual(obj1, obj2, equalityFunc, func_name) { + var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2); + assert(equalityFunc(obj1, obj2), assert_msg); +} + +function testObjectsAreNotEqual(obj1, obj2, equalityFunc, func_name) { + var assert_msg = func_name + " " + tojson(obj1) + " " + tojson(obj2); + assert(!equalityFunc(obj1, obj2), assert_msg); +} + +function runTests(func, testFunc) { + // Tests on numbers. 
+ testObjectsAreEqual(0, 0, func, testFunc); + testObjectsAreEqual(-5, -5, func, testFunc); + testObjectsAreEqual(1.1, 1.1, func, testFunc); + testObjectsAreEqual(1, 1, func, testFunc); + testObjectsAreEqual(1.1, 1.10, func, testFunc); + var nl0 = new NumberLong("18014398509481984"); + var nl1 = new NumberLong("18014398509481985"); + testObjectsAreEqual(nl0, nl0, func, testFunc); + testObjectsAreNotEqual(nl0, nl1, func, testFunc); + + // Test on key name. + t.insertMany([{a: 0}, {A: 0}]); + testObjectsAreNotEqual(t.findOne({a: 0}), t.findOne({A: 0}), func, testFunc); + + // Tests on strings. + testObjectsAreEqual("abc", "abc", func, testFunc); + testObjectsAreNotEqual("abc", "aBc", func, testFunc); + + // Tests on boolean. + testObjectsAreEqual(true, true, func, testFunc); + testObjectsAreNotEqual(true, false, func, testFunc); + testObjectsAreEqual(false, false, func, testFunc); + + // Tests on date & timestamp. + var d0 = new Date(0); + var d1 = new Date(1); + var ts0 = new Timestamp(0, 1); + var ts1 = new Timestamp(1, 1); + testObjectsAreEqual(d0, d0, func, testFunc); + testObjectsAreNotEqual(d0, d1, func, testFunc); + testObjectsAreNotEqual(d1, ts1, func, testFunc); + testObjectsAreEqual(ts0, ts0, func, testFunc); + testObjectsAreNotEqual(ts0, ts1, func, testFunc); + + // Tests on regex. + testObjectsAreEqual(/3/, /3/, func, testFunc); + testObjectsAreNotEqual(/3/, /3/i, func, testFunc); + + // Tests on DBPointer. + var dbp0 = new DBPointer("test", new ObjectId()); + var dbp1 = new DBPointer("test", new ObjectId()); + testObjectsAreEqual(dbp0, dbp0, func, testFunc); + testObjectsAreNotEqual(dbp0, dbp1, func, testFunc); + + // Tests on JavaScript. + var js0 = Function.prototype; + var js1 = function() {}; + testObjectsAreEqual(js0, Function.prototype, func, testFunc); + testObjectsAreNotEqual(js0, js1, func, testFunc); + + // Tests on arrays. + testObjectsAreEqual([0, 1], [0, 1], func, testFunc); + testObjectsAreNotEqual([0, 1], [0], func, testFunc); + testObjectsAreNotEqual([1, 0], [0, 1], func, testFunc); + + // Tests on BinData & HexData. + testObjectsAreEqual(new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"), + new BinData(0, "JANgqwetkqwklEWRbWERKKJREtbq"), + func, + testFunc); + testObjectsAreEqual(new BinData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc); + testObjectsAreNotEqual(new BinData(0, "AAaa"), new BinData(0, "aaAA"), func, testFunc); + + testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "AAaa"), func, testFunc); + testObjectsAreEqual(new HexData(0, "AAaa"), new HexData(0, "aaAA"), func, testFunc); + testObjectsAreNotEqual(new HexData(0, "AAaa"), new BinData(0, "AAaa"), func, testFunc); + + // Tests on ObjectId + testObjectsAreEqual(new ObjectId("57d1b31cd311a43091fe592f"), + new ObjectId("57d1b31cd311a43091fe592f"), + func, + testFunc); + testObjectsAreNotEqual(new ObjectId("57d1b31cd311a43091fe592f"), + new ObjectId("57d1b31ed311a43091fe5930"), + func, + testFunc); + + // Tests on miscellaneous types. + testObjectsAreEqual(NaN, NaN, func, testFunc); + testObjectsAreEqual(null, null, func, testFunc); + testObjectsAreNotEqual(null, -null, func, testFunc); + testObjectsAreEqual(MinKey, MinKey, func, testFunc); + testObjectsAreEqual(MaxKey, MaxKey, func, testFunc); + testObjectsAreNotEqual(MinKey, MaxKey, func, testFunc); + + // Test on object ordering. + testObjectsAreNotEqual({a: 1, b: 2}, {b: 2, a: 1}, func, testFunc); +} + +// Create wrapper function for bsonWoCompare, such that it returns boolean result. 
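// (Illustrative sketch, not taken from the patch: bsonWoCompare() returns a signed ordering
// value (negative, zero, or positive, like a comparator), while bsonBinaryEqual() returns a
// boolean, so the wrapper defined next maps "compares equal" onto a === 0 check.)
//   bsonWoCompare({x: 1}, {x: 2}) < 0     // ordering, not equality
//   bsonWoCompare({x: 1}, {x: 1}) === 0   // equal under BSON woCompare ordering
//   bsonBinaryEqual({x: 1}, {x: 1})       // true: byte-for-byte identical BSON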
+var bsonWoCompareWrapper = function(obj1, obj2) { + return bsonWoCompare(obj1, obj2) === 0; +}; + +// Run the tests which work the same for both comparators. +runTests(bsonWoCompareWrapper, "bsonWoCompare"); +runTests(bsonBinaryEqual, "bsonBinaryEqual"); + +// Run the tests which differ between comparators. +testObjectsAreEqual(NaN, -NaN, bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(NaN, -NaN, bsonBinaryEqual, "bsonBinaryEqual"); +testObjectsAreEqual(1, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(1, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); +testObjectsAreEqual(1.0, NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(1.0, NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); +testObjectsAreEqual(NumberInt("1"), NumberLong("1"), bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(NumberInt("1"), NumberLong("1"), bsonBinaryEqual, "bsonBinaryEqual"); +testObjectsAreEqual(NumberInt("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(NumberInt("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual"); +testObjectsAreEqual(NumberLong("1"), NumberDecimal("1.0"), bsonWoCompareWrapper, "bsonWoCompare"); +testObjectsAreNotEqual(NumberLong("1"), NumberDecimal("1.0"), bsonBinaryEqual, "bsonBinaryEqual"); })(); diff --git a/jstests/core/bson_compare_bug.js b/jstests/core/bson_compare_bug.js index 2a39efd8db7..798af7a6992 100644 --- a/jstests/core/bson_compare_bug.js +++ b/jstests/core/bson_compare_bug.js @@ -1,47 +1,43 @@ (function() { - "use strict"; +"use strict"; - db.bson_compare_bug.drop(); +db.bson_compare_bug.drop(); - // We want some BSON objects for this test. One convenient way to get that is to insert them - // into the database and then get them back through a query. - const coll = db.bson_compare_bug; - assert.commandWorked(coll.insert( - [ - {_id: 1, obj: {val: [], _id: 1}}, - {_id: 2, obj: {val: []}}, - {_id: 3, obj: {_id: 1, val: []}} - ], - {writeConcern: {w: "majority"}})); +// We want some BSON objects for this test. One convenient way to get that is to insert them +// into the database and then get them back through a query. +const coll = db.bson_compare_bug; +assert.commandWorked(coll.insert( + [{_id: 1, obj: {val: [], _id: 1}}, {_id: 2, obj: {val: []}}, {_id: 3, obj: {_id: 1, val: []}}], + {writeConcern: {w: "majority"}})); - // The $replaceRoot is so we can get back two results that have an "_id" field and one that - // doesn't. The first two results from this query are the same, except for that. - // res[0]: {val: [], _id: 1} - // res[1]: {val: []} - const res = coll.aggregate([{$sort: {_id: 1}}, {$replaceRoot: {newRoot: "$obj"}}]).toArray(); - assert.eq(3, res.length); +// The $replaceRoot is so we can get back two results that have an "_id" field and one that +// doesn't. The first two results from this query are the same, except for that. +// res[0]: {val: [], _id: 1} +// res[1]: {val: []} +const res = coll.aggregate([{$sort: {_id: 1}}, {$replaceRoot: {newRoot: "$obj"}}]).toArray(); +assert.eq(3, res.length); - // bsonBinaryEqual() should see that the BSON results from the query are not equal. - assert(!bsonBinaryEqual(res[0], res[1])); +// bsonBinaryEqual() should see that the BSON results from the query are not equal. 
+assert(!bsonBinaryEqual(res[0], res[1])); - // A magic trick: the shell represents the objects in res[0] and res[1] as JavaScript objects - // that internally store raw BSON data but also maintain JavaScript properties for each of their - // BSON fields. The BSON and JavaScript properties are kept in sync both ways. Reading the "val" - // property for the first time results in a call to BSONInfo::resolve(), which materializes the - // "val" BSON field as a JavaScript property. In this case, the resolve function also - // conservatively marks the object as "altered," because "val" is an array, and there's no way - // to observe modifications to it. - assert.eq(res[0].val, res[1].val); +// A magic trick: the shell represents the objects in res[0] and res[1] as JavaScript objects +// that internally store raw BSON data but also maintain JavaScript properties for each of their +// BSON fields. The BSON and JavaScript properties are kept in sync both ways. Reading the "val" +// property for the first time results in a call to BSONInfo::resolve(), which materializes the +// "val" BSON field as a JavaScript property. In this case, the resolve function also +// conservatively marks the object as "altered," because "val" is an array, and there's no way +// to observe modifications to it. +assert.eq(res[0].val, res[1].val); - // We repeat the BSON comparison, but this time, the objects are "altered," and bsonBinaryEqual - // needs to sync the JavaScript properties back into BSON. Before SERVER-39521, a bug in the - // conversion would ignore the "_id" field unless it was previously resolved, which would cause - // res[0] and res[1] to appear equal. - assert(!bsonBinaryEqual(res[0], res[1])); +// We repeat the BSON comparison, but this time, the objects are "altered," and bsonBinaryEqual +// needs to sync the JavaScript properties back into BSON. Before SERVER-39521, a bug in the +// conversion would ignore the "_id" field unless it was previously resolved, which would cause +// res[0] and res[1] to appear equal. +assert(!bsonBinaryEqual(res[0], res[1])); - // The bug that caused the "_id" field to get dropped in conversion involves code that is - // supposed to move the "_id" field to the front when converting a JavaScript object to BSON. - // This check ensures that "_id" is still getting moved to the front. The value of res[0] should - // now have changed so that both it and res[2] have their _id field first. - assert(bsonBinaryEqual(res[0], res[2])); +// The bug that caused the "_id" field to get dropped in conversion involves code that is +// supposed to move the "_id" field to the front when converting a JavaScript object to BSON. +// This check ensures that "_id" is still getting moved to the front. The value of res[0] should +// now have changed so that both it and res[2] have their _id field first. +assert(bsonBinaryEqual(res[0], res[2])); }()); diff --git a/jstests/core/bulk_insert_capped.js b/jstests/core/bulk_insert_capped.js index 50cc8f460dd..4e1f6b26dec 100644 --- a/jstests/core/bulk_insert_capped.js +++ b/jstests/core/bulk_insert_capped.js @@ -8,24 +8,24 @@ // SERVER-21488 Test that multi inserts into capped collections don't cause corruption. // Note: this file must have a name that starts with "bulk" so it gets run by bulk_gle_passthrough. 
(function() { - "use strict"; - var t = db.capped_multi_insert; - t.drop(); +"use strict"; +var t = db.capped_multi_insert; +t.drop(); - db.createCollection(t.getName(), {capped: true, size: 16 * 1024, max: 1}); +db.createCollection(t.getName(), {capped: true, size: 16 * 1024, max: 1}); - t.insert([{_id: 1}, {_id: 2}]); - assert.gleSuccess(db); +t.insert([{_id: 1}, {_id: 2}]); +assert.gleSuccess(db); - // Ensure the collection is valid. - var res = t.validate(true); - assert(res.valid, tojson(res)); +// Ensure the collection is valid. +var res = t.validate(true); +assert(res.valid, tojson(res)); - // Ensure that various ways of iterating the collection only return one document. - assert.eq(t.find().itcount(), 1); // Table scan. - assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered). - assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch. +// Ensure that various ways of iterating the collection only return one document. +assert.eq(t.find().itcount(), 1); // Table scan. +assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered). +assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch. - // Ensure that the second document is the one that is kept. - assert.eq(t.findOne(), {_id: 2}); +// Ensure that the second document is the one that is kept. +assert.eq(t.findOne(), {_id: 2}); }()); diff --git a/jstests/core/bulk_legacy_enforce_gle.js b/jstests/core/bulk_legacy_enforce_gle.js index 88b7c51e758..6359b277c0b 100644 --- a/jstests/core/bulk_legacy_enforce_gle.js +++ b/jstests/core/bulk_legacy_enforce_gle.js @@ -6,120 +6,120 @@ */ (function() { - "use strict"; - const coll = db.bulk_legacy_enforce_gle; +"use strict"; +const coll = db.bulk_legacy_enforce_gle; - /** - * Inserts 'doc' into the collection, asserting that the write succeeds. This runs a - * getLastError if the insert does not return a response. - */ - function insertDocument(doc) { - let res = coll.insert(doc); - if (res) { - assert.writeOK(res); - } else { - assert.gleOK(db.runCommand({getLastError: 1})); - } +/** + * Inserts 'doc' into the collection, asserting that the write succeeds. This runs a + * getLastError if the insert does not return a response. + */ +function insertDocument(doc) { + let res = coll.insert(doc); + if (res) { + assert.writeOK(res); + } else { + assert.gleOK(db.runCommand({getLastError: 1})); } +} - coll.drop(); - let bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 1}).upsert().updateOne({_id: 1}); - assert.writeOK(bulk.execute()); - let gle = assert.gleOK(db.runCommand({getLastError: 1})); - assert.eq(1, gle.n, tojson(gle)); +coll.drop(); +let bulk = coll.initializeUnorderedBulkOp(); +bulk.find({_id: 1}).upsert().updateOne({_id: 1}); +assert.writeOK(bulk.execute()); +let gle = assert.gleOK(db.runCommand({getLastError: 1})); +assert.eq(1, gle.n, tojson(gle)); - // Batch of size 1 should not call resetError even when it errors out. - assert(coll.drop()); - insertDocument({_id: 1}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({none: 1}).upsert().updateOne({_id: 1}); - assert.throws(function() { - bulk.execute(); - }); +// Batch of size 1 should not call resetError even when it errors out. 
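// (Illustrative context, not part of the patch: under the legacy write path the shell sends the
// write and then asks the server for the outcome of the last operation on this connection.
// getLastError reports err: null on success and a message on failure, and n counts documents
// matched or upserted by an update; "resetError" refers to clearing that per-connection error
// state, which is what the following cases check does or does not happen.)
//   var gle = db.runCommand({getLastError: 1});
//   gle.err   // null when the previous write succeeded
//   gle.n     // documents affected by the previous update/upsert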
+assert(coll.drop()); +insertDocument({_id: 1}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({none: 1}).upsert().updateOne({_id: 1}); +assert.throws(function() { + bulk.execute(); +}); - gle = db.runCommand({getLastError: 1}); - assert(gle.ok, tojson(gle)); - assert.neq(null, gle.err, tojson(gle)); +gle = db.runCommand({getLastError: 1}); +assert(gle.ok, tojson(gle)); +assert.neq(null, gle.err, tojson(gle)); - // Batch with all error except last should not call resetError. - assert(coll.drop()); - insertDocument({_id: 1}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({none: 1}).upsert().updateOne({_id: 1}); - bulk.find({none: 1}).upsert().updateOne({_id: 1}); - bulk.find({_id: 0}).upsert().updateOne({_id: 0}); - let res = assert.throws(function() { - bulk.execute(); - }); - assert.eq(2, res.getWriteErrors().length); +// Batch with all error except last should not call resetError. +assert(coll.drop()); +insertDocument({_id: 1}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({none: 1}).upsert().updateOne({_id: 1}); +bulk.find({none: 1}).upsert().updateOne({_id: 1}); +bulk.find({_id: 0}).upsert().updateOne({_id: 0}); +let res = assert.throws(function() { + bulk.execute(); +}); +assert.eq(2, res.getWriteErrors().length); - gle = db.runCommand({getLastError: 1}); - assert(gle.ok, tojson(gle)); - assert.eq(1, gle.n, tojson(gle)); +gle = db.runCommand({getLastError: 1}); +assert(gle.ok, tojson(gle)); +assert.eq(1, gle.n, tojson(gle)); - // Batch with error at middle should not call resetError. - assert(coll.drop()); - insertDocument({_id: 1}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).upsert().updateOne({_id: 0}); - bulk.find({none: 1}).upsert().updateOne({_id: 1}); - bulk.find({_id: 2}).upsert().updateOne({_id: 2}); - res = assert.throws(function() { - bulk.execute(); - }); - assert.eq(1, res.getWriteErrors().length); +// Batch with error at middle should not call resetError. +assert(coll.drop()); +insertDocument({_id: 1}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({_id: 0}).upsert().updateOne({_id: 0}); +bulk.find({none: 1}).upsert().updateOne({_id: 1}); +bulk.find({_id: 2}).upsert().updateOne({_id: 2}); +res = assert.throws(function() { + bulk.execute(); +}); +assert.eq(1, res.getWriteErrors().length); - gle = db.runCommand({getLastError: 1}); - assert(gle.ok, tojson(gle)); - // For legacy writes, mongos sends the bulk as one while the shell sends the write individually. - assert.gte(gle.n, 1, tojson(gle)); +gle = db.runCommand({getLastError: 1}); +assert(gle.ok, tojson(gle)); +// For legacy writes, mongos sends the bulk as one while the shell sends the write individually. +assert.gte(gle.n, 1, tojson(gle)); - // Batch with error at last should call resetError. - assert(coll.drop()); - insertDocument({_id: 2}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).upsert().updateOne({_id: 0}); - bulk.find({_id: 1}).upsert().updateOne({_id: 1}); - bulk.find({none: 1}).upsert().updateOne({_id: 2}); - res = assert.throws(function() { - bulk.execute(); - }); - assert.eq(1, res.getWriteErrors().length); +// Batch with error at last should call resetError. 
+assert(coll.drop()); +insertDocument({_id: 2}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({_id: 0}).upsert().updateOne({_id: 0}); +bulk.find({_id: 1}).upsert().updateOne({_id: 1}); +bulk.find({none: 1}).upsert().updateOne({_id: 2}); +res = assert.throws(function() { + bulk.execute(); +}); +assert.eq(1, res.getWriteErrors().length); - gle = db.runCommand({getLastError: 1}); - assert(gle.ok, tojson(gle)); - assert.eq(0, gle.n, tojson(gle)); +gle = db.runCommand({getLastError: 1}); +assert(gle.ok, tojson(gle)); +assert.eq(0, gle.n, tojson(gle)); - // Batch with error at last should not call resetError if { w: 1 }. - assert(coll.drop()); - insertDocument({_id: 2}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).upsert().updateOne({_id: 0}); - bulk.find({_id: 1}).upsert().updateOne({_id: 1}); - bulk.find({none: 1}).upsert().updateOne({_id: 2}); - res = assert.throws(function() { - bulk.execute(); - }); - assert.eq(1, res.getWriteErrors().length); +// Batch with error at last should not call resetError if { w: 1 }. +assert(coll.drop()); +insertDocument({_id: 2}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({_id: 0}).upsert().updateOne({_id: 0}); +bulk.find({_id: 1}).upsert().updateOne({_id: 1}); +bulk.find({none: 1}).upsert().updateOne({_id: 2}); +res = assert.throws(function() { + bulk.execute(); +}); +assert.eq(1, res.getWriteErrors().length); - gle = db.runCommand({getLastError: 1, w: 1}); - assert(gle.ok, tojson(gle)); - assert.neq(null, gle.err, tojson(gle)); +gle = db.runCommand({getLastError: 1, w: 1}); +assert(gle.ok, tojson(gle)); +assert.neq(null, gle.err, tojson(gle)); - // Batch with error at last should not call resetError if { w: 0 }. - assert(coll.drop()); - insertDocument({_id: 2}); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).upsert().updateOne({_id: 0}); - bulk.find({_id: 1}).upsert().updateOne({_id: 1}); - bulk.find({none: 1}).upsert().updateOne({_id: 2}); - res = assert.throws(function() { - bulk.execute(); - }); - assert.eq(1, res.getWriteErrors().length, () => tojson(res)); +// Batch with error at last should not call resetError if { w: 0 }. +assert(coll.drop()); +insertDocument({_id: 2}); +bulk = coll.initializeUnorderedBulkOp(); +bulk.find({_id: 0}).upsert().updateOne({_id: 0}); +bulk.find({_id: 1}).upsert().updateOne({_id: 1}); +bulk.find({none: 1}).upsert().updateOne({_id: 2}); +res = assert.throws(function() { + bulk.execute(); +}); +assert.eq(1, res.getWriteErrors().length, () => tojson(res)); - gle = db.runCommand({getLastError: 1, w: 0}); - assert(gle.ok, tojson(gle)); - assert.neq(null, gle.err, tojson(gle)); +gle = db.runCommand({getLastError: 1, w: 0}); +assert(gle.ok, tojson(gle)); +assert.neq(null, gle.err, tojson(gle)); }()); diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js index 7457e13e4eb..dcf1a0d28dc 100644 --- a/jstests/core/bypass_doc_validation.js +++ b/jstests/core/bypass_doc_validation.js @@ -17,160 +17,159 @@ * - update */ (function() { - 'use strict'; - - // For isWiredTiger. - load("jstests/concurrency/fsm_workload_helpers/server_types.js"); - // For isReplSet - load("jstests/libs/fixture_helpers.js"); - - function assertFailsValidation(res) { - if (res instanceof WriteResult || res instanceof BulkWriteResult) { - assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); - } else { - assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); - } +'use strict'; + +// For isWiredTiger. 
+load("jstests/concurrency/fsm_workload_helpers/server_types.js"); +// For isReplSet +load("jstests/libs/fixture_helpers.js"); + +function assertFailsValidation(res) { + if (res instanceof WriteResult || res instanceof BulkWriteResult) { + assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); + } else { + assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); } +} - const dbName = 'bypass_document_validation'; - const collName = 'bypass_document_validation'; - const myDb = db.getSiblingDB(dbName); - const coll = myDb[collName]; - - /** - * Tests that we can bypass document validation when appropriate when a collection has validator - * 'validator', which should enforce the existence of a field "a". - */ - function runBypassDocumentValidationTest(validator) { - // Use majority write concern to clear the drop-pending that can cause lock conflicts with - // transactions. - coll.drop({writeConcern: {w: "majority"}}); - - // Insert documents into the collection that would not be valid before setting 'validator'. - assert.writeOK(coll.insert({_id: 1})); - assert.writeOK(coll.insert({_id: 2})); - assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator})); - - const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; - // Test applyOps with a simple insert if not on mongos. - if (!isMongos) { - const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 9}}]; - assertFailsValidation(myDb.runCommand({applyOps: op, bypassDocumentValidation: false})); - assert.eq(0, coll.count({_id: 9})); - assert.commandWorked(myDb.runCommand({applyOps: op, bypassDocumentValidation: true})); - assert.eq(1, coll.count({_id: 9})); - } - - // Test the aggregation command with a $out stage. - const outputCollName = 'bypass_output_coll'; - const outputColl = myDb[outputCollName]; - outputColl.drop(); - assert.commandWorked(myDb.createCollection(outputCollName, {validator: validator})); - const pipeline = - [{$match: {_id: 1}}, {$project: {aggregation: {$add: [1]}}}, {$out: outputCollName}]; - assert.throws(function() { - coll.aggregate(pipeline, {bypassDocumentValidation: false}); - }); - assert.eq(0, outputColl.count({aggregation: 1})); - coll.aggregate(pipeline, {bypassDocumentValidation: true}); - assert.eq(1, outputColl.count({aggregation: 1})); - - // Test the findAndModify command. - assert.throws(function() { - coll.findAndModify( - {update: {$set: {findAndModify: 1}}, bypassDocumentValidation: false}); - }); - assert.eq(0, coll.count({findAndModify: 1})); - coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: true}); - assert.eq(1, coll.count({findAndModify: 1})); - - // Test the mapReduce command. - const map = function() { - emit(1, 1); - }; - const reduce = function() { - return 'mapReduce'; - }; - let res = myDb.runCommand({ - mapReduce: collName, - map: map, - reduce: reduce, - out: {replace: outputCollName}, - bypassDocumentValidation: false - }); - assertFailsValidation(res); - assert.eq(0, outputColl.count({value: 'mapReduce'})); - res = myDb.runCommand({ - mapReduce: collName, - map: map, - reduce: reduce, - out: {replace: outputCollName}, - bypassDocumentValidation: true - }); - assert.commandWorked(res); - assert.eq(1, outputColl.count({value: 'mapReduce'})); - - // Test the insert command. Includes a test for a document with no _id (SERVER-20859). 
- res = myDb.runCommand({insert: collName, documents: [{}], bypassDocumentValidation: false}); - assertFailsValidation(BulkWriteResult(res)); - res = myDb.runCommand( - {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: false}); - assertFailsValidation(BulkWriteResult(res)); - res = myDb.runCommand( - {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true}); - assert.writeOK(res); - - // Test the update command. - res = myDb.runCommand({ - update: collName, - updates: [{q: {}, u: {$set: {update: 1}}}], - bypassDocumentValidation: false - }); - assertFailsValidation(BulkWriteResult(res)); - assert.eq(0, coll.count({update: 1})); - res = myDb.runCommand({ - update: collName, - updates: [{q: {}, u: {$set: {update: 1}}}], - bypassDocumentValidation: true - }); - assert.writeOK(res); - assert.eq(1, coll.count({update: 1})); - - // Pipeline-style update is only supported for commands and not for OP_UPDATE which cannot - // differentiate between an update object and an array. - res = myDb.runCommand({ - update: collName, - updates: [{q: {}, u: [{$set: {pipeline: 1}}]}], - bypassDocumentValidation: false - }); - assertFailsValidation(BulkWriteResult(res)); - assert.eq(0, coll.count({pipeline: 1})); - - assert.commandWorked(myDb.runCommand({ - update: collName, - updates: [{q: {}, u: [{$set: {pipeline: 1}}]}], - bypassDocumentValidation: true - })); - assert.eq(1, coll.count({pipeline: 1})); - - assert.commandFailed(myDb.runCommand({ - findAndModify: collName, - update: [{$set: {findAndModifyPipeline: 1}}], - bypassDocumentValidation: false - })); - assert.eq(0, coll.count({findAndModifyPipeline: 1})); - - assert.commandWorked(myDb.runCommand({ - findAndModify: collName, - update: [{$set: {findAndModifyPipeline: 1}}], - bypassDocumentValidation: true - })); - assert.eq(1, coll.count({findAndModifyPipeline: 1})); - } +const dbName = 'bypass_document_validation'; +const collName = 'bypass_document_validation'; +const myDb = db.getSiblingDB(dbName); +const coll = myDb[collName]; - // Run the test using a normal validator. - runBypassDocumentValidationTest({a: {$exists: true}}); +/** + * Tests that we can bypass document validation when appropriate when a collection has validator + * 'validator', which should enforce the existence of a field "a". + */ +function runBypassDocumentValidationTest(validator) { + // Use majority write concern to clear the drop-pending that can cause lock conflicts with + // transactions. + coll.drop({writeConcern: {w: "majority"}}); + + // Insert documents into the collection that would not be valid before setting 'validator'. + assert.writeOK(coll.insert({_id: 1})); + assert.writeOK(coll.insert({_id: 2})); + assert.commandWorked(myDb.runCommand({collMod: collName, validator: validator})); + + const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; + // Test applyOps with a simple insert if not on mongos. + if (!isMongos) { + const op = [{op: 'i', ns: coll.getFullName(), o: {_id: 9}}]; + assertFailsValidation(myDb.runCommand({applyOps: op, bypassDocumentValidation: false})); + assert.eq(0, coll.count({_id: 9})); + assert.commandWorked(myDb.runCommand({applyOps: op, bypassDocumentValidation: true})); + assert.eq(1, coll.count({_id: 9})); + } - // Run the test again with an equivalent JSON Schema validator. - runBypassDocumentValidationTest({$jsonSchema: {required: ['a']}}); + // Test the aggregation command with a $out stage. 
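    // (Aside, a sketch rather than part of the patch: $out writes its results into the target
    // collection, so it is the target's validator, created below with the same 'validator'
    // document, that rejects the projected documents unless the aggregate itself sets
    // bypassDocumentValidation, roughly:)
    //   coll.aggregate([...pipeline, {$out: "bypass_output_coll"}], {bypassDocumentValidation: true});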
+ const outputCollName = 'bypass_output_coll'; + const outputColl = myDb[outputCollName]; + outputColl.drop(); + assert.commandWorked(myDb.createCollection(outputCollName, {validator: validator})); + const pipeline = + [{$match: {_id: 1}}, {$project: {aggregation: {$add: [1]}}}, {$out: outputCollName}]; + assert.throws(function() { + coll.aggregate(pipeline, {bypassDocumentValidation: false}); + }); + assert.eq(0, outputColl.count({aggregation: 1})); + coll.aggregate(pipeline, {bypassDocumentValidation: true}); + assert.eq(1, outputColl.count({aggregation: 1})); + + // Test the findAndModify command. + assert.throws(function() { + coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: false}); + }); + assert.eq(0, coll.count({findAndModify: 1})); + coll.findAndModify({update: {$set: {findAndModify: 1}}, bypassDocumentValidation: true}); + assert.eq(1, coll.count({findAndModify: 1})); + + // Test the mapReduce command. + const map = function() { + emit(1, 1); + }; + const reduce = function() { + return 'mapReduce'; + }; + let res = myDb.runCommand({ + mapReduce: collName, + map: map, + reduce: reduce, + out: {replace: outputCollName}, + bypassDocumentValidation: false + }); + assertFailsValidation(res); + assert.eq(0, outputColl.count({value: 'mapReduce'})); + res = myDb.runCommand({ + mapReduce: collName, + map: map, + reduce: reduce, + out: {replace: outputCollName}, + bypassDocumentValidation: true + }); + assert.commandWorked(res); + assert.eq(1, outputColl.count({value: 'mapReduce'})); + + // Test the insert command. Includes a test for a document with no _id (SERVER-20859). + res = myDb.runCommand({insert: collName, documents: [{}], bypassDocumentValidation: false}); + assertFailsValidation(BulkWriteResult(res)); + res = myDb.runCommand( + {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: false}); + assertFailsValidation(BulkWriteResult(res)); + res = myDb.runCommand( + {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true}); + assert.writeOK(res); + + // Test the update command. + res = myDb.runCommand({ + update: collName, + updates: [{q: {}, u: {$set: {update: 1}}}], + bypassDocumentValidation: false + }); + assertFailsValidation(BulkWriteResult(res)); + assert.eq(0, coll.count({update: 1})); + res = myDb.runCommand({ + update: collName, + updates: [{q: {}, u: {$set: {update: 1}}}], + bypassDocumentValidation: true + }); + assert.writeOK(res); + assert.eq(1, coll.count({update: 1})); + + // Pipeline-style update is only supported for commands and not for OP_UPDATE which cannot + // differentiate between an update object and an array. 
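    // (Aside, not from the patch: the difference being exercised is just the shape of 'u'. The
    // classic form passes an update document, while the pipeline form passes an array of
    // aggregation stages, which only the update command, not OP_UPDATE, can represent:)
    //   {q: {}, u: {$set: {x: 1}}}     // classic update document
    //   {q: {}, u: [{$set: {x: 1}}]}   // pipeline-style update: an array of stages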
+ res = myDb.runCommand({ + update: collName, + updates: [{q: {}, u: [{$set: {pipeline: 1}}]}], + bypassDocumentValidation: false + }); + assertFailsValidation(BulkWriteResult(res)); + assert.eq(0, coll.count({pipeline: 1})); + + assert.commandWorked(myDb.runCommand({ + update: collName, + updates: [{q: {}, u: [{$set: {pipeline: 1}}]}], + bypassDocumentValidation: true + })); + assert.eq(1, coll.count({pipeline: 1})); + + assert.commandFailed(myDb.runCommand({ + findAndModify: collName, + update: [{$set: {findAndModifyPipeline: 1}}], + bypassDocumentValidation: false + })); + assert.eq(0, coll.count({findAndModifyPipeline: 1})); + + assert.commandWorked(myDb.runCommand({ + findAndModify: collName, + update: [{$set: {findAndModifyPipeline: 1}}], + bypassDocumentValidation: true + })); + assert.eq(1, coll.count({findAndModifyPipeline: 1})); +} + +// Run the test using a normal validator. +runBypassDocumentValidationTest({a: {$exists: true}}); + +// Run the test again with an equivalent JSON Schema validator. +runBypassDocumentValidationTest({$jsonSchema: {required: ['a']}}); })(); diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js index 9fffa2db7b3..393d8589a60 100644 --- a/jstests/core/capped6.js +++ b/jstests/core/capped6.js @@ -11,103 +11,103 @@ // uses_testing_only_commands, // ] (function() { - var coll = db.capped6; +var coll = db.capped6; - Random.setRandomSeed(); - var maxDocuments = Random.randInt(400) + 100; +Random.setRandomSeed(); +var maxDocuments = Random.randInt(400) + 100; - /** - * Check that documents in the collection are in order according to the value - * of a, which corresponds to the insert order. This is a check that the oldest - * document(s) is/are deleted when space is needed for the newest document. The - * check is performed in both forward and reverse directions. - */ - function checkOrder(i, valueArray) { - res = coll.find().sort({$natural: -1}); - assert(res.hasNext(), "A"); - var j = i; - while (res.hasNext()) { - assert.eq(valueArray[j--].a, res.next().a, "B"); - } +/** + * Check that documents in the collection are in order according to the value + * of a, which corresponds to the insert order. This is a check that the oldest + * document(s) is/are deleted when space is needed for the newest document. The + * check is performed in both forward and reverse directions. + */ +function checkOrder(i, valueArray) { + res = coll.find().sort({$natural: -1}); + assert(res.hasNext(), "A"); + var j = i; + while (res.hasNext()) { + assert.eq(valueArray[j--].a, res.next().a, "B"); + } - res = coll.find().sort({$natural: 1}); - assert(res.hasNext(), "C"); - while (res.hasNext()) { - assert.eq(valueArray[++j].a, res.next().a, "D"); - } - assert.eq(j, i, "E"); + res = coll.find().sort({$natural: 1}); + assert(res.hasNext(), "C"); + while (res.hasNext()) { + assert.eq(valueArray[++j].a, res.next().a, "D"); } + assert.eq(j, i, "E"); +} - /* - * Prepare the values to insert and create the capped collection. - */ - function prepareCollection(shouldReverse) { - coll.drop(); - assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000})); - var valueArray = new Array(maxDocuments); - var c = ""; - for (i = 0; i < maxDocuments; ++i, c += "-") { - // The a values are strings of increasing length. - valueArray[i] = {a: c}; - } - if (shouldReverse) { - valueArray.reverse(); - } - return valueArray; +/* + * Prepare the values to insert and create the capped collection. 
+ */ +function prepareCollection(shouldReverse) { + coll.drop(); + assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000})); + var valueArray = new Array(maxDocuments); + var c = ""; + for (i = 0; i < maxDocuments; ++i, c += "-") { + // The a values are strings of increasing length. + valueArray[i] = {a: c}; + } + if (shouldReverse) { + valueArray.reverse(); } + return valueArray; +} - /** - * 1. When this function is called the first time, insert new documents until 'maxDocuments' - * number of documents have been inserted. Note that the collection may not have - * 'maxDocuments' number of documents since it is a capped collection. - * 2. Remove all but one documents via one or more "captrunc" requests. - * 3. For each subsequent call to this function, keep track of the removed documents using - * 'valueArrayIndexes' and re-insert the removed documents each time this function is - * called. - */ - function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) { - // If n <= 0, no documents are removed by captrunc. - assert.gt(n, 0); - assert.gte(valueArray.length, maxDocuments); - for (var i = valueArrayCurIndex; i < maxDocuments; ++i) { - assert.writeOK(coll.insert(valueArray[i])); - } - count = coll.count(); +/** + * 1. When this function is called the first time, insert new documents until 'maxDocuments' + * number of documents have been inserted. Note that the collection may not have + * 'maxDocuments' number of documents since it is a capped collection. + * 2. Remove all but one documents via one or more "captrunc" requests. + * 3. For each subsequent call to this function, keep track of the removed documents using + * 'valueArrayIndexes' and re-insert the removed documents each time this function is + * called. + */ +function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) { + // If n <= 0, no documents are removed by captrunc. + assert.gt(n, 0); + assert.gte(valueArray.length, maxDocuments); + for (var i = valueArrayCurIndex; i < maxDocuments; ++i) { + assert.writeOK(coll.insert(valueArray[i])); + } + count = coll.count(); - // The index corresponding to the last document in the collection. - valueArrayCurIndex = maxDocuments - 1; + // The index corresponding to the last document in the collection. + valueArrayCurIndex = maxDocuments - 1; - // Number of times to call "captrunc" so that (count - 1) documents are removed - // and at least 1 document is left in the array. - var iterations = Math.floor((count - 1) / (n + inc)); + // Number of times to call "captrunc" so that (count - 1) documents are removed + // and at least 1 document is left in the array. + var iterations = Math.floor((count - 1) / (n + inc)); - for (i = 0; i < iterations; ++i) { - assert.commandWorked(db.runCommand({captrunc: "capped6", n: n, inc: inc})); - count -= (n + inc); - valueArrayCurIndex -= (n + inc); - checkOrder(valueArrayCurIndex, valueArray); - } - // We return the index of the next document that should be inserted into the capped - // collection, which would be the document after valueArrayCurIndex. - return valueArrayCurIndex + 1; + for (i = 0; i < iterations; ++i) { + assert.commandWorked(db.runCommand({captrunc: "capped6", n: n, inc: inc})); + count -= (n + inc); + valueArrayCurIndex -= (n + inc); + checkOrder(valueArrayCurIndex, valueArray); } + // We return the index of the next document that should be inserted into the capped + // collection, which would be the document after valueArrayCurIndex. 
+ return valueArrayCurIndex + 1; +} - function doTest(shouldReverse) { - var valueArray = prepareCollection(shouldReverse); - var valueArrayIndex = 0; - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, false); - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, true); - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, true); - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, false); - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, true); - valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, false); - } +function doTest(shouldReverse) { + var valueArray = prepareCollection(shouldReverse); + var valueArrayIndex = 0; + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, false); + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 1, true); + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, true); + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, 16, false); + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, true); + valueArrayIndex = runCapTrunc(valueArray, valueArrayIndex, maxDocuments - 2, false); +} - // Repeatedly add up to 'maxDocuments' documents and then truncate the newest - // documents. Newer documents take up more space than older documents. - doTest(false); +// Repeatedly add up to 'maxDocuments' documents and then truncate the newest +// documents. Newer documents take up more space than older documents. +doTest(false); - // Same test as above, but now the newer documents take less space than the - // older documents instead of more. - doTest(true); +// Same test as above, but now the newer documents take less space than the +// older documents instead of more. +doTest(true); })(); diff --git a/jstests/core/capped_queries_and_id_index.js b/jstests/core/capped_queries_and_id_index.js index a10a4f60daf..1bf463f05ed 100644 --- a/jstests/core/capped_queries_and_id_index.js +++ b/jstests/core/capped_queries_and_id_index.js @@ -1,24 +1,24 @@ // Tests the behavior of querying or updating a capped collection with and without an _id index. // @tags: [requires_capped] (function() { - "use strict"; - const coll = db.capped9; - coll.drop(); +"use strict"; +const coll = db.capped9; +coll.drop(); - assert.commandWorked(db.createCollection("capped9", {capped: true, size: 1024 * 50})); +assert.commandWorked(db.createCollection("capped9", {capped: true, size: 1024 * 50})); - assert.writeOK(coll.insert({_id: 1, x: 2, y: 3})); +assert.writeOK(coll.insert({_id: 1, x: 2, y: 3})); - assert.eq(1, coll.find({x: 2}).itcount()); - assert.eq(1, coll.find({y: 3}).itcount()); +assert.eq(1, coll.find({x: 2}).itcount()); +assert.eq(1, coll.find({y: 3}).itcount()); - // SERVER-3064 proposes making the following queries/updates by _id result in an error. - assert.eq(1, coll.find({_id: 1}).itcount()); - assert.writeOK(coll.update({_id: 1}, {$set: {y: 4}})); - assert.eq(4, coll.findOne().y); +// SERVER-3064 proposes making the following queries/updates by _id result in an error. 
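// (Context, illustrative and not part of the patch: capped collections have not always been
// given the implicit _id index, so before the explicit createIndex({_id: 1}) further down,
// the _id query and update below are answered by scanning the collection rather than by an
// index lookup. One hypothetical way to observe the difference:)
//   coll.find({_id: 1}).explain().queryPlanner.winningPlan   // collection scan before the index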
+assert.eq(1, coll.find({_id: 1}).itcount()); +assert.writeOK(coll.update({_id: 1}, {$set: {y: 4}})); +assert.eq(4, coll.findOne().y); - assert.commandWorked(coll.createIndex({_id: 1})); - assert.eq(1, coll.find({_id: 1}).itcount()); - assert.writeOK(coll.update({_id: 1}, {$set: {y: 5}})); - assert.eq(5, coll.findOne().y); +assert.commandWorked(coll.createIndex({_id: 1})); +assert.eq(1, coll.find({_id: 1}).itcount()); +assert.writeOK(coll.update({_id: 1}, {$set: {y: 5}})); +assert.eq(5, coll.findOne().y); }()); diff --git a/jstests/core/capped_update.js b/jstests/core/capped_update.js index b75ecb8243e..f11502b45fe 100644 --- a/jstests/core/capped_update.js +++ b/jstests/core/capped_update.js @@ -8,26 +8,26 @@ * ] */ (function() { - 'use strict'; - var t = db.getSiblingDB("local").cannot_change_capped_size; - t.drop(); - assert.commandWorked( - t.getDB().createCollection(t.getName(), {capped: true, size: 1024, autoIndexId: false})); - assert.eq(0, t.getIndexes().length, "the capped collection has indexes"); +'use strict'; +var t = db.getSiblingDB("local").cannot_change_capped_size; +t.drop(); +assert.commandWorked( + t.getDB().createCollection(t.getName(), {capped: true, size: 1024, autoIndexId: false})); +assert.eq(0, t.getIndexes().length, "the capped collection has indexes"); - for (var j = 1; j <= 10; j++) { - assert.writeOK(t.insert({_id: j, s: "Hello, World!"})); - } +for (var j = 1; j <= 10; j++) { + assert.writeOK(t.insert({_id: j, s: "Hello, World!"})); +} - assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World - assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}})); - assert.writeError(t.update({_id: 10}, {})); - assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"})); +assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World +assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}})); +assert.writeError(t.update({_id: 10}, {})); +assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"})); - assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}})); - var doc = t.findOne({a: 2}); - assert.eq(undefined, doc["_id"], "now has _id after godinsert"); - assert.writeOK(t.update({a: 2}, {$inc: {a: 1}})); - doc = t.findOne({a: 3}); - assert.eq(undefined, doc["_id"], "now has _id after update"); +assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}})); +var doc = t.findOne({a: 2}); +assert.eq(undefined, doc["_id"], "now has _id after godinsert"); +assert.writeOK(t.update({a: 2}, {$inc: {a: 1}})); +doc = t.findOne({a: 3}); +assert.eq(undefined, doc["_id"], "now has _id after update"); })(); diff --git a/jstests/core/client_metadata_ismaster.js b/jstests/core/client_metadata_ismaster.js index e5aa7d2547a..92a6b9cee2d 100644 --- a/jstests/core/client_metadata_ismaster.js +++ b/jstests/core/client_metadata_ismaster.js @@ -1,12 +1,11 @@ // Test that verifies client metadata behavior for isMaster (function() { - "use strict"; - - // Verify that a isMaster request fails if it contains client metadata, and it is not first. - // The shell sends isMaster on the first connection - var result = db.runCommand({"isMaster": 1, "client": {"application": "foobar"}}); - assert.commandFailed(result); - assert.eq(result.code, ErrorCodes.ClientMetadataCannotBeMutated, tojson(result)); +"use strict"; +// Verify that a isMaster request fails if it contains client metadata, and it is not first. 
+// The shell sends isMaster on the first connection +var result = db.runCommand({"isMaster": 1, "client": {"application": "foobar"}}); +assert.commandFailed(result); +assert.eq(result.code, ErrorCodes.ClientMetadataCannotBeMutated, tojson(result)); })(); diff --git a/jstests/core/clone_as_capped_nonexistant.js b/jstests/core/clone_as_capped_nonexistant.js index 1a87749002d..a4f8cf9787d 100644 --- a/jstests/core/clone_as_capped_nonexistant.js +++ b/jstests/core/clone_as_capped_nonexistant.js @@ -8,31 +8,30 @@ */ (function() { - "use strict"; - // This test ensures that CloneCollectionAsCapped()ing a nonexistent collection will not - // cause the server to abort (SERVER-13750) +"use strict"; +// This test ensures that CloneCollectionAsCapped()ing a nonexistent collection will not +// cause the server to abort (SERVER-13750) - var dbname = "clone_collection_as_capped_nonexistent"; - var testDb = db.getSiblingDB(dbname); - testDb.dropDatabase(); +var dbname = "clone_collection_as_capped_nonexistent"; +var testDb = db.getSiblingDB(dbname); +testDb.dropDatabase(); - // Database does not exist here - var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024}); - assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked"); - var isSharded = (db.isMaster().msg == "isdbgrid"); +// Database does not exist here +var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024}); +assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked"); +var isSharded = (db.isMaster().msg == "isdbgrid"); - assert.eq( - res.errmsg, - isSharded ? "no such cmd: cloneCollectionAsCapped" : "database " + dbname + " not found", - "converting a nonexistent to capped failed but for the wrong reason"); +assert.eq(res.errmsg, + isSharded ? "no such cmd: cloneCollectionAsCapped" : "database " + dbname + " not found", + "converting a nonexistent to capped failed but for the wrong reason"); - // Database exists, but collection doesn't - testDb.coll.insert({}); +// Database exists, but collection doesn't +testDb.coll.insert({}); - var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024}); - assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked"); - assert.eq(res.errmsg, - isSharded ? "no such cmd: cloneCollectionAsCapped" - : "source collection " + dbname + ".foo does not exist", - "converting a nonexistent to capped failed but for the wrong reason"); +var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024}); +assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked"); +assert.eq(res.errmsg, + isSharded ? "no such cmd: cloneCollectionAsCapped" + : "source collection " + dbname + ".foo does not exist", + "converting a nonexistent to capped failed but for the wrong reason"); }()); diff --git a/jstests/core/collation.js b/jstests/core/collation.js index f381b9280ee..a3a186349e7 100644 --- a/jstests/core/collation.js +++ b/jstests/core/collation.js @@ -9,95 +9,135 @@ // Integration tests for the collation feature. (function() { - 'use strict'; - - load("jstests/libs/analyze_plan.js"); - load("jstests/libs/get_index_helpers.js"); - // For isWiredTiger. 
- load("jstests/concurrency/fsm_workload_helpers/server_types.js"); - // For isReplSet - load("jstests/libs/fixture_helpers.js"); - - var coll = db.collation; - coll.drop(); - - var explainRes; - var writeRes; - var planStage; - - var isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - var isMongos = (isMaster.msg === "isdbgrid"); - - var assertIndexHasCollation = function(keyPattern, collation) { - var indexSpecs = coll.getIndexes(); - var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation); - assert.neq(null, - found, - "Index with key pattern " + tojson(keyPattern) + " and collation " + - tojson(collation) + " not found: " + tojson(indexSpecs)); - }; - - var getQueryCollation = function(explainRes) { - if (explainRes.queryPlanner.hasOwnProperty("collation")) { - return explainRes.queryPlanner.collation; - } - - if (explainRes.queryPlanner.winningPlan.hasOwnProperty("shards") && - explainRes.queryPlanner.winningPlan.shards.length > 0 && - explainRes.queryPlanner.winningPlan.shards[0].hasOwnProperty("collation")) { - return explainRes.queryPlanner.winningPlan.shards[0].collation; - } - - return null; - }; - - // - // Test using db.createCollection() to make a collection with a default collation. - // - - // Attempting to create a collection with an invalid collation should fail. - assert.commandFailed(db.createCollection("collation", {collation: "not an object"})); - assert.commandFailed(db.createCollection("collation", {collation: {}})); - assert.commandFailed(db.createCollection("collation", {collation: {blah: 1}})); - assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", blah: 1}})); - assert.commandFailed(db.createCollection("collation", {collation: {locale: "xx"}})); - assert.commandFailed( - db.createCollection("collation", {collation: {locale: "en", strength: 99}})); - - // Attempting to create a collection whose collation version does not match the collator version - // produced by ICU should result in failure with a special error code. - assert.commandFailedWithCode( - db.createCollection("collation", {collation: {locale: "en", version: "unknownVersion"}}), - ErrorCodes.IncompatibleCollationVersion); - - // Ensure we can create a collection with the "simple" collation as the collection default. - assert.commandWorked(db.createCollection("collation", {collation: {locale: "simple"}})); - var collectionInfos = db.getCollectionInfos({name: "collation"}); - assert.eq(collectionInfos.length, 1); - assert(!collectionInfos[0].options.hasOwnProperty("collation")); - coll.drop(); +'use strict'; + +load("jstests/libs/analyze_plan.js"); +load("jstests/libs/get_index_helpers.js"); +// For isWiredTiger. 
+load("jstests/concurrency/fsm_workload_helpers/server_types.js"); +// For isReplSet +load("jstests/libs/fixture_helpers.js"); + +var coll = db.collation; +coll.drop(); + +var explainRes; +var writeRes; +var planStage; + +var isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +var isMongos = (isMaster.msg === "isdbgrid"); + +var assertIndexHasCollation = function(keyPattern, collation) { + var indexSpecs = coll.getIndexes(); + var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation); + assert.neq(null, + found, + "Index with key pattern " + tojson(keyPattern) + " and collation " + + tojson(collation) + " not found: " + tojson(indexSpecs)); +}; + +var getQueryCollation = function(explainRes) { + if (explainRes.queryPlanner.hasOwnProperty("collation")) { + return explainRes.queryPlanner.collation; + } - // Ensure that we populate all collation-related fields when we create a collection with a valid - // collation. - assert.commandWorked(db.createCollection("collation", {collation: {locale: "fr_CA"}})); - var collectionInfos = db.getCollectionInfos({name: "collation"}); - assert.eq(collectionInfos.length, 1); - assert.eq(collectionInfos[0].options.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); + if (explainRes.queryPlanner.winningPlan.hasOwnProperty("shards") && + explainRes.queryPlanner.winningPlan.shards.length > 0 && + explainRes.queryPlanner.winningPlan.shards[0].hasOwnProperty("collation")) { + return explainRes.queryPlanner.winningPlan.shards[0].collation; + } - // Ensure that an index with no collation inherits the collection-default collation. - assert.commandWorked(coll.ensureIndex({a: 1})); + return null; +}; + +// +// Test using db.createCollection() to make a collection with a default collation. +// + +// Attempting to create a collection with an invalid collation should fail. +assert.commandFailed(db.createCollection("collation", {collation: "not an object"})); +assert.commandFailed(db.createCollection("collation", {collation: {}})); +assert.commandFailed(db.createCollection("collation", {collation: {blah: 1}})); +assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", blah: 1}})); +assert.commandFailed(db.createCollection("collation", {collation: {locale: "xx"}})); +assert.commandFailed(db.createCollection("collation", {collation: {locale: "en", strength: 99}})); + +// Attempting to create a collection whose collation version does not match the collator version +// produced by ICU should result in failure with a special error code. +assert.commandFailedWithCode( + db.createCollection("collation", {collation: {locale: "en", version: "unknownVersion"}}), + ErrorCodes.IncompatibleCollationVersion); + +// Ensure we can create a collection with the "simple" collation as the collection default. +assert.commandWorked(db.createCollection("collation", {collation: {locale: "simple"}})); +var collectionInfos = db.getCollectionInfos({name: "collation"}); +assert.eq(collectionInfos.length, 1); +assert(!collectionInfos[0].options.hasOwnProperty("collation")); +coll.drop(); + +// Ensure that we populate all collation-related fields when we create a collection with a valid +// collation. 
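// (Aside, a sketch rather than part of the patch: the server fills in the remaining collation
// options (caseLevel, strength, alternate, backwards, and so on) from the ICU defaults for the
// requested locale and records the collator version, which is why the assertion below compares
// against a fully spelled-out document. Reading it back is an ordinary listCollections lookup:)
//   db.getCollectionInfos({name: "collation"})[0].options.collation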
+assert.commandWorked(db.createCollection("collation", {collation: {locale: "fr_CA"}})); +var collectionInfos = db.getCollectionInfos({name: "collation"}); +assert.eq(collectionInfos.length, 1); +assert.eq(collectionInfos[0].options.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Ensure that an index with no collation inherits the collection-default collation. +assert.commandWorked(coll.ensureIndex({a: 1})); +assertIndexHasCollation({a: 1}, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Ensure that an index which specifies an overriding collation does not use the collection +// default. +assert.commandWorked(coll.ensureIndex({b: 1}, {collation: {locale: "en_US"}})); +assertIndexHasCollation({b: 1}, { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false, + version: "57.1", +}); + +// Ensure that an index which specifies the "simple" collation as an overriding collation still +// does not use the collection default. +assert.commandWorked(coll.ensureIndex({d: 1}, {collation: {locale: "simple"}})); +assertIndexHasCollation({d: 1}, {locale: "simple"}); + +// Ensure that a v=1 index doesn't inherit the collection-default collation. +assert.commandWorked(coll.ensureIndex({c: 1}, {v: 1})); +assertIndexHasCollation({c: 1}, {locale: "simple"}); + +// Test that all indexes retain their current collation when the collection is re-indexed. +if (!isMongos) { + assert.commandWorked(coll.reIndex()); assertIndexHasCollation({a: 1}, { locale: "fr_CA", caseLevel: false, @@ -110,10 +150,6 @@ backwards: true, version: "57.1", }); - - // Ensure that an index which specifies an overriding collation does not use the collection - // default. - assert.commandWorked(coll.ensureIndex({b: 1}, {collation: {locale: "en_US"}})); assertIndexHasCollation({b: 1}, { locale: "en_US", caseLevel: false, @@ -126,1838 +162,1766 @@ backwards: false, version: "57.1", }); - - // Ensure that an index which specifies the "simple" collation as an overriding collation still - // does not use the collection default. - assert.commandWorked(coll.ensureIndex({d: 1}, {collation: {locale: "simple"}})); assertIndexHasCollation({d: 1}, {locale: "simple"}); - - // Ensure that a v=1 index doesn't inherit the collection-default collation. - assert.commandWorked(coll.ensureIndex({c: 1}, {v: 1})); assertIndexHasCollation({c: 1}, {locale: "simple"}); - - // Test that all indexes retain their current collation when the collection is re-indexed. 
- if (!isMongos) { - assert.commandWorked(coll.reIndex()); - assertIndexHasCollation({a: 1}, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - assertIndexHasCollation({b: 1}, { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false, - version: "57.1", - }); - assertIndexHasCollation({d: 1}, {locale: "simple"}); - assertIndexHasCollation({c: 1}, {locale: "simple"}); - } - - coll.drop(); - - // - // Creating an index with a collation. - // - - // Attempting to build an index with an invalid collation should fail. - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: "not an object"})); - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {}})); - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {blah: 1}})); - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", blah: 1}})); - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "xx"}})); - assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", strength: 99}})); - - // Attempting to create an index whose collation version does not match the collator version - // produced by ICU should result in failure with a special error code. - assert.commandFailedWithCode( - coll.ensureIndex({a: 1}, {collation: {locale: "en", version: "unknownVersion"}}), - ErrorCodes.IncompatibleCollationVersion); - - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); - assertIndexHasCollation({a: 1}, { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false, - version: "57.1", - }); - - assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "en_US"}})); - assertIndexHasCollation({b: 1}, { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false, - version: "57.1", - }); - - assert.commandWorked(coll.createIndexes([{c: 1}, {d: 1}], {collation: {locale: "fr_CA"}})); - assertIndexHasCollation({c: 1}, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - assertIndexHasCollation({d: 1}, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - - assert.commandWorked(coll.createIndexes([{e: 1}], {collation: {locale: "simple"}})); - assertIndexHasCollation({e: 1}, {locale: "simple"}); - - // Test that an index with a non-simple collation contains collator-generated comparison keys - // rather than the verbatim indexed strings. 
- if (db.getMongo().useReadCommands()) { - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); - assert.commandWorked(coll.createIndex({b: 1})); - assert.writeOK(coll.insert({a: "foo", b: "foo"})); - assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount()); - assert.neq("foo", - coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a); - assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount()); - assert.eq("foo", - coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b); - } - - // Test that a query with a string comparison can use an index with a non-simple collation if it - // has a matching collation. - if (db.getMongo().useReadCommands()) { - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); - - // Query has simple collation, but index has fr_CA collation. - explainRes = coll.find({a: "foo"}).explain(); - assert.commandWorked(explainRes); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN")); - - // Query has en_US collation, but index has fr_CA collation. - explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain(); - assert.commandWorked(explainRes); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN")); - - // Matching collations. - explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain(); - assert.commandWorked(explainRes); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); - } - - // Should not be possible to create a text index with an explicit non-simple collation. +} + +coll.drop(); + +// +// Creating an index with a collation. +// + +// Attempting to build an index with an invalid collation should fail. +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: "not an object"})); +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {}})); +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {blah: 1}})); +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", blah: 1}})); +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "xx"}})); +assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", strength: 99}})); + +// Attempting to create an index whose collation version does not match the collator version +// produced by ICU should result in failure with a special error code. 
+assert.commandFailedWithCode( + coll.ensureIndex({a: 1}, {collation: {locale: "en", version: "unknownVersion"}}), + ErrorCodes.IncompatibleCollationVersion); + +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); +assertIndexHasCollation({a: 1}, { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false, + version: "57.1", +}); + +assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "en_US"}})); +assertIndexHasCollation({b: 1}, { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false, + version: "57.1", +}); + +assert.commandWorked(coll.createIndexes([{c: 1}, {d: 1}], {collation: {locale: "fr_CA"}})); +assertIndexHasCollation({c: 1}, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); +assertIndexHasCollation({d: 1}, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +assert.commandWorked(coll.createIndexes([{e: 1}], {collation: {locale: "simple"}})); +assertIndexHasCollation({e: 1}, {locale: "simple"}); + +// Test that an index with a non-simple collation contains collator-generated comparison keys +// rather than the verbatim indexed strings. +if (db.getMongo().useReadCommands()) { coll.drop(); - assert.commandFailed(coll.createIndex({a: "text"}, {collation: {locale: "en"}})); - - // Text index builds which inherit a non-simple default collation should fail. + assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); + assert.commandWorked(coll.createIndex({b: 1})); + assert.writeOK(coll.insert({a: "foo", b: "foo"})); + assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().itcount()); + assert.neq("foo", coll.find().collation({locale: "fr_CA"}).hint({a: 1}).returnKey().next().a); + assert.eq(1, coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().itcount()); + assert.eq("foo", coll.find().collation({locale: "fr_CA"}).hint({b: 1}).returnKey().next().b); +} + +// Test that a query with a string comparison can use an index with a non-simple collation if it +// has a matching collation. +if (db.getMongo().useReadCommands()) { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}})); - assert.commandFailed(coll.createIndex({a: "text"})); + assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); - // Text index build should succeed on a collection with a non-simple default collation if it - // explicitly overrides the default with {locale: "simple"}. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}})); - assert.commandWorked(coll.createIndex({a: "text"}, {collation: {locale: "simple"}})); + // Query has simple collation, but index has fr_CA collation. + explainRes = coll.find({a: "foo"}).explain(); + assert.commandWorked(explainRes); + assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN")); - // - // Collation tests for aggregation. 
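Aside: a minimal standalone mongo shell sketch of what the index-collation hunks above assert, namely that createIndex() stores the fully expanded ICU collation document rather than the {locale: ...} shorthand. The scratch collection name collation_index_sketch is an assumption of this example, not part of the patch.

    // Illustrative only: read back the expanded collation from the index catalog.
    var ixSketch = db.collation_index_sketch;  // scratch collection, hypothetical name
    ixSketch.drop();
    assert.commandWorked(ixSketch.createIndex({a: 1}, {collation: {locale: "en_US"}}));
    var spec = ixSketch.getIndexes().filter(function(ix) {
        return ix.name === "a_1";
    })[0];
    printjson(spec.collation);  // includes caseLevel, strength, backwards, version, etc.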
- // + // Query has en_US collation, but index has fr_CA collation. + explainRes = coll.find({a: "foo"}).collation({locale: "en_US"}).explain(); + assert.commandWorked(explainRes); + assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "COLLSCAN")); - // Aggregation should return correct results when collation specified and collection does not + // Matching collations. + explainRes = coll.find({a: "foo"}).collation({locale: "fr_CA"}).explain(); + assert.commandWorked(explainRes); + assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); +} + +// Should not be possible to create a text index with an explicit non-simple collation. +coll.drop(); +assert.commandFailed(coll.createIndex({a: "text"}, {collation: {locale: "en"}})); + +// Text index builds which inherit a non-simple default collation should fail. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}})); +assert.commandFailed(coll.createIndex({a: "text"})); + +// Text index build should succeed on a collection with a non-simple default collation if it +// explicitly overrides the default with {locale: "simple"}. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en"}})); +assert.commandWorked(coll.createIndex({a: "text"}, {collation: {locale: "simple"}})); + +// +// Collation tests for aggregation. +// + +// Aggregation should return correct results when collation specified and collection does not +// exist. +coll.drop(); +assert.eq(0, coll.aggregate([], {collation: {locale: "fr"}}).itcount()); + +// Aggregation should return correct results when collation specified and collection does exist. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}]).itcount()); +assert.eq(1, + coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "en_US", strength: 2}}) + .itcount()); + +// Aggregation should return correct results when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount()); + +// Aggregation should return correct results when "simple" collation specified and collection +// has a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "simple"}}).itcount()); + +// Aggregation should select compatible index when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); +var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Aggregation should not use index when no collation specified and collection default +// collation is incompatible with index collation. 
+coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); +var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// Explain of aggregation with collation should succeed. +assert.commandWorked(coll.explain().aggregate([], {collation: {locale: "fr"}})); + +// +// Collation tests for count. +// + +// Count should return correct results when collation specified and collection does not exist. +coll.drop(); +assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count()); + +// Count should return correct results when collation specified and collection does exist. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +assert.eq(0, coll.find({str: "FOO"}).count()); +assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count()); +assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).count()); +assert.eq(0, coll.count({str: "FOO"})); +assert.eq(0, coll.count({str: "FOO"}, {collation: {locale: "en_US"}})); +assert.eq(1, coll.count({str: "FOO"}, {collation: {locale: "en_US", strength: 2}})); + +// Count should return correct results when no collation specified and collection has a default +// collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.eq(1, coll.find({str: "FOO"}).count()); + +// Count should return correct results when "simple" collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.eq(0, coll.find({str: "FOO"}).collation({locale: "simple"}).count()); + +// Count should return correct results when collation specified and when run with explain. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +explainRes = coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).count(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN"); +assert.neq(null, planStage); +assert.eq(0, planStage.advanced); +explainRes = coll.explain("executionStats") + .find({str: "FOO"}) + .collation({locale: "en_US", strength: 2}) + .count(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN"); +assert.neq(null, planStage); +assert.eq(1, planStage.advanced); + +// Explain of COUNT_SCAN stage should include index collation. 
+coll.drop(); +assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); +explainRes = coll.explain("executionStats").find({a: 5}).count(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Explain of COUNT_SCAN stage should include index collation when index collation is +// inherited from collection default. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +assert.commandWorked(coll.createIndex({a: 1})); +explainRes = coll.explain("executionStats").find({a: 5}).count(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Should be able to use COUNT_SCAN for queries over strings. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +assert.commandWorked(coll.createIndex({a: 1})); +explainRes = coll.explain("executionStats").find({a: "foo"}).count(); +assert.commandWorked(explainRes); +assert(planHasStage(db, explainRes.executionStats.executionStages, "COUNT_SCAN")); +assert(!planHasStage(db, explainRes.executionStats.executionStages, "FETCH")); + +// +// Collation tests for distinct. +// + +// Distinct should return correct results when collation specified and collection does not +// exist. +coll.drop(); +assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}).length); + +// Distinct should return correct results when collation specified and no indexes exist. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "FOO"})); +var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}); +assert.eq(1, res.length); +assert.eq("foo", res[0].toLowerCase()); +assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length); +assert.eq(2, + coll.distinct("_id", {str: "foo"}, {collation: {locale: "en_US", strength: 2}}).length); + +// Distinct should return correct results when collation specified and compatible index exists. +coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}); +res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}); +assert.eq(1, res.length); +assert.eq("foo", res[0].toLowerCase()); +assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length); + +// Distinct should return correct results when no collation specified and collection has a +// default collation. 
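Aside: a minimal standalone sketch of the distinct() behaviour exercised by the surrounding hunks, assuming a scratch collection named collation_distinct_sketch (not part of the patch): a strength-2 (case-insensitive) default collation folds "foo" and "FOO" into one value, while an explicit "simple" collation keeps them distinct.

    // Illustrative only: default vs. "simple" collation on distinct().
    var distinctSketch = db.collation_distinct_sketch;  // scratch collection, hypothetical name
    distinctSketch.drop();
    assert.commandWorked(
        db.createCollection(distinctSketch.getName(), {collation: {locale: "en_US", strength: 2}}));
    assert.writeOK(distinctSketch.insert([{str: "foo"}, {str: "FOO"}]));
    assert.eq(1, distinctSketch.distinct("str").length);                                      // default collation folds case
    assert.eq(2, distinctSketch.distinct("str", {}, {collation: {locale: "simple"}}).length);  // simple keeps both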
+coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.writeOK(coll.insert({str: "FOO"})); +assert.eq(1, coll.distinct("str").length); +assert.eq(2, coll.distinct("_id", {str: "foo"}).length); + +// Distinct should return correct results when "simple" collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.writeOK(coll.insert({str: "FOO"})); +assert.eq(2, coll.distinct("str", {}, {collation: {locale: "simple"}}).length); +assert.eq(1, coll.distinct("_id", {str: "foo"}, {collation: {locale: "simple"}}).length); + +// Distinct should select compatible index when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); +var explain = coll.explain("queryPlanner").distinct("a"); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); + +// Distinct scan on strings can be used over an index with a collation when the predicate has +// exact bounds. +explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: "foo"}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); + +// Distinct scan cannot be used over an index with a collation when the predicate has inexact +// bounds. +explain = coll.explain("queryPlanner").distinct("a", {a: {$exists: true}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); + +// Distinct scan can be used without a fetch when predicate has exact non-string bounds. +explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: 3}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); + +// Distinct should not use index when no collation specified and collection default collation is +// incompatible with index collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); +var explain = coll.explain("queryPlanner").distinct("a"); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// Explain of DISTINCT_SCAN stage should include index collation. 
+coll.drop(); +assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}})); +explainRes = coll.explain("executionStats").distinct("str", {}, {collation: {locale: "fr_CA"}}); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Explain of DISTINCT_SCAN stage should include index collation when index collation is +// inherited from collection default. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +assert.commandWorked(coll.createIndex({str: 1})); +explainRes = coll.explain("executionStats").distinct("str"); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// +// Collation tests for find. +// + +if (db.getMongo().useReadCommands()) { + // Find should return correct results when collation specified and collection does not // exist. coll.drop(); - assert.eq(0, coll.aggregate([], {collation: {locale: "fr"}}).itcount()); + assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount()); - // Aggregation should return correct results when collation specified and collection does exist. + // Find should return correct results when collation specified and filter is a match on _id. coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.eq(0, coll.aggregate([{$match: {str: "FOO"}}]).itcount()); + assert.writeOK(coll.insert({_id: "foo"})); + assert.eq(0, coll.find({_id: "FOO"}).itcount()); + assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount()); + assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount()); + assert.writeOK(coll.remove({_id: "foo"})); + + // Find should return correct results when collation specified and no indexes exist. + assert.eq(0, coll.find({str: "FOO"}).itcount()); + assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount()); + assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount()); assert.eq(1, - coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "en_US", strength: 2}}) - .itcount()); - - // Aggregation should return correct results when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.eq(1, coll.aggregate([{$match: {str: "FOO"}}]).itcount()); + coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount()); - // Aggregation should return correct results when "simple" collation specified and collection - // has a default collation. 
- coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.eq(0, - coll.aggregate([{$match: {str: "FOO"}}], {collation: {locale: "simple"}}).itcount()); + // Find should return correct results when collation specified and compatible index exists. + assert.commandWorked(coll.ensureIndex({str: 1}, {collation: {locale: "en_US", strength: 2}})); + assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount()); + assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount()); + assert.eq( + 1, + coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount()); + assert.eq(1, + coll.find({str: {$ne: "FOO"}}) + .collation({locale: "en_US", strength: 2}) + .hint({str: 1}) + .itcount()); + assert.commandWorked(coll.dropIndexes()); - // Aggregation should select compatible index when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); - var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); + // Find should return correct results when collation specified and compatible partial index + // exists. + assert.commandWorked(coll.ensureIndex({str: 1}, { + partialFilterExpression: {str: {$lte: "FOO"}}, + collation: {locale: "en_US", strength: 2} + })); + assert.eq( + 1, + coll.find({str: "foo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount()); + assert.writeOK(coll.insert({_id: 3, str: "goo"})); + assert.eq( + 0, + coll.find({str: "goo"}).collation({locale: "en_US", strength: 2}).hint({str: 1}).itcount()); + assert.writeOK(coll.remove({_id: 3})); + assert.commandWorked(coll.dropIndexes()); - // Aggregation should not use index when no collation specified and collection default - // collation is incompatible with index collation. + // Queries that use a index with a non-matching collation should add a sort + // stage if needed. coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); - var explain = coll.explain("queryPlanner").aggregate([{$match: {a: "foo"}}]); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - // Explain of aggregation with collation should succeed. - assert.commandWorked(coll.explain().aggregate([], {collation: {locale: "fr"}})); + assert.writeOK(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}])); - // - // Collation tests for count. - // - - // Count should return correct results when collation specified and collection does not exist. - coll.drop(); - assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count()); + // Ensure results from an index that doesn't match the query collation are sorted to match + // the requested collation. + assert.commandWorked(coll.ensureIndex({a: 1})); + var res = + coll.find({a: {'$exists': true}}, {_id: 0}).collation({locale: "en_US", strength: 3}).sort({ + a: 1 + }); + assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]); - // Count should return correct results when collation specified and collection does exist. + // Find should return correct results when collation specified and query contains $expr. 
coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.eq(0, coll.find({str: "FOO"}).count()); - assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).count()); - assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).count()); - assert.eq(0, coll.count({str: "FOO"})); - assert.eq(0, coll.count({str: "FOO"}, {collation: {locale: "en_US"}})); - assert.eq(1, coll.count({str: "FOO"}, {collation: {locale: "en_US", strength: 2}})); - - // Count should return correct results when no collation specified and collection has a default - // collation. + assert.writeOK(coll.insert([{a: "A"}, {a: "B"}])); + assert.eq( + 1, + coll.find({$expr: {$eq: ["$a", "a"]}}).collation({locale: "en_US", strength: 2}).itcount()); +} + +// Find should return correct results when no collation specified and collection has a default +// collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({str: "foo"})); +assert.writeOK(coll.insert({str: "FOO"})); +assert.writeOK(coll.insert({str: "bar"})); +assert.eq(3, coll.find({str: {$in: ["foo", "bar"]}}).itcount()); +assert.eq(2, coll.find({str: "foo"}).itcount()); +assert.eq(1, coll.find({str: {$ne: "foo"}}).itcount()); +assert.eq([{str: "bar"}, {str: "foo"}, {str: "FOO"}], + coll.find({}, {_id: 0, str: 1}).sort({str: 1}).toArray()); + +// Find with idhack should return correct results when no collation specified and collection has +// a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: "foo"})); +assert.eq(1, coll.find({_id: "FOO"}).itcount()); + +// Find on _id should use idhack stage when query inherits collection default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +explainRes = coll.explain("executionStats").find({_id: "foo"}).finish(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); +assert.neq(null, planStage); + +// Find should return correct results for query containing $expr when no collation specified and +// collection has a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert([{a: "A"}, {a: "B"}])); +assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount()); + +if (db.getMongo().useReadCommands()) { + // Find should return correct results when "simple" collation specified and collection has a + // default collation. 
coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); assert.writeOK(coll.insert({str: "foo"})); - assert.eq(1, coll.find({str: "FOO"}).count()); + assert.writeOK(coll.insert({str: "FOO"})); + assert.writeOK(coll.insert({str: "bar"})); + assert.eq(2, coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount()); + assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount()); + assert.eq( + [{str: "FOO"}, {str: "bar"}, {str: "foo"}], + coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray()); - // Count should return correct results when "simple" collation specified and collection has a + // Find on _id should return correct results when query collation differs from collection // default collation. coll.drop(); assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.eq(0, coll.find({str: "FOO"}).collation({locale: "simple"}).count()); + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}})); + assert.writeOK(coll.insert({_id: "foo"})); + assert.writeOK(coll.insert({_id: "FOO"})); + assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount()); - // Count should return correct results when collation specified and when run with explain. + // Find on _id should use idhack stage when explicitly given query collation matches + // collection default. coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); + assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); explainRes = - coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).count(); + coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish(); assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN"); - assert.neq(null, planStage); - assert.eq(0, planStage.advanced); - explainRes = coll.explain("executionStats") - .find({str: "FOO"}) - .collation({locale: "en_US", strength: 2}) - .count(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "COLLSCAN"); + planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); assert.neq(null, planStage); - assert.eq(1, planStage.advanced); - // Explain of COUNT_SCAN stage should include index collation. + // Find on _id should not use idhack stage when query collation does not match collection + // default. 
coll.drop(); - assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); - explainRes = coll.explain("executionStats").find({a: 5}).count(); + assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); + explainRes = + coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish(); assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN"); - assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - - // Explain of COUNT_SCAN stage should include index collation when index collation is - // inherited from collection default. + planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); + assert.eq(null, planStage); +} + +// Find should select compatible index when no collation specified and collection has a default +// collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); +var explain = coll.find({a: "foo"}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Find should select compatible index when no collation specified and collection default +// collation is "simple". +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "simple"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); +var explain = coll.find({a: "foo"}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Find should not use index when no collation specified, index collation is "simple", and +// collection has a non-"simple" default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); +var explain = coll.find({a: "foo"}).explain("queryPlanner"); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// Find should select compatible index when "simple" collation specified and collection has a +// non-"simple" default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); +var explain = coll.find({a: "foo"}).collation({locale: "simple"}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Find should return correct results when collation specified and run with explain. +coll.drop(); +assert.writeOK(coll.insert({str: "foo"})); +explainRes = + coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).finish(); +assert.commandWorked(explainRes); +assert.eq(0, explainRes.executionStats.nReturned); +explainRes = coll.explain("executionStats") + .find({str: "FOO"}) + .collation({locale: "en_US", strength: 2}) + .finish(); +assert.commandWorked(explainRes); +assert.eq(1, explainRes.executionStats.nReturned); + +// Explain of find should include query collation. 
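Aside: a minimal standalone sketch of the per-query collation behaviour the find() hunks above verify, assuming a scratch collection named collation_find_sketch (not part of the patch): without a collation the query is case-sensitive, and a strength-2 collation makes {str: "FOO"} match {str: "foo"}.

    // Illustrative only: per-query collation changes which documents a find() matches.
    var findSketch = db.collation_find_sketch;  // scratch collection, hypothetical name
    findSketch.drop();
    assert.writeOK(findSketch.insert({str: "foo"}));
    assert.eq(0, findSketch.find({str: "FOO"}).itcount());
    assert.eq(1, findSketch.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount());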
+coll.drop(); +explainRes = + coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish(); +assert.commandWorked(explainRes); +assert.eq(getQueryCollation(explainRes), { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Explain of find should include query collation when inherited from collection default. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +explainRes = coll.explain("executionStats").find({str: "foo"}).finish(); +assert.commandWorked(explainRes); +assert.eq(getQueryCollation(explainRes), { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Explain of IXSCAN stage should include index collation. +coll.drop(); +assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}})); +explainRes = + coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +// Explain of IXSCAN stage should include index collation when index collation is inherited from +// collection default. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); +assert.commandWorked(coll.createIndex({str: 1})); +explainRes = coll.explain("executionStats").find({str: "foo"}).finish(); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN"); +assert.neq(null, planStage); +assert.eq(planStage.collation, { + locale: "fr_CA", + caseLevel: false, + caseFirst: "off", + strength: 3, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: true, + version: "57.1", +}); + +if (!db.getMongo().useReadCommands()) { + // find() shell helper should error if a collation is specified and the shell is not using + // read commands. coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - assert.commandWorked(coll.createIndex({a: 1})); - explainRes = coll.explain("executionStats").find({a: 5}).count(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "COUNT_SCAN"); - assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", + assert.writeOK(coll.insert({_id: 1, str: "foo"})); + assert.writeOK(coll.insert({_id: 2, str: "bar"})); + assert.throws(function() { + coll.find().collation({locale: "fr"}).itcount(); }); - - // Should be able to use COUNT_SCAN for queries over strings. 
- coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - assert.commandWorked(coll.createIndex({a: 1})); - explainRes = coll.explain("executionStats").find({a: "foo"}).count(); - assert.commandWorked(explainRes); - assert(planHasStage(db, explainRes.executionStats.executionStages, "COUNT_SCAN")); - assert(!planHasStage(db, explainRes.executionStats.executionStages, "FETCH")); - - // - // Collation tests for distinct. - // - - // Distinct should return correct results when collation specified and collection does not - // exist. - coll.drop(); - assert.eq(0, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}).length); - - // Distinct should return correct results when collation specified and no indexes exist. +} + +// +// Collation tests for findAndModify. +// + +// findAndModify should return correct results when collation specified and collection does not +// exist. +coll.drop(); +assert.eq( + null, + coll.findAndModify( + {query: {str: "bar"}, update: {$set: {str: "baz"}}, new: true, collation: {locale: "fr"}})); + +// Update-findAndModify should return correct results when collation specified. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +assert.eq({_id: 1, str: "baz"}, coll.findAndModify({ + query: {str: "FOO"}, + update: {$set: {str: "baz"}}, + new: true, + collation: {locale: "en_US", strength: 2} +})); + +// Explain of update-findAndModify should return correct results when collation specified. +explainRes = coll.explain("executionStats").findAndModify({ + query: {str: "BAR"}, + update: {$set: {str: "baz"}}, + new: true, + collation: {locale: "en_US", strength: 2} +}); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE"); +assert.neq(null, planStage); +assert.eq(1, planStage.nWouldModify); + +// Delete-findAndModify should return correct results when collation specified. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +assert.eq({_id: 1, str: "foo"}, + coll.findAndModify( + {query: {str: "FOO"}, remove: true, collation: {locale: "en_US", strength: 2}})); + +// Explain of delete-findAndModify should return correct results when collation specified. +explainRes = coll.explain("executionStats").findAndModify({ + query: {str: "BAR"}, + remove: true, + collation: {locale: "en_US", strength: 2} +}); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE"); +assert.neq(null, planStage); +assert.eq(1, planStage.nWouldDelete); + +// findAndModify should return correct results when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.eq({_id: 1, str: "foo"}, coll.findAndModify({query: {str: "FOO"}, update: {$set: {x: 1}}})); +assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, remove: true})); + +// findAndModify should return correct results when "simple" collation specified and collection +// has a default collation. 
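Aside: a minimal standalone sketch of the findAndModify collation option exercised by the hunks above, assuming a scratch collection named collation_fam_sketch (not part of the patch): a case-insensitive collation lets the query {str: "FOO"} select the lower-case document.

    // Illustrative only: findAndModify() with an explicit collation.
    var famSketch = db.collation_fam_sketch;  // scratch collection, hypothetical name
    famSketch.drop();
    assert.writeOK(famSketch.insert({_id: 1, str: "foo"}));
    var famDoc = famSketch.findAndModify({
        query: {str: "FOO"},
        update: {$set: {touched: true}},
        new: true,
        collation: {locale: "en_US", strength: 2}
    });
    assert.eq(1, famDoc._id);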
+coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.eq(null, + coll.findAndModify( + {query: {str: "FOO"}, update: {$set: {x: 1}}, collation: {locale: "simple"}})); +assert.eq(null, + coll.findAndModify({query: {str: "FOO"}, remove: true, collation: {locale: "simple"}})); + +// +// Collation tests for mapReduce. +// + +// mapReduce should return "collection doesn't exist" error when collation specified and +// collection does not exist. +coll.drop(); +assert.throws(function() { + coll.mapReduce( + function() { + emit(this.str, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, collation: {locale: "fr"}}); +}); + +// mapReduce should return correct results when collation specified and no indexes exist. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +var mapReduceOut = coll.mapReduce( + function() { + emit(this.str, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}); +assert.commandWorked(mapReduceOut); +assert.eq(mapReduceOut.results.length, 1); + +// mapReduce should return correct results when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +var mapReduceOut = coll.mapReduce( + function() { + emit(this.str, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {str: "FOO"}}); +assert.commandWorked(mapReduceOut); +assert.eq(mapReduceOut.results.length, 1); + +// mapReduce should return correct results when "simple" collation specified and collection has +// a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +var mapReduceOut = coll.mapReduce( + function() { + emit(this.str, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "simple"}}); +assert.commandWorked(mapReduceOut); +assert.eq(mapReduceOut.results.length, 0); + +// +// Collation tests for remove. +// + +if (db.getMongo().writeMode() === "commands") { + // Remove should succeed when collation specified and collection does not exist. + coll.drop(); + assert.writeOK(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}})); + + // Remove should return correct results when collation specified. coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "FOO"})); - var res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(1, res.length); - assert.eq("foo", res[0].toLowerCase()); - assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length); - assert.eq( - 2, coll.distinct("_id", {str: "foo"}, {collation: {locale: "en_US", strength: 2}}).length); - - // Distinct should return correct results when collation specified and compatible index exists. 
- coll.createIndex({str: 1}, {collation: {locale: "en_US", strength: 2}}); - res = coll.distinct("str", {}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(1, res.length); - assert.eq("foo", res[0].toLowerCase()); - assert.eq(2, coll.distinct("str", {}, {collation: {locale: "en_US", strength: 3}}).length); - - // Distinct should return correct results when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.writeOK(coll.insert({str: "FOO"})); - assert.eq(1, coll.distinct("str").length); - assert.eq(2, coll.distinct("_id", {str: "foo"}).length); - - // Distinct should return correct results when "simple" collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.writeOK(coll.insert({str: "FOO"})); - assert.eq(2, coll.distinct("str", {}, {collation: {locale: "simple"}}).length); - assert.eq(1, coll.distinct("_id", {str: "foo"}, {collation: {locale: "simple"}}).length); - - // Distinct should select compatible index when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); - var explain = coll.explain("queryPlanner").distinct("a"); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - - // Distinct scan on strings can be used over an index with a collation when the predicate has - // exact bounds. - explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: "foo"}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - - // Distinct scan cannot be used over an index with a collation when the predicate has inexact - // bounds. - explain = coll.explain("queryPlanner").distinct("a", {a: {$exists: true}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - - // Distinct scan can be used without a fetch when predicate has exact non-string bounds. - explain = coll.explain("queryPlanner").distinct("a", {a: {$gt: 3}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - - // Distinct should not use index when no collation specified and collection default collation is - // incompatible with index collation. 
- coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); - var explain = coll.explain("queryPlanner").distinct("a"); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + writeRes = + coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}}); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nRemoved); - // Explain of DISTINCT_SCAN stage should include index collation. + // Explain of remove should return correct results when collation specified. coll.drop(); - assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}})); - explainRes = coll.explain("executionStats").distinct("str", {}, {collation: {locale: "fr_CA"}}); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN"); - assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", + assert.writeOK(coll.insert({_id: 1, str: "foo"})); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + explainRes = coll.explain("executionStats").remove({str: "FOO"}, { + justOne: true, + collation: {locale: "en_US", strength: 2} }); - - // Explain of DISTINCT_SCAN stage should include index collation when index collation is - // inherited from collection default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - assert.commandWorked(coll.createIndex({str: 1})); - explainRes = coll.explain("executionStats").distinct("str"); assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "DISTINCT_SCAN"); + planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE"); assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - - // - // Collation tests for find. - // - - if (db.getMongo().useReadCommands()) { - // Find should return correct results when collation specified and collection does not - // exist. - coll.drop(); - assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount()); - - // Find should return correct results when collation specified and filter is a match on _id. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.writeOK(coll.insert({_id: "foo"})); - assert.eq(0, coll.find({_id: "FOO"}).itcount()); - assert.eq(0, coll.find({_id: "FOO"}).collation({locale: "en_US"}).itcount()); - assert.eq(1, coll.find({_id: "FOO"}).collation({locale: "en_US", strength: 2}).itcount()); - assert.writeOK(coll.remove({_id: "foo"})); - - // Find should return correct results when collation specified and no indexes exist. 
- assert.eq(0, coll.find({str: "FOO"}).itcount()); - assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).itcount()); - assert.eq(1, coll.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).itcount()); - assert.eq( - 1, coll.find({str: {$ne: "FOO"}}).collation({locale: "en_US", strength: 2}).itcount()); - - // Find should return correct results when collation specified and compatible index exists. - assert.commandWorked( - coll.ensureIndex({str: 1}, {collation: {locale: "en_US", strength: 2}})); - assert.eq(0, coll.find({str: "FOO"}).hint({str: 1}).itcount()); - assert.eq(0, coll.find({str: "FOO"}).collation({locale: "en_US"}).hint({str: 1}).itcount()); - assert.eq(1, - coll.find({str: "FOO"}) - .collation({locale: "en_US", strength: 2}) - .hint({str: 1}) - .itcount()); - assert.eq(1, - coll.find({str: {$ne: "FOO"}}) - .collation({locale: "en_US", strength: 2}) - .hint({str: 1}) - .itcount()); - assert.commandWorked(coll.dropIndexes()); - - // Find should return correct results when collation specified and compatible partial index - // exists. - assert.commandWorked(coll.ensureIndex({str: 1}, { - partialFilterExpression: {str: {$lte: "FOO"}}, - collation: {locale: "en_US", strength: 2} - })); - assert.eq(1, - coll.find({str: "foo"}) - .collation({locale: "en_US", strength: 2}) - .hint({str: 1}) - .itcount()); - assert.writeOK(coll.insert({_id: 3, str: "goo"})); - assert.eq(0, - coll.find({str: "goo"}) - .collation({locale: "en_US", strength: 2}) - .hint({str: 1}) - .itcount()); - assert.writeOK(coll.remove({_id: 3})); - assert.commandWorked(coll.dropIndexes()); - - // Queries that use a index with a non-matching collation should add a sort - // stage if needed. - coll.drop(); - assert.writeOK(coll.insert([{a: "A"}, {a: "B"}, {a: "b"}, {a: "a"}])); - - // Ensure results from an index that doesn't match the query collation are sorted to match - // the requested collation. - assert.commandWorked(coll.ensureIndex({a: 1})); - var res = coll.find({a: {'$exists': true}}, {_id: 0}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1}); - assert.eq(res.toArray(), [{a: "a"}, {a: "A"}, {a: "b"}, {a: "B"}]); - - // Find should return correct results when collation specified and query contains $expr. - coll.drop(); - assert.writeOK(coll.insert([{a: "A"}, {a: "B"}])); - assert.eq(1, - coll.find({$expr: {$eq: ["$a", "a"]}}) - .collation({locale: "en_US", strength: 2}) - .itcount()); - } - - // Find should return correct results when no collation specified and collection has a default - // collation. + assert.eq(1, planStage.nWouldDelete); +} + +// Remove should return correct results when no collation specified and collection has a default +// collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +writeRes = coll.remove({str: "FOO"}, {justOne: true}); +assert.writeOK(writeRes); +assert.eq(1, writeRes.nRemoved); + +// Remove with idhack should return correct results when no collation specified and collection +// has a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: "foo"})); +writeRes = coll.remove({_id: "FOO"}, {justOne: true}); +assert.writeOK(writeRes); +assert.eq(1, writeRes.nRemoved); + +// Remove on _id should use idhack stage when query inherits collection default collation. 
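Aside: a minimal standalone sketch of the remove() collation option exercised by the hunks above, assuming a scratch collection named collation_remove_sketch (not part of the patch) and a shell in write-command mode, as the tests themselves require: a case-insensitive collation deletes the lower-case document.

    // Illustrative only: remove() with an explicit collation (write commands mode).
    var removeSketch = db.collation_remove_sketch;  // scratch collection, hypothetical name
    removeSketch.drop();
    assert.writeOK(removeSketch.insert({_id: 1, str: "foo"}));
    var removeRes =
        removeSketch.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}});
    assert.writeOK(removeRes);
    assert.eq(1, removeRes.nRemoved);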
+coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +explainRes = coll.explain("executionStats").remove({_id: "foo"}); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); +assert.neq(null, planStage); + +if (db.getMongo().writeMode() === "commands") { + // Remove should return correct results when "simple" collation specified and collection has + // a default collation. coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.writeOK(coll.insert({str: "FOO"})); - assert.writeOK(coll.insert({str: "bar"})); - assert.eq(3, coll.find({str: {$in: ["foo", "bar"]}}).itcount()); - assert.eq(2, coll.find({str: "foo"}).itcount()); - assert.eq(1, coll.find({str: {$ne: "foo"}}).itcount()); - assert.eq([{str: "bar"}, {str: "foo"}, {str: "FOO"}], - coll.find({}, {_id: 0, str: 1}).sort({str: 1}).toArray()); + assert.writeOK(coll.insert({_id: 1, str: "foo"})); + writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "simple"}}); + assert.writeOK(writeRes); + assert.eq(0, writeRes.nRemoved); - // Find with idhack should return correct results when no collation specified and collection has - // a default collation. + // Remove on _id should return correct results when "simple" collation specified and + // collection has a default collation. coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); assert.writeOK(coll.insert({_id: "foo"})); - assert.eq(1, coll.find({_id: "FOO"}).itcount()); + writeRes = coll.remove({_id: "FOO"}, {justOne: true, collation: {locale: "simple"}}); + assert.writeOK(writeRes); + assert.eq(0, writeRes.nRemoved); - // Find on _id should use idhack stage when query inherits collection default collation. + // Remove on _id should use idhack stage when explicit query collation matches collection + // default. coll.drop(); assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = coll.explain("executionStats").find({_id: "foo"}).finish(); + explainRes = + coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "en_US"}}); assert.commandWorked(explainRes); planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); assert.neq(null, planStage); - // Find should return correct results for query containing $expr when no collation specified and - // collection has a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert([{a: "A"}, {a: "B"}])); - assert.eq(1, coll.find({$expr: {$eq: ["$a", "a"]}}).itcount()); - - if (db.getMongo().useReadCommands()) { - // Find should return correct results when "simple" collation specified and collection has a - // default collation. 
- coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({str: "foo"})); - assert.writeOK(coll.insert({str: "FOO"})); - assert.writeOK(coll.insert({str: "bar"})); - assert.eq(2, - coll.find({str: {$in: ["foo", "bar"]}}).collation({locale: "simple"}).itcount()); - assert.eq(1, coll.find({str: "foo"}).collation({locale: "simple"}).itcount()); - assert.eq( - [{str: "FOO"}, {str: "bar"}, {str: "foo"}], - coll.find({}, {_id: 0, str: 1}).sort({str: 1}).collation({locale: "simple"}).toArray()); - - // Find on _id should return correct results when query collation differs from collection - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 3}})); - assert.writeOK(coll.insert({_id: "foo"})); - assert.writeOK(coll.insert({_id: "FOO"})); - assert.eq(2, coll.find({_id: "foo"}).collation({locale: "en_US", strength: 2}).itcount()); - - // Find on _id should use idhack stage when explicitly given query collation matches - // collection default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = - coll.explain("executionStats").find({_id: "foo"}).collation({locale: "en_US"}).finish(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.neq(null, planStage); - - // Find on _id should not use idhack stage when query collation does not match collection - // default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = - coll.explain("executionStats").find({_id: "foo"}).collation({locale: "fr_CA"}).finish(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.eq(null, planStage); - } - - // Find should select compatible index when no collation specified and collection has a default - // collation. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}})); - var explain = coll.find({a: "foo"}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // Find should select compatible index when no collation specified and collection default - // collation is "simple". - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "simple"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); - var explain = coll.find({a: "foo"}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // Find should not use index when no collation specified, index collation is "simple", and - // collection has a non-"simple" default collation. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); - var explain = coll.find({a: "foo"}).explain("queryPlanner"); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - // Find should select compatible index when "simple" collation specified and collection has a - // non-"simple" default collation. + // Remove on _id should not use idhack stage when query collation does not match collection + // default. 
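These explain assertions use getPlanStage() from jstests/libs/analyze_plan.js to detect the IDHACK fast path; a minimal sketch of the pattern, assuming command write mode (the collection name "c" is illustrative):

    load("jstests/libs/analyze_plan.js");
    db.createCollection("c", {collation: {locale: "en_US"}});
    // Query collation matches the collection default, so the _id fast path is eligible.
    var ex = db.c.explain("executionStats").remove({_id: "foo"});
    assert.neq(null, getPlanStage(ex.executionStats.executionStages, "IDHACK"));
    // A mismatched collation on a string _id disqualifies IDHACK.
    ex = db.c.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "fr_CA"}});
    assert.eq(null, getPlanStage(ex.executionStats.executionStages, "IDHACK"));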
coll.drop(); assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "simple"}})); - var explain = coll.find({a: "foo"}).collation({locale: "simple"}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // Find should return correct results when collation specified and run with explain. - coll.drop(); - assert.writeOK(coll.insert({str: "foo"})); explainRes = - coll.explain("executionStats").find({str: "FOO"}).collation({locale: "en_US"}).finish(); - assert.commandWorked(explainRes); - assert.eq(0, explainRes.executionStats.nReturned); - explainRes = coll.explain("executionStats") - .find({str: "FOO"}) - .collation({locale: "en_US", strength: 2}) - .finish(); + coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "fr_CA"}}); assert.commandWorked(explainRes); - assert.eq(1, explainRes.executionStats.nReturned); - - // Explain of find should include query collation. - coll.drop(); - explainRes = - coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish(); - assert.commandWorked(explainRes); - assert.eq(getQueryCollation(explainRes), { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); - - // Explain of find should include query collation when inherited from collection default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - explainRes = coll.explain("executionStats").find({str: "foo"}).finish(); - assert.commandWorked(explainRes); - assert.eq(getQueryCollation(explainRes), { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", - }); + planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); + assert.eq(null, planStage); +} - // Explain of IXSCAN stage should include index collation. +if (db.getMongo().writeMode() !== "commands") { + // remove() shell helper should error if a collation is specified and the shell is not using + // write commands. coll.drop(); - assert.commandWorked(coll.createIndex({str: 1}, {collation: {locale: "fr_CA"}})); - explainRes = - coll.explain("executionStats").find({str: "foo"}).collation({locale: "fr_CA"}).finish(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN"); - assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", + assert.writeOK(coll.insert({_id: 1, str: "foo"})); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + assert.throws(function() { + coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}}); }); - - // Explain of IXSCAN stage should include index collation when index collation is inherited from - // collection default. 
- coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "fr_CA"}})); - assert.commandWorked(coll.createIndex({str: 1})); - explainRes = coll.explain("executionStats").find({str: "foo"}).finish(); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IXSCAN"); - assert.neq(null, planStage); - assert.eq(planStage.collation, { - locale: "fr_CA", - caseLevel: false, - caseFirst: "off", - strength: 3, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: true, - version: "57.1", + assert.throws(function() { + coll.explain().remove({str: "FOO"}, + {justOne: true, collation: {locale: "en_US", strength: 2}}); }); +} - if (!db.getMongo().useReadCommands()) { - // find() shell helper should error if a collation is specified and the shell is not using - // read commands. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.throws(function() { - coll.find().collation({locale: "fr"}).itcount(); - }); - } - - // - // Collation tests for findAndModify. - // +// +// Collation tests for update. +// - // findAndModify should return correct results when collation specified and collection does not - // exist. +if (db.getMongo().writeMode() === "commands") { + // Update should succeed when collation specified and collection does not exist. coll.drop(); - assert.eq(null, coll.findAndModify({ - query: {str: "bar"}, - update: {$set: {str: "baz"}}, - new: true, - collation: {locale: "fr"} - })); + assert.writeOK( + coll.update({str: "foo"}, {$set: {other: 99}}, {multi: true, collation: {locale: "fr"}})); - // Update-findAndModify should return correct results when collation specified. + // Update should return correct results when collation specified. coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.eq({_id: 1, str: "baz"}, coll.findAndModify({ - query: {str: "FOO"}, - update: {$set: {str: "baz"}}, - new: true, - collation: {locale: "en_US", strength: 2} - })); - - // Explain of update-findAndModify should return correct results when collation specified. - explainRes = coll.explain("executionStats").findAndModify({ - query: {str: "BAR"}, - update: {$set: {str: "baz"}}, - new: true, - collation: {locale: "en_US", strength: 2} - }); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE"); - assert.neq(null, planStage); - assert.eq(1, planStage.nWouldModify); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + writeRes = coll.update({str: "FOO"}, + {$set: {other: 99}}, + {multi: true, collation: {locale: "en_US", strength: 2}}); + assert.eq(2, writeRes.nModified); - // Delete-findAndModify should return correct results when collation specified. + // Explain of update should return correct results when collation specified. coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - assert.eq({_id: 1, str: "foo"}, - coll.findAndModify( - {query: {str: "FOO"}, remove: true, collation: {locale: "en_US", strength: 2}})); - - // Explain of delete-findAndModify should return correct results when collation specified. 
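findAndModify takes the collation as a top-level field of its argument document; a minimal sketch (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert([{_id: 1, str: "foo"}, {_id: 2, str: "bar"}]);
    // Case-insensitive collation lets "FOO" select the {str: "foo"} document.
    db.c.findAndModify({
        query: {str: "FOO"},
        update: {$set: {str: "baz"}},
        new: true,
        collation: {locale: "en_US", strength: 2}
    });  // returns {_id: 1, str: "baz"}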
- explainRes = coll.explain("executionStats").findAndModify({ - query: {str: "BAR"}, - remove: true, + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + explainRes = coll.explain("executionStats").update({str: "FOO"}, {$set: {other: 99}}, { + multi: true, collation: {locale: "en_US", strength: 2} }); assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE"); + planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE"); assert.neq(null, planStage); - assert.eq(1, planStage.nWouldDelete); - - // findAndModify should return correct results when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.eq({_id: 1, str: "foo"}, - coll.findAndModify({query: {str: "FOO"}, update: {$set: {x: 1}}})); - assert.eq({_id: 1, str: "foo", x: 1}, coll.findAndModify({query: {str: "FOO"}, remove: true})); - - // findAndModify should return correct results when "simple" collation specified and collection - // has a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.eq(null, - coll.findAndModify( - {query: {str: "FOO"}, update: {$set: {x: 1}}, collation: {locale: "simple"}})); - assert.eq( - null, - coll.findAndModify({query: {str: "FOO"}, remove: true, collation: {locale: "simple"}})); - - // - // Collation tests for mapReduce. - // - - // mapReduce should return "collection doesn't exist" error when collation specified and - // collection does not exist. - coll.drop(); - assert.throws(function() { - coll.mapReduce( - function() { - emit(this.str, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, collation: {locale: "fr"}}); - }); - - // mapReduce should return correct results when collation specified and no indexes exist. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - var mapReduceOut = coll.mapReduce( - function() { - emit(this.str, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}); - assert.commandWorked(mapReduceOut); - assert.eq(mapReduceOut.results.length, 1); - - // mapReduce should return correct results when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - var mapReduceOut = coll.mapReduce( - function() { - emit(this.str, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, query: {str: "FOO"}}); - assert.commandWorked(mapReduceOut); - assert.eq(mapReduceOut.results.length, 1); - - // mapReduce should return correct results when "simple" collation specified and collection has + assert.eq(2, planStage.nWouldModify); +} + +// Update should return correct results when no collation specified and collection has a default +// collation. 
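An update passes its collation in the options document, and explain surfaces the affected count as nWouldModify on the UPDATE stage; a sketch using getPlanStage() as above (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert([{_id: 1, str: "foo"}, {_id: 2, str: "foo"}]);
    var ex = db.c.explain("executionStats").update(
        {str: "FOO"}, {$set: {other: 99}},
        {multi: true, collation: {locale: "en_US", strength: 2}});
    // getPlanStage(ex.executionStats.executionStages, "UPDATE").nWouldModify === 2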
+coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}); +assert.writeOK(writeRes); +assert.eq(1, writeRes.nMatched); + +// Update with idhack should return correct results when no collation specified and collection +// has a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.writeOK(coll.insert({_id: "foo"})); +writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}); +assert.writeOK(writeRes); +assert.eq(1, writeRes.nMatched); + +// Update on _id should use idhack stage when query inherits collection default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); +explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}); +assert.commandWorked(explainRes); +planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); +assert.neq(null, planStage); + +if (db.getMongo().writeMode() === "commands") { + // Update should return correct results when "simple" collation specified and collection has // a default collation. coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - var mapReduceOut = coll.mapReduce( - function() { - emit(this.str, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, query: {str: "FOO"}, collation: {locale: "simple"}}); - assert.commandWorked(mapReduceOut); - assert.eq(mapReduceOut.results.length, 0); - - // - // Collation tests for remove. - // - - if (db.getMongo().writeMode() === "commands") { - // Remove should succeed when collation specified and collection does not exist. - coll.drop(); - assert.writeOK(coll.remove({str: "foo"}, {justOne: true, collation: {locale: "fr"}})); - - // Remove should return correct results when collation specified. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - writeRes = - coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}}); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nRemoved); - - // Explain of remove should return correct results when collation specified. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - explainRes = coll.explain("executionStats").remove({str: "FOO"}, { - justOne: true, - collation: {locale: "en_US", strength: 2} - }); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "DELETE"); - assert.neq(null, planStage); - assert.eq(1, planStage.nWouldDelete); - } - - // Remove should return correct results when no collation specified and collection has a default - // collation. 
- coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - writeRes = coll.remove({str: "FOO"}, {justOne: true}); + writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}}); assert.writeOK(writeRes); - assert.eq(1, writeRes.nRemoved); + assert.eq(0, writeRes.nModified); - // Remove with idhack should return correct results when no collation specified and collection - // has a default collation. + // Update on _id should return correct results when "simple" collation specified and + // collection has a default collation. coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); assert.writeOK(coll.insert({_id: "foo"})); - writeRes = coll.remove({_id: "FOO"}, {justOne: true}); + writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}}); assert.writeOK(writeRes); - assert.eq(1, writeRes.nRemoved); + assert.eq(0, writeRes.nModified); - // Remove on _id should use idhack stage when query inherits collection default collation. + // Update on _id should use idhack stage when explicitly given query collation matches + // collection default. coll.drop(); assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = coll.explain("executionStats").remove({_id: "foo"}); + explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, { + collation: {locale: "en_US"} + }); assert.commandWorked(explainRes); planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); assert.neq(null, planStage); - if (db.getMongo().writeMode() === "commands") { - // Remove should return correct results when "simple" collation specified and collection has - // a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - writeRes = coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "simple"}}); - assert.writeOK(writeRes); - assert.eq(0, writeRes.nRemoved); - - // Remove on _id should return correct results when "simple" collation specified and - // collection has a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: "foo"})); - writeRes = coll.remove({_id: "FOO"}, {justOne: true, collation: {locale: "simple"}}); - assert.writeOK(writeRes); - assert.eq(0, writeRes.nRemoved); - - // Remove on _id should use idhack stage when explicit query collation matches collection - // default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = - coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "en_US"}}); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.neq(null, planStage); - - // Remove on _id should not use idhack stage when query collation does not match collection - // default. 
- coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = - coll.explain("executionStats").remove({_id: "foo"}, {collation: {locale: "fr_CA"}}); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.eq(null, planStage); - } - - if (db.getMongo().writeMode() !== "commands") { - // remove() shell helper should error if a collation is specified and the shell is not using - // write commands. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - assert.throws(function() { - coll.remove({str: "FOO"}, {justOne: true, collation: {locale: "en_US", strength: 2}}); - }); - assert.throws(function() { - coll.explain().remove({str: "FOO"}, - {justOne: true, collation: {locale: "en_US", strength: 2}}); - }); - } - - // - // Collation tests for update. - // - - if (db.getMongo().writeMode() === "commands") { - // Update should succeed when collation specified and collection does not exist. - coll.drop(); - assert.writeOK(coll.update( - {str: "foo"}, {$set: {other: 99}}, {multi: true, collation: {locale: "fr"}})); - - // Update should return correct results when collation specified. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - writeRes = coll.update({str: "FOO"}, - {$set: {other: 99}}, - {multi: true, collation: {locale: "en_US", strength: 2}}); - assert.eq(2, writeRes.nModified); - - // Explain of update should return correct results when collation specified. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - explainRes = coll.explain("executionStats").update({str: "FOO"}, {$set: {other: 99}}, { - multi: true, - collation: {locale: "en_US", strength: 2} - }); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "UPDATE"); - assert.neq(null, planStage); - assert.eq(2, planStage.nWouldModify); - } - - // Update should return correct results when no collation specified and collection has a default - // collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nMatched); - - // Update with idhack should return correct results when no collation specified and collection - // has a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: "foo"})); - writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nMatched); - - // Update on _id should use idhack stage when query inherits collection default collation. + // Update on _id should not use idhack stage when query collation does not match collection + // default. 
coll.drop(); assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}); + explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, { + collation: {locale: "fr_CA"} + }); assert.commandWorked(explainRes); planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.neq(null, planStage); + assert.eq(null, planStage); +} - if (db.getMongo().writeMode() === "commands") { - // Update should return correct results when "simple" collation specified and collection has - // a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - writeRes = coll.update({str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}}); - assert.writeOK(writeRes); - assert.eq(0, writeRes.nModified); - - // Update on _id should return correct results when "simple" collation specified and - // collection has a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: "foo"})); - writeRes = coll.update({_id: "FOO"}, {$set: {other: 99}}, {collation: {locale: "simple"}}); - assert.writeOK(writeRes); - assert.eq(0, writeRes.nModified); - - // Update on _id should use idhack stage when explicitly given query collation matches - // collection default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, { - collation: {locale: "en_US"} - }); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.neq(null, planStage); - - // Update on _id should not use idhack stage when query collation does not match collection - // default. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: {locale: "en_US"}})); - explainRes = coll.explain("executionStats").update({_id: "foo"}, {$set: {other: 99}}, { - collation: {locale: "fr_CA"} - }); - assert.commandWorked(explainRes); - planStage = getPlanStage(explainRes.executionStats.executionStages, "IDHACK"); - assert.eq(null, planStage); - } - - if (db.getMongo().writeMode() !== "commands") { - // update() shell helper should error if a collation is specified and the shell is not using - // write commands. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - assert.throws(function() { - coll.update({str: "FOO"}, - {$set: {other: 99}}, - {multi: true, collation: {locale: "en_US", strength: 2}}); - }); - assert.throws(function() { - coll.explain().update({str: "FOO"}, - {$set: {other: 99}}, - {multi: true, collation: {locale: "en_US", strength: 2}}); - }); - } - - // - // Collation tests for the $geoNear aggregation stage. - // - - // $geoNear should fail when collation is specified but the collection does not exist. 
- coll.drop(); - assert.commandFailedWithCode(db.runCommand({ - aggregate: coll.getName(), - cursor: {}, - pipeline: [{ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - distanceField: "dist", - } - }], - collation: {locale: "en_US", strength: 2} - }), - ErrorCodes.NamespaceNotFound); - - // $geoNear rejects the now-deprecated "collation" option. +if (db.getMongo().writeMode() !== "commands") { + // update() shell helper should error if a collation is specified and the shell is not using + // write commands. coll.drop(); - assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); - assert.commandFailedWithCode(db.runCommand({ - aggregate: coll.getName(), - cursor: {}, - pipeline: [{ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - distanceField: "dist", - collation: {locale: "en_US"}, - } - }], - }), - 40227); - - const geoNearStage = { + assert.writeOK(coll.insert({_id: 1, str: "foo"})); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + assert.throws(function() { + coll.update({str: "FOO"}, + {$set: {other: 99}}, + {multi: true, collation: {locale: "en_US", strength: 2}}); + }); + assert.throws(function() { + coll.explain().update({str: "FOO"}, + {$set: {other: 99}}, + {multi: true, collation: {locale: "en_US", strength: 2}}); + }); +} + +// +// Collation tests for the $geoNear aggregation stage. +// + +// $geoNear should fail when collation is specified but the collection does not exist. +coll.drop(); +assert.commandFailedWithCode(db.runCommand({ + aggregate: coll.getName(), + cursor: {}, + pipeline: [{ $geoNear: { near: {type: "Point", coordinates: [0, 0]}, distanceField: "dist", - spherical: true, - query: {str: "ABC"} } - }; + }], + collation: {locale: "en_US", strength: 2} +}), + ErrorCodes.NamespaceNotFound); + +// $geoNear rejects the now-deprecated "collation" option. +coll.drop(); +assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); +assert.commandFailedWithCode(db.runCommand({ + aggregate: coll.getName(), + cursor: {}, + pipeline: [{ + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + distanceField: "dist", + collation: {locale: "en_US"}, + } + }], +}), + 40227); + +const geoNearStage = { + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + distanceField: "dist", + spherical: true, + query: {str: "ABC"} + } +}; + +// $geoNear should return correct results when collation specified and string predicate not +// indexed. +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +assert.eq(0, coll.aggregate([geoNearStage]).itcount()); +assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); + +// $geoNear should return correct results when no collation specified and string predicate +// indexed. +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1})); +assert.eq(0, coll.aggregate([geoNearStage]).itcount()); +assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); + +// $geoNear should return correct results when collation specified and collation on index is +// incompatible with string predicate. 
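The collation for $geoNear is supplied as an aggregate() option; putting a "collation" field inside the stage itself is rejected (error code 40227 in the test above). A minimal sketch (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"});
    db.c.createIndex({geo: "2dsphere"});
    var stage = {$geoNear: {near: {type: "Point", coordinates: [0, 0]},
                            distanceField: "dist", spherical: true, query: {str: "ABC"}}};
    db.c.aggregate([stage]).itcount();                                               // 0
    db.c.aggregate([stage], {collation: {locale: "en_US", strength: 2}}).itcount();  // 1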
+assert.commandWorked(coll.dropIndexes()); +assert.commandWorked( + coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}})); +assert.eq(0, coll.aggregate([geoNearStage]).itcount()); +assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); + +// $geoNear should return correct results when collation specified and collation on index is +// compatible with string predicate. +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked( + coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}})); +assert.eq(0, coll.aggregate([geoNearStage]).itcount()); +assert.eq(1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); + +// $geoNear should return correct results when no collation specified and collection has a +// default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); +assert.eq(1, coll.aggregate([geoNearStage]).itcount()); + +// $geoNear should return correct results when "simple" collation specified and collection has +// a default collation. +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); +assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itcount()); + +// +// Collation tests for find with $nearSphere. +// + +if (db.getMongo().useReadCommands()) { + // Find with $nearSphere should return correct results when collation specified and + // collection does not exist. + coll.drop(); + assert.eq( + 0, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .collation({locale: "en_US", strength: 2}) + .itcount()); - // $geoNear should return correct results when collation specified and string predicate not - // indexed. + // Find with $nearSphere should return correct results when collation specified and string + // predicate not indexed. + coll.drop(); + assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - assert.eq(0, coll.aggregate([geoNearStage]).itcount()); assert.eq( - 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); - - // $geoNear should return correct results when no collation specified and string predicate - // indexed. + 0, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); + assert.eq( + 1, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .collation({locale: "en_US", strength: 2}) + .itcount()); + + // Find with $nearSphere should return correct results when no collation specified and + // string predicate indexed. 
assert.commandWorked(coll.dropIndexes()); assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1})); - assert.eq(0, coll.aggregate([geoNearStage]).itcount()); assert.eq( - 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); - - // $geoNear should return correct results when collation specified and collation on index is - // incompatible with string predicate. + 0, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); + assert.eq( + 1, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .collation({locale: "en_US", strength: 2}) + .itcount()); + + // Find with $nearSphere should return correct results when collation specified and + // collation on index is incompatible with string predicate. assert.commandWorked(coll.dropIndexes()); assert.commandWorked( coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}})); - assert.eq(0, coll.aggregate([geoNearStage]).itcount()); assert.eq( - 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); - - // $geoNear should return correct results when collation specified and collation on index is - // compatible with string predicate. + 0, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); + assert.eq( + 1, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .collation({locale: "en_US", strength: 2}) + .itcount()); + + // Find with $nearSphere should return correct results when collation specified and + // collation on index is compatible with string predicate. assert.commandWorked(coll.dropIndexes()); assert.commandWorked( coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}})); - assert.eq(0, coll.aggregate([geoNearStage]).itcount()); assert.eq( - 1, coll.aggregate([geoNearStage], {collation: {locale: "en_US", strength: 2}}).itcount()); - - // $geoNear should return correct results when no collation specified and collection has a - // default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); - assert.eq(1, coll.aggregate([geoNearStage]).itcount()); - - // $geoNear should return correct results when "simple" collation specified and collection has - // a default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: {locale: "en_US", strength: 2}})); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); - assert.eq(0, coll.aggregate([geoNearStage], {collation: {locale: "simple"}}).itcount()); - - // - // Collation tests for find with $nearSphere. - // - - if (db.getMongo().useReadCommands()) { - // Find with $nearSphere should return correct results when collation specified and - // collection does not exist. - coll.drop(); - assert.eq(0, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .collation({locale: "en_US", strength: 2}) - .itcount()); - - // Find with $nearSphere should return correct results when collation specified and string - // predicate not indexed. 
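The same pattern holds for find with $nearSphere: the string part of the predicate is matched with the query collation, regardless of the collation the compound 2dsphere index was built with; a sketch (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"});
    db.c.createIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}});
    var pred = {str: "ABC",
                geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}};
    db.c.find(pred).itcount();                                            // 0
    db.c.find(pred).collation({locale: "en_US", strength: 2}).itcount();  // 1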
- coll.drop(); - assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, str: "abc"})); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - assert.eq(0, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); - assert.eq(1, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .collation({locale: "en_US", strength: 2}) - .itcount()); - - // Find with $nearSphere should return correct results when no collation specified and - // string predicate indexed. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1})); - assert.eq(0, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); - assert.eq(1, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .collation({locale: "en_US", strength: 2}) - .itcount()); - - // Find with $nearSphere should return correct results when collation specified and - // collation on index is incompatible with string predicate. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}, - {collation: {locale: "en_US", strength: 3}})); - assert.eq(0, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); - assert.eq(1, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .collation({locale: "en_US", strength: 2}) - .itcount()); - - // Find with $nearSphere should return correct results when collation specified and - // collation on index is compatible with string predicate. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}, - {collation: {locale: "en_US", strength: 2}})); - assert.eq(0, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); - assert.eq(1, - coll.find({ - str: "ABC", - geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .collation({locale: "en_US", strength: 2}) - .itcount()); - } - - // - // Tests for the bulk API. - // - - var bulk; - - if (db.getMongo().writeMode() !== "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - - // Can't use the bulk API to set a collation when using legacy write ops. - bulk = coll.initializeUnorderedBulkOp(); - assert.throws(function() { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}); - }); - - bulk = coll.initializeOrderedBulkOp(); - assert.throws(function() { - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}); - }); - } else { - // update(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({ - $set: {other: 99} - }); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(2, writeRes.nModified); - - // updateOne(). 
- coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({ - $set: {other: 99} - }); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nModified); - - // replaceOne(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"}); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nModified); - - // replaceOne() with upsert(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"}); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nUpserted); - assert.eq(0, writeRes.nModified); - - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({ - str: "foo" - }); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(0, writeRes.nUpserted); - assert.eq(1, writeRes.nModified); - - // removeOne(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nRemoved); - - // remove(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); - writeRes = bulk.execute(); - assert.writeOK(writeRes); - assert.eq(2, writeRes.nRemoved); - } + 0, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); + assert.eq( + 1, + coll.find( + {str: "ABC", geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .collation({locale: "en_US", strength: 2}) + .itcount()); +} - // - // Tests for the CRUD API. - // +// +// Tests for the bulk API. +// - // deleteOne(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(1, res.deletedCount); - } else { - assert.throws(function() { - coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); - }); - } +var bulk; - // deleteMany(). +if (db.getMongo().writeMode() !== "commands") { coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(2, res.deletedCount); - } else { - assert.throws(function() { - coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); - }); - } - // findOneAndDelete(). 
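In the bulk API the collation attaches to the find() portion of each operation; a minimal sketch, assuming command write mode (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert([{_id: 1, str: "foo"}, {_id: 2, str: "foo"}]);
    var bulk = db.c.initializeUnorderedBulkOp();
    bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}});
    var res = bulk.execute();
    // res.nModified === 2; under legacy write ops the collation() call throws instead.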
- coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.eq({_id: 1, str: "foo"}, - coll.findOneAndDelete({str: "FOO"}, {collation: {locale: "en_US", strength: 2}})); - assert.eq(null, coll.findOne({_id: 1})); + // Can't use the bulk API to set a collation when using legacy write ops. + bulk = coll.initializeUnorderedBulkOp(); + assert.throws(function() { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}); + }); - // findOneAndReplace(). + bulk = coll.initializeOrderedBulkOp(); + assert.throws(function() { + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}); + }); +} else { + // update(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.eq({_id: 1, str: "foo"}, - coll.findOneAndReplace( - {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}})); - assert.neq(null, coll.findOne({str: "bar"})); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).update({$set: {other: 99}}); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(2, writeRes.nModified); - // findOneAndUpdate(). + // updateOne(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.eq({_id: 1, str: "foo"}, - coll.findOneAndUpdate( - {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}})); - assert.neq(null, coll.findOne({other: 99})); + assert.writeOK(coll.insert({_id: 2, str: "foo"})); + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).updateOne({ + $set: {other: 99} + }); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nModified); // replaceOne(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.replaceOne( - {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(1, res.modifiedCount); - } else { - assert.throws(function() { - coll.replaceOne( - {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}); - }); - } + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).replaceOne({str: "oof"}); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nModified); - // updateOne(). + // replaceOne() with upsert(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.updateOne( - {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(1, res.modifiedCount); - } else { - assert.throws(function() { - coll.updateOne( - {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); - }); - } + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US"}).upsert().replaceOne({str: "foo"}); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nUpserted); + assert.eq(0, writeRes.nModified); - // updateMany(). 
+ bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).upsert().replaceOne({ + str: "foo" + }); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(0, writeRes.nUpserted); + assert.eq(1, writeRes.nModified); + + // removeOne(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.updateMany( - {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); - assert.eq(2, res.modifiedCount); - } else { - assert.throws(function() { - coll.updateMany( - {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); - }); - } + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).removeOne(); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nRemoved); - // updateOne with bulkWrite(). + // remove(). coll.drop(); assert.writeOK(coll.insert({_id: 1, str: "foo"})); assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite([{ + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({str: "FOO"}).collation({locale: "en_US", strength: 2}).remove(); + writeRes = bulk.execute(); + assert.writeOK(writeRes); + assert.eq(2, writeRes.nRemoved); +} + +// +// Tests for the CRUD API. +// + +// deleteOne(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); + assert.eq(1, res.deletedCount); +} else { + assert.throws(function() { + coll.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); + }); +} + +// deleteMany(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); + assert.eq(2, res.deletedCount); +} else { + assert.throws(function() { + coll.deleteMany({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}); + }); +} + +// findOneAndDelete(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.eq({_id: 1, str: "foo"}, + coll.findOneAndDelete({str: "FOO"}, {collation: {locale: "en_US", strength: 2}})); +assert.eq(null, coll.findOne({_id: 1})); + +// findOneAndReplace(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.eq({_id: 1, str: "foo"}, + coll.findOneAndReplace( + {str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}})); +assert.neq(null, coll.findOne({str: "bar"})); + +// findOneAndUpdate(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.eq({_id: 1, str: "foo"}, + coll.findOneAndUpdate( + {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}})); +assert.neq(null, coll.findOne({other: 99})); + +// replaceOne(). 
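The CRUD helpers (deleteOne, deleteMany, findOneAnd*, updateOne, updateMany, replaceOne) all accept the collation in their options document; a sketch, assuming command write mode (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert([{_id: 1, str: "foo"}, {_id: 2, str: "foo"}]);
    db.c.deleteOne({str: "FOO"}, {collation: {locale: "en_US", strength: 2}}).deletedCount;  // 1
    db.c.updateMany({str: "FOO"}, {$set: {other: 99}},
                    {collation: {locale: "en_US", strength: 2}}).modifiedCount;              // 1 (one doc left)
    // Both helpers throw under legacy write mode, as the tests above check.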
+coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = + coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}); + assert.eq(1, res.modifiedCount); +} else { + assert.throws(function() { + coll.replaceOne({str: "FOO"}, {str: "bar"}, {collation: {locale: "en_US", strength: 2}}); + }); +} + +// updateOne(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.updateOne( + {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); + assert.eq(1, res.modifiedCount); +} else { + assert.throws(function() { + coll.updateOne( + {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); + }); +} + +// updateMany(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.updateMany( + {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); + assert.eq(2, res.modifiedCount); +} else { + assert.throws(function() { + coll.updateMany( + {str: "FOO"}, {$set: {other: 99}}, {collation: {locale: "en_US", strength: 2}}); + }); +} + +// updateOne with bulkWrite(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite([{ + updateOne: { + filter: {str: "FOO"}, + update: {$set: {other: 99}}, + collation: {locale: "en_US", strength: 2} + } + }]); + assert.eq(1, res.matchedCount); +} else { + assert.throws(function() { + coll.bulkWrite([{ updateOne: { filter: {str: "FOO"}, update: {$set: {other: 99}}, collation: {locale: "en_US", strength: 2} } }]); - assert.eq(1, res.matchedCount); - } else { - assert.throws(function() { - coll.bulkWrite([{ - updateOne: { - filter: {str: "FOO"}, - update: {$set: {other: 99}}, - collation: {locale: "en_US", strength: 2} - } - }]); - }); - } - - // updateMany with bulkWrite(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite([{ + }); +} + +// updateMany with bulkWrite(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite([{ + updateMany: { + filter: {str: "FOO"}, + update: {$set: {other: 99}}, + collation: {locale: "en_US", strength: 2} + } + }]); + assert.eq(2, res.matchedCount); +} else { + assert.throws(function() { + coll.bulkWrite([{ updateMany: { filter: {str: "FOO"}, update: {$set: {other: 99}}, collation: {locale: "en_US", strength: 2} } }]); - assert.eq(2, res.matchedCount); - } else { - assert.throws(function() { - coll.bulkWrite([{ - updateMany: { - filter: {str: "FOO"}, - update: {$set: {other: 99}}, - collation: {locale: "en_US", strength: 2} - } - }]); - }); - } - - // replaceOne with bulkWrite(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite([{ + }); +} + +// replaceOne with bulkWrite(). 
+coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite([{ + replaceOne: { + filter: {str: "FOO"}, + replacement: {str: "bar"}, + collation: {locale: "en_US", strength: 2} + } + }]); + assert.eq(1, res.matchedCount); +} else { + assert.throws(function() { + coll.bulkWrite([{ replaceOne: { filter: {str: "FOO"}, replacement: {str: "bar"}, collation: {locale: "en_US", strength: 2} } }]); - assert.eq(1, res.matchedCount); - } else { - assert.throws(function() { - coll.bulkWrite([{ - replaceOne: { - filter: {str: "FOO"}, - replacement: {str: "bar"}, - collation: {locale: "en_US", strength: 2} - } - }]); - }); - } - - // deleteOne with bulkWrite(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite( + }); +} + +// deleteOne with bulkWrite(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite( + [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); + assert.eq(1, res.deletedCount); +} else { + assert.throws(function() { + coll.bulkWrite( [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); - assert.eq(1, res.deletedCount); - } else { - assert.throws(function() { - coll.bulkWrite( - [{deleteOne: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); - }); - } - - // deleteMany with bulkWrite(). - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "foo"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite( + }); +} + +// deleteMany with bulkWrite(). +coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "foo"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite( + [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); + assert.eq(2, res.deletedCount); +} else { + assert.throws(function() { + coll.bulkWrite( [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); - assert.eq(2, res.deletedCount); - } else { - assert.throws(function() { - coll.bulkWrite( - [{deleteMany: {filter: {str: "FOO"}, collation: {locale: "en_US", strength: 2}}}]); - }); - } - - // Two deleteOne ops with bulkWrite using different collations. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, str: "foo"})); - assert.writeOK(coll.insert({_id: 2, str: "bar"})); - if (db.getMongo().writeMode() === "commands") { - var res = coll.bulkWrite([ + }); +} + +// Two deleteOne ops with bulkWrite using different collations. 
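bulkWrite() carries the collation per operation rather than per call, so each entry in the batch can match under its own locale; a minimal sketch (the collection name "c" is illustrative):

    db.c.drop();
    db.c.insert([{_id: 1, str: "foo"}, {_id: 2, str: "foo"}]);
    var res = db.c.bulkWrite([{
        updateOne: {
            filter: {str: "FOO"},
            update: {$set: {other: 99}},
            collation: {locale: "en_US", strength: 2}
        }
    }]);
    // res.matchedCount === 1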
+coll.drop(); +assert.writeOK(coll.insert({_id: 1, str: "foo"})); +assert.writeOK(coll.insert({_id: 2, str: "bar"})); +if (db.getMongo().writeMode() === "commands") { + var res = coll.bulkWrite([ + {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}}, + {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}} + ]); + assert.eq(2, res.deletedCount); +} else { + assert.throws(function() { + coll.bulkWrite([ {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}}, {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}} ]); - assert.eq(2, res.deletedCount); - } else { - assert.throws(function() { - coll.bulkWrite([ - {deleteOne: {filter: {str: "FOO"}, collation: {locale: "fr", strength: 2}}}, - {deleteOne: {filter: {str: "BAR"}, collation: {locale: "en_US", strength: 2}}} - ]); - }); - } + }); +} - // applyOps. - if (!isMongos) { - coll.drop(); - assert.commandWorked( - db.createCollection("collation", {collation: {locale: "en_US", strength: 2}})); - assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"})); - - // preCondition.q respects collection default collation. - assert.commandFailed(db.runCommand({ - applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}], - preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}] - })); - assert.eq(5, coll.findOne({_id: "foo"}).x); - assert.commandWorked(db.runCommand({ - applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}], - preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}] - })); - assert.eq(6, coll.findOne({_id: "foo"}).x); - - // preCondition.res respects collection default collation. - assert.commandFailed(db.runCommand({ - applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}], - preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}] - })); - assert.eq(6, coll.findOne({_id: "foo"}).x); - assert.commandWorked(db.runCommand({ - applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}], - preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}] - })); - assert.eq(7, coll.findOne({_id: "foo"}).x); - - // <operation>.o2 respects collection default collation. - assert.commandWorked(db.runCommand( - {applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}]})); - assert.eq(8, coll.findOne({_id: "foo"}).x); - } +// applyOps. +if (!isMongos) { + coll.drop(); + assert.commandWorked( + db.createCollection("collation", {collation: {locale: "en_US", strength: 2}})); + assert.writeOK(coll.insert({_id: "foo", x: 5, str: "bar"})); - // Test that the collection created with the "cloneCollectionAsCapped" command inherits the - // default collation of the corresponding collection. We skip running this command in a sharded - // cluster because it isn't supported by mongos. - if (!isMongos) { - const clonedColl = db.collation_cloned; - - coll.drop(); - clonedColl.drop(); - - // Create a collection with a non-simple default collation. 
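For applyOps, both the preCondition query and the update's o2 selector are evaluated with the target collection's default collation; a sketch, assuming a standalone or replica-set node (mongos does not support applyOps) and an illustrative collection named "collation":

    db.createCollection("collation", {collation: {locale: "en_US", strength: 2}});
    db.collation.insert({_id: "foo", x: 5, str: "bar"});
    // {_id: "FOO"} and {str: "BAR"} match case-insensitively under the default collation.
    db.runCommand({
        applyOps: [{op: "u", ns: db.collation.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 6}}}],
        preCondition: [{ns: db.collation.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}]
    });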
- assert.commandWorked( - db.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}})); - const originalCollectionInfos = db.getCollectionInfos({name: coll.getName()}); - assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos)); - - assert.writeOK(coll.insert({_id: "FOO"})); - assert.writeOK(coll.insert({_id: "bar"})); - assert.eq([{_id: "FOO"}], - coll.find({_id: "foo"}).toArray(), - "query should have performed a case-insensitive match"); - - var cloneCollOutput = db.runCommand({ - cloneCollectionAsCapped: coll.getName(), - toCollection: clonedColl.getName(), - size: 4096 - }); - if (jsTest.options().storageEngine === "mobile") { - // Capped collections are not supported by the mobile storage engine - assert.commandFailedWithCode(cloneCollOutput, ErrorCodes.InvalidOptions); - } else { - assert.commandWorked(cloneCollOutput); - const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()}); - assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos)); - assert.eq(originalCollectionInfos[0].options.collation, - clonedCollectionInfos[0].options.collation); - assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray()); - } - } + // preCondition.q respects collection default collation. + assert.commandFailed(db.runCommand({ + applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}], + preCondition: [{ns: coll.getFullName(), q: {_id: "not foo"}, res: {str: "bar"}}] + })); + assert.eq(5, coll.findOne({_id: "foo"}).x); + assert.commandWorked(db.runCommand({ + applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 6}}}], + preCondition: [{ns: coll.getFullName(), q: {_id: "FOO"}, res: {str: "bar"}}] + })); + assert.eq(6, coll.findOne({_id: "foo"}).x); + + // preCondition.res respects collection default collation. + assert.commandFailed(db.runCommand({ + applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}], + preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "not bar"}}] + })); + assert.eq(6, coll.findOne({_id: "foo"}).x); + assert.commandWorked(db.runCommand({ + applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "foo"}, o: {$set: {x: 7}}}], + preCondition: [{ns: coll.getFullName(), q: {_id: "foo"}, res: {str: "BAR"}}] + })); + assert.eq(7, coll.findOne({_id: "foo"}).x); + + // <operation>.o2 respects collection default collation. + assert.commandWorked(db.runCommand( + {applyOps: [{op: "u", ns: coll.getFullName(), o2: {_id: "FOO"}, o: {$set: {x: 8}}}]})); + assert.eq(8, coll.findOne({_id: "foo"}).x); +} + +// Test that the collection created with the "cloneCollectionAsCapped" command inherits the +// default collation of the corresponding collection. We skip running this command in a sharded +// cluster because it isn't supported by mongos. +if (!isMongos) { + const clonedColl = db.collation_cloned; + + coll.drop(); + clonedColl.drop(); - // Test that the find command's min/max options respect the collation. - if (db.getMongo().useReadCommands()) { - coll.drop(); - assert.writeOK(coll.insert({str: "a"})); - assert.writeOK(coll.insert({str: "A"})); - assert.writeOK(coll.insert({str: "b"})); - assert.writeOK(coll.insert({str: "B"})); - assert.writeOK(coll.insert({str: "c"})); - assert.writeOK(coll.insert({str: "C"})); - assert.writeOK(coll.insert({str: "d"})); - assert.writeOK(coll.insert({str: "D"})); - - // This query should fail, since there is no index to support the min/max. 
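// Context for the min()/max() cases in this hunk: min and max specify raw index
// bounds, so the query must be able to use an index (via an explicit hint) whose
// collation matches any string-typed bounds. When no suitable index exists the
// find fails with code 51173, and when the hinted index's collation differs from
// the query collation while the bounds contain strings it fails with code 51174,
// as the assertions below check.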
- let err = assert.throws(() => coll.find() - .min({str: "b"}) - .max({str: "D"}) - .collation({locale: "en_US", strength: 2}) - .itcount()); - assert.commandFailedWithCode(err, 51173); - - // Even after building an index with the right key pattern, the query should fail since the - // collations don't match. - assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"})); - err = assert.throws(() => coll.find() + // Create a collection with a non-simple default collation. + assert.commandWorked( + db.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}})); + const originalCollectionInfos = db.getCollectionInfos({name: coll.getName()}); + assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos)); + + assert.writeOK(coll.insert({_id: "FOO"})); + assert.writeOK(coll.insert({_id: "bar"})); + assert.eq([{_id: "FOO"}], + coll.find({_id: "foo"}).toArray(), + "query should have performed a case-insensitive match"); + + var cloneCollOutput = db.runCommand( + {cloneCollectionAsCapped: coll.getName(), toCollection: clonedColl.getName(), size: 4096}); + if (jsTest.options().storageEngine === "mobile") { + // Capped collections are not supported by the mobile storage engine + assert.commandFailedWithCode(cloneCollOutput, ErrorCodes.InvalidOptions); + } else { + assert.commandWorked(cloneCollOutput); + const clonedCollectionInfos = db.getCollectionInfos({name: clonedColl.getName()}); + assert.eq(clonedCollectionInfos.length, 1, tojson(clonedCollectionInfos)); + assert.eq(originalCollectionInfos[0].options.collation, + clonedCollectionInfos[0].options.collation); + assert.eq([{_id: "FOO"}], clonedColl.find({_id: "foo"}).toArray()); + } +} + +// Test that the find command's min/max options respect the collation. +if (db.getMongo().useReadCommands()) { + coll.drop(); + assert.writeOK(coll.insert({str: "a"})); + assert.writeOK(coll.insert({str: "A"})); + assert.writeOK(coll.insert({str: "b"})); + assert.writeOK(coll.insert({str: "B"})); + assert.writeOK(coll.insert({str: "c"})); + assert.writeOK(coll.insert({str: "C"})); + assert.writeOK(coll.insert({str: "d"})); + assert.writeOK(coll.insert({str: "D"})); + + // This query should fail, since there is no index to support the min/max. + let err = assert.throws(() => coll.find() .min({str: "b"}) .max({str: "D"}) .collation({locale: "en_US", strength: 2}) - .hint({str: 1}) .itcount()); - assert.commandFailedWithCode(err, 51174); - - // This query should fail, because the hinted index does not match the requested - // collation, and the 'max' value is a string, which means we cannot ignore the - // collation. - const caseInsensitive = {locale: "en", strength: 2}; - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex({str: 1})); - err = assert.throws(() => coll.find({}, {_id: 0}) - .min({str: MinKey}) - .max({str: "Hello1"}) - .hint({str: 1}) - .collation(caseInsensitive) - .toArray()); - assert.commandFailedWithCode(err, 51174); - - // After building an index with the case-insensitive US English collation, the query should - // work. Furthermore, the bounds defined by the min and max should respect the - // case-insensitive collation. 
- assert.commandWorked(coll.createIndex( - {str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}})); - assert.eq(4, - coll.find() - .min({str: "b"}) - .max({str: "D"}) - .collation({locale: "en_US", strength: 2}) - .hint("withCollation") - .itcount()); - - // Ensure results from index with min/max query are sorted to match requested collation. - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); - assert.writeOK(coll.insert( - [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}])); - var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; - res = coll.find({}, {_id: 0}) - .hint({a: 1, b: 1}) - .min({a: 1, b: 1}) - .max({a: 2, b: 3}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1, b: 1}); - assert.eq(res.toArray(), expected); - res = coll.find({}, {_id: 0}) - .hint({a: 1, b: 1}) - .min({a: 1, b: 1}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1, b: 1}); - assert.eq(res.toArray(), expected); - res = coll.find({}, {_id: 0}) - .hint({a: 1, b: 1}) - .max({a: 2, b: 3}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1, b: 1}); - assert.eq(res.toArray(), expected); - - // A min/max query that can use an index whose collation doesn't match should require a sort - // stage if there are any in-bounds strings. Verify this using explain. - explainRes = coll.find({}, {_id: 0}) - .hint({a: 1, b: 1}) - .max({a: 2, b: 3}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1, b: 1}) - .explain(); - assert.commandWorked(explainRes); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "SORT")); - - // This query should fail since min has a string as one of it's boundaries, and the - // collation doesn't match that of the index. - assert.throws(() => coll.find({}, {_id: 0}) - .hint({a: 1, b: 1}) - .min({a: 1, b: "A"}) - .max({a: 2, b: 1}) - .collation({locale: "en_US", strength: 3}) - .sort({a: 1, b: 1}) - .itcount()); - } + assert.commandFailedWithCode(err, 51173); + + // Even after building an index with the right key pattern, the query should fail since the + // collations don't match. + assert.commandWorked(coll.createIndex({str: 1}, {name: "noCollation"})); + err = assert.throws(() => coll.find() + .min({str: "b"}) + .max({str: "D"}) + .collation({locale: "en_US", strength: 2}) + .hint({str: 1}) + .itcount()); + assert.commandFailedWithCode(err, 51174); + + // This query should fail, because the hinted index does not match the requested + // collation, and the 'max' value is a string, which means we cannot ignore the + // collation. + const caseInsensitive = {locale: "en", strength: 2}; + assert.commandWorked(coll.dropIndexes()); + assert.commandWorked(coll.createIndex({str: 1})); + err = assert.throws(() => coll.find({}, {_id: 0}) + .min({str: MinKey}) + .max({str: "Hello1"}) + .hint({str: 1}) + .collation(caseInsensitive) + .toArray()); + assert.commandFailedWithCode(err, 51174); + + // After building an index with the case-insensitive US English collation, the query should + // work. Furthermore, the bounds defined by the min and max should respect the + // case-insensitive collation. + assert.commandWorked(coll.createIndex( + {str: 1}, {name: "withCollation", collation: {locale: "en_US", strength: 2}})); + assert.eq(4, + coll.find() + .min({str: "b"}) + .max({str: "D"}) + .collation({locale: "en_US", strength: 2}) + .hint("withCollation") + .itcount()); + + // Ensure results from index with min/max query are sorted to match requested collation. 
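// Why the requested collation matters for ordering here: an index built with a
// collation stores collator-generated comparison keys, so an index whose collation
// does not match the query's cannot hand back strings in the requested order. The
// tests below therefore re-sort the min/max results and, via explain(), check for
// a blocking SORT stage whenever string values fall inside the bounds.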
+ coll.drop(); + assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); + assert.writeOK( + coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}])); + var expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; + res = coll.find({}, {_id: 0}) + .hint({a: 1, b: 1}) + .min({a: 1, b: 1}) + .max({a: 2, b: 3}) + .collation({locale: "en_US", strength: 3}) + .sort({a: 1, b: 1}); + assert.eq(res.toArray(), expected); + res = coll.find({}, {_id: 0}) + .hint({a: 1, b: 1}) + .min({a: 1, b: 1}) + .collation({locale: "en_US", strength: 3}) + .sort({a: 1, b: 1}); + assert.eq(res.toArray(), expected); + res = coll.find({}, {_id: 0}) + .hint({a: 1, b: 1}) + .max({a: 2, b: 3}) + .collation({locale: "en_US", strength: 3}) + .sort({a: 1, b: 1}); + assert.eq(res.toArray(), expected); + + // A min/max query that can use an index whose collation doesn't match should require a sort + // stage if there are any in-bounds strings. Verify this using explain. + explainRes = coll.find({}, {_id: 0}) + .hint({a: 1, b: 1}) + .max({a: 2, b: 3}) + .collation({locale: "en_US", strength: 3}) + .sort({a: 1, b: 1}) + .explain(); + assert.commandWorked(explainRes); + assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "SORT")); + + // This query should fail since min has a string as one of it's boundaries, and the + // collation doesn't match that of the index. + assert.throws(() => coll.find({}, {_id: 0}) + .hint({a: 1, b: 1}) + .min({a: 1, b: "A"}) + .max({a: 2, b: 1}) + .collation({locale: "en_US", strength: 3}) + .sort({a: 1, b: 1}) + .itcount()); +} })(); diff --git a/jstests/core/collation_convert_to_capped.js b/jstests/core/collation_convert_to_capped.js index e1f79bafb76..237156e86d7 100644 --- a/jstests/core/collation_convert_to_capped.js +++ b/jstests/core/collation_convert_to_capped.js @@ -9,28 +9,27 @@ */ (function() { - "use strict"; +"use strict"; - let testDb = db.getSiblingDB("collation_convert_to_capped"); - let coll = testDb.coll; - testDb.dropDatabase(); +let testDb = db.getSiblingDB("collation_convert_to_capped"); +let coll = testDb.coll; +testDb.dropDatabase(); - // Create a collection with a non-simple default collation. - assert.commandWorked( - testDb.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}})); - const originalCollectionInfos = testDb.getCollectionInfos({name: coll.getName()}); - assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos)); +// Create a collection with a non-simple default collation. 
+assert.commandWorked( + testDb.runCommand({create: coll.getName(), collation: {locale: "en", strength: 2}})); +const originalCollectionInfos = testDb.getCollectionInfos({name: coll.getName()}); +assert.eq(originalCollectionInfos.length, 1, tojson(originalCollectionInfos)); - assert.writeOK(coll.insert({_id: "FOO"})); - assert.writeOK(coll.insert({_id: "bar"})); - assert.eq([{_id: "FOO"}], - coll.find({_id: "foo"}).toArray(), - "query should have performed a case-insensitive match"); +assert.writeOK(coll.insert({_id: "FOO"})); +assert.writeOK(coll.insert({_id: "bar"})); +assert.eq([{_id: "FOO"}], + coll.find({_id: "foo"}).toArray(), + "query should have performed a case-insensitive match"); - assert.commandWorked(testDb.runCommand({convertToCapped: coll.getName(), size: 4096})); - const cappedCollectionInfos = testDb.getCollectionInfos({name: coll.getName()}); - assert.eq(cappedCollectionInfos.length, 1, tojson(cappedCollectionInfos)); - assert.eq(originalCollectionInfos[0].options.collation, - cappedCollectionInfos[0].options.collation); - assert.eq([{_id: "FOO"}], coll.find({_id: "foo"}).toArray()); +assert.commandWorked(testDb.runCommand({convertToCapped: coll.getName(), size: 4096})); +const cappedCollectionInfos = testDb.getCollectionInfos({name: coll.getName()}); +assert.eq(cappedCollectionInfos.length, 1, tojson(cappedCollectionInfos)); +assert.eq(originalCollectionInfos[0].options.collation, cappedCollectionInfos[0].options.collation); +assert.eq([{_id: "FOO"}], coll.find({_id: "foo"}).toArray()); })(); diff --git a/jstests/core/collation_find_and_modify.js b/jstests/core/collation_find_and_modify.js index ea9b355516f..6c0fd704dcc 100644 --- a/jstests/core/collation_find_and_modify.js +++ b/jstests/core/collation_find_and_modify.js @@ -4,84 +4,90 @@ // Integration tests for collation-aware findAndModify. (function() { - 'use strict'; - var coll = db.getCollection("find_and_modify_update_test"); +'use strict'; +var coll = db.getCollection("find_and_modify_update_test"); - const caseInsensitive = {locale: "en_US", strength: 2}; - const caseSensitive = {locale: "en_US", strength: 3}; +const caseInsensitive = { + locale: "en_US", + strength: 2 +}; +const caseSensitive = { + locale: "en_US", + strength: 3 +}; - // We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE - // which cannot differentiate an update object from an array. - if (db.getMongo().writeMode() === "commands") { - // - // Pipeline-style update respects collection default collation. - // +// We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE +// which cannot differentiate an update object from an array. +if (db.getMongo().writeMode() === "commands") { + // + // Pipeline-style update respects collection default collation. + // - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive})); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - let doc = coll.findAndModify( - {update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], new: true}); - assert.eq(doc.newField, 3, doc); + coll.drop(); + assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive})); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + let doc = + coll.findAndModify({update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], new: true}); + assert.eq(doc.newField, 3, doc); - // - // Pipeline-style findAndModify respects query collation. 
- // + // + // Pipeline-style findAndModify respects query collation. + // - // Case sensitive $indexOfArray on "B" matches "B". - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseSensitive, - new: true - }); - assert.eq(doc.newField, 5, doc); + // Case sensitive $indexOfArray on "B" matches "B". + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseSensitive, + new: true + }); + assert.eq(doc.newField, 5, doc); - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseSensitive, - new: true - }); - assert.eq(doc.newField, 5, doc); + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseSensitive, + new: true + }); + assert.eq(doc.newField, 5, doc); - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseSensitive, - new: true - }); - assert.eq(doc.newField, 5, doc); + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseSensitive, + new: true + }); + assert.eq(doc.newField, 5, doc); - // Case insensitive $indexOfArray on "B" matches "b". - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive, - new: true - }); - assert.eq(doc.newField, 3, doc); + // Case insensitive $indexOfArray on "B" matches "b". 
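// The same $indexOfArray behaviour can be seen outside findAndModify. Illustrative
// sketch only (placeholder collection "c" holding {x: [1, 2, "a", "b", "c", "B"]}):
//
//   db.c.aggregate([{$project: {idx: {$indexOfArray: ["$x", "B"]}}}],
//                  {collation: {locale: "en_US", strength: 3}});  // idx: 5, only "B" matches
//   db.c.aggregate([{$project: {idx: {$indexOfArray: ["$x", "B"]}}}],
//                  {collation: {locale: "en_US", strength: 2}});  // idx: 3, "b" matches first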
+ assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$set: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive, + new: true + }); + assert.eq(doc.newField, 3, doc); - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive, - new: true - }); - assert.eq(doc.newField, 3, doc); + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive, + new: true + }); + assert.eq(doc.newField, 3, doc); - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - doc = coll.findAndModify({ - update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive, - new: true - }); - assert.eq(doc.newField, 3, doc); - } + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + doc = coll.findAndModify({ + update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive, + new: true + }); + assert.eq(doc.newField, 3, doc); +} })(); diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js index 70b63acd33b..07507938cc3 100644 --- a/jstests/core/collation_plan_cache.js +++ b/jstests/core/collation_plan_cache.js @@ -8,244 +8,236 @@ // does_not_support_stepdowns, // ] (function() { - 'use strict'; - - var coll = db.collation_plan_cache; - coll.drop(); - - assert.writeOK(coll.insert({a: 'foo', b: 5})); - - // We need two indexes that each query can use so that a plan cache entry is created. - assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: 'en_US'}})); - assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: 'en_US'}})); - - // We need an index with a different collation, so that string comparisons affect the query - // shape. - assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: 'fr_CA'}})); - - // listQueryShapes(). - - // Run a query so that an entry is inserted into the cache. - assert.commandWorked( - coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), - 'find command failed'); - - // The query shape should have been added. - var shapes = coll.getPlanCache().listQueryShapes(); - assert.eq(1, shapes.length, 'unexpected cache size after running query'); - let filteredShape0 = shapes[0]; - delete filteredShape0.queryHash; - assert.eq(filteredShape0, - { - query: {a: 'foo', b: 5}, - sort: {}, - projection: {}, - collation: { - locale: 'en_US', - caseLevel: false, - caseFirst: 'off', - strength: 3, - numericOrdering: false, - alternate: 'non-ignorable', - maxVariable: 'punct', - normalization: false, - backwards: false, - version: '57.1' - } - }, - 'unexpected query shape returned from listQueryShapes()'); - - coll.getPlanCache().clear(); - - // getPlansByQuery(). - - // Passing a query with an empty collation object should throw. - assert.throws(function() { - coll.getPlanCache().getPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}}); - }, [], 'empty collation object should throw'); - - // Passing a query with an invalid collation object should throw. 
- assert.throws(function() { - coll.getPlanCache().getPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}}); - }, [], 'invalid collation object should throw'); - - // Run a query so that an entry is inserted into the cache. - assert.commandWorked( - coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), - 'find command failed'); - - // The query should have cached plans. - assert.lt( - 0, - coll.getPlanCache() - .getPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}}) - .plans.length, - 'unexpected number of cached plans for query'); - - // Test passing the query, sort, projection, and collation to getPlansByQuery() as separate - // arguments. - assert.lt(0, - coll.getPlanCache() - .getPlansByQuery({a: 'foo', b: 5}, {}, {}, {locale: 'en_US'}) - .plans.length, - 'unexpected number of cached plans for query'); - - // Test passing the query, sort, projection, and collation to getPlansByQuery() as separate - // arguments. - assert.eq(0, - coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}).plans.length, - 'unexpected number of cached plans for query'); - - // A query with a different collation should have no cached plans. - assert.eq( - 0, - coll.getPlanCache() - .getPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}}) - .plans.length, - 'unexpected number of cached plans for query'); - - // A query with different string locations should have no cached plans. - assert.eq(0, - coll.getPlanCache() - .getPlansByQuery({ - query: {a: 'foo', b: 'bar'}, - sort: {}, - projection: {}, - collation: {locale: 'en_US'} - }) - .plans.length, - 'unexpected number of cached plans for query'); - - coll.getPlanCache().clear(); - - // clearPlansByQuery(). - - // Passing a query with an empty collation object should throw. - assert.throws(function() { - coll.getPlanCache().clearPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}}); - }, [], 'empty collation object should throw'); - - // Passing a query with an invalid collation object should throw. - assert.throws(function() { - coll.getPlanCache().clearPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}}); - }, [], 'invalid collation object should throw'); - - // Run a query so that an entry is inserted into the cache. - assert.commandWorked( - coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), - 'find command failed'); - assert.eq(1, - coll.getPlanCache().listQueryShapes().length, - 'unexpected cache size after running query'); - - // Dropping a query shape with a different collation should have no effect. - coll.getPlanCache().clearPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}}); - assert.eq(1, - coll.getPlanCache().listQueryShapes().length, - 'unexpected cache size after dropping uncached query shape'); - - // Dropping a query shape with different string locations should have no effect. +'use strict'; + +var coll = db.collation_plan_cache; +coll.drop(); + +assert.writeOK(coll.insert({a: 'foo', b: 5})); + +// We need two indexes that each query can use so that a plan cache entry is created. 
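// Background for the plan cache assertions that follow: a plan cache entry is keyed
// by the query shape, and the shape includes the fully-resolved collation. The same
// filter run with a different collation (or with string values, where comparisons
// become collation-sensitive) therefore maps to a different shape and gets its own
// cache entry, which is what listQueryShapes()/getPlansByQuery() verify below.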
+assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: 'en_US'}})); +assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: 'en_US'}})); + +// We need an index with a different collation, so that string comparisons affect the query +// shape. +assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: 'fr_CA'}})); + +// listQueryShapes(). + +// Run a query so that an entry is inserted into the cache. +assert.commandWorked( + coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), + 'find command failed'); + +// The query shape should have been added. +var shapes = coll.getPlanCache().listQueryShapes(); +assert.eq(1, shapes.length, 'unexpected cache size after running query'); +let filteredShape0 = shapes[0]; +delete filteredShape0.queryHash; +assert.eq(filteredShape0, + { + query: {a: 'foo', b: 5}, + sort: {}, + projection: {}, + collation: { + locale: 'en_US', + caseLevel: false, + caseFirst: 'off', + strength: 3, + numericOrdering: false, + alternate: 'non-ignorable', + maxVariable: 'punct', + normalization: false, + backwards: false, + version: '57.1' + } + }, + 'unexpected query shape returned from listQueryShapes()'); + +coll.getPlanCache().clear(); + +// getPlansByQuery(). + +// Passing a query with an empty collation object should throw. +assert.throws(function() { + coll.getPlanCache().getPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}}); +}, [], 'empty collation object should throw'); + +// Passing a query with an invalid collation object should throw. +assert.throws(function() { + coll.getPlanCache().getPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}}); +}, [], 'invalid collation object should throw'); + +// Run a query so that an entry is inserted into the cache. +assert.commandWorked( + coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), + 'find command failed'); + +// The query should have cached plans. +assert.lt(0, + coll.getPlanCache() + .getPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}}) + .plans.length, + 'unexpected number of cached plans for query'); + +// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate +// arguments. +assert.lt( + 0, + coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}, {}, {}, {locale: 'en_US'}).plans.length, + 'unexpected number of cached plans for query'); + +// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate +// arguments. +assert.eq(0, + coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}).plans.length, + 'unexpected number of cached plans for query'); + +// A query with a different collation should have no cached plans. +assert.eq(0, + coll.getPlanCache() + .getPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}}) + .plans.length, + 'unexpected number of cached plans for query'); + +// A query with different string locations should have no cached plans. +assert.eq( + 0, + coll.getPlanCache() + .getPlansByQuery( + {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}}) + .plans.length, + 'unexpected number of cached plans for query'); + +coll.getPlanCache().clear(); + +// clearPlansByQuery(). + +// Passing a query with an empty collation object should throw. 
+assert.throws(function() { coll.getPlanCache().clearPlansByQuery( - {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}}); - assert.eq(1, - coll.getPlanCache().listQueryShapes().length, - 'unexpected cache size after dropping uncached query shape'); + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}}); +}, [], 'empty collation object should throw'); - // Dropping query shape. +// Passing a query with an invalid collation object should throw. +assert.throws(function() { coll.getPlanCache().clearPlansByQuery( - {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}}); - assert.eq(0, - coll.getPlanCache().listQueryShapes().length, - 'unexpected cache size after dropping query shapes'); - - // Index filter commands. - - // planCacheSetFilter should fail if 'collation' is an empty object. - assert.commandFailed( - coll.runCommand('planCacheSetFilter', - {query: {a: 'foo', b: 5}, collation: {}, indexes: [{a: 1, b: 1}]}), - 'planCacheSetFilter should fail on empty collation object'); - - // planCacheSetFilter should fail if 'collation' is an invalid object. - assert.commandFailed( - coll.runCommand( - 'planCacheSetFilter', - {query: {a: 'foo', b: 5}, collation: {bad: "value"}, indexes: [{a: 1, b: 1}]}), - 'planCacheSetFilter should fail on invalid collation object'); - - // Set a plan cache filter. - assert.commandWorked( - coll.runCommand( - 'planCacheSetFilter', - {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}, indexes: [{a: 1, b: 1}]}), - 'planCacheSetFilter failed'); - - // Check the plan cache filter was added. - var res = coll.runCommand('planCacheListFilters'); - assert.commandWorked(res, 'planCacheListFilters failed'); - assert.eq(1, res.filters.length, 'unexpected number of plan cache filters'); - assert.eq(res.filters[0], - { - query: {a: 'foo', b: 5}, - sort: {}, - projection: {}, - collation: { - locale: 'en_US', - caseLevel: false, - caseFirst: 'off', - strength: 3, - numericOrdering: false, - alternate: 'non-ignorable', - maxVariable: 'punct', - normalization: false, - backwards: false, - version: '57.1' - }, - indexes: [{a: 1, b: 1}] + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}}); +}, [], 'invalid collation object should throw'); + +// Run a query so that an entry is inserted into the cache. +assert.commandWorked( + coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}), + 'find command failed'); +assert.eq( + 1, coll.getPlanCache().listQueryShapes().length, 'unexpected cache size after running query'); + +// Dropping a query shape with a different collation should have no effect. +coll.getPlanCache().clearPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}}); +assert.eq(1, + coll.getPlanCache().listQueryShapes().length, + 'unexpected cache size after dropping uncached query shape'); + +// Dropping a query shape with different string locations should have no effect. +coll.getPlanCache().clearPlansByQuery( + {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}}); +assert.eq(1, + coll.getPlanCache().listQueryShapes().length, + 'unexpected cache size after dropping uncached query shape'); + +// Dropping query shape. 
+coll.getPlanCache().clearPlansByQuery( + {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}}); +assert.eq(0, + coll.getPlanCache().listQueryShapes().length, + 'unexpected cache size after dropping query shapes'); + +// Index filter commands. + +// planCacheSetFilter should fail if 'collation' is an empty object. +assert.commandFailed( + coll.runCommand('planCacheSetFilter', + {query: {a: 'foo', b: 5}, collation: {}, indexes: [{a: 1, b: 1}]}), + 'planCacheSetFilter should fail on empty collation object'); + +// planCacheSetFilter should fail if 'collation' is an invalid object. +assert.commandFailed( + coll.runCommand('planCacheSetFilter', + {query: {a: 'foo', b: 5}, collation: {bad: "value"}, indexes: [{a: 1, b: 1}]}), + 'planCacheSetFilter should fail on invalid collation object'); + +// Set a plan cache filter. +assert.commandWorked( + coll.runCommand( + 'planCacheSetFilter', + {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}, indexes: [{a: 1, b: 1}]}), + 'planCacheSetFilter failed'); + +// Check the plan cache filter was added. +var res = coll.runCommand('planCacheListFilters'); +assert.commandWorked(res, 'planCacheListFilters failed'); +assert.eq(1, res.filters.length, 'unexpected number of plan cache filters'); +assert.eq(res.filters[0], + { + query: {a: 'foo', b: 5}, + sort: {}, + projection: {}, + collation: { + locale: 'en_US', + caseLevel: false, + caseFirst: 'off', + strength: 3, + numericOrdering: false, + alternate: 'non-ignorable', + maxVariable: 'punct', + normalization: false, + backwards: false, + version: '57.1' }, - 'unexpected plan cache filter'); - - // planCacheClearFilters should fail if 'collation' is an empty object. - assert.commandFailed( - coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {}}), - 'planCacheClearFilters should fail on empty collation object'); - - // planCacheSetFilter should fail if 'collation' is an invalid object. - assert.commandFailed(coll.runCommand('planCacheClearFilters', - {query: {a: 'foo', b: 5}, collation: {bad: 'value'}}), - 'planCacheClearFilters should fail on invalid collation object'); - - // Clearing a plan cache filter with no collation should have no effect. - assert.commandWorked(coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}})); - assert.eq(1, - coll.runCommand('planCacheListFilters').filters.length, - 'unexpected number of plan cache filters'); - - // Clearing a plan cache filter with a different collation should have no effect. - assert.commandWorked(coll.runCommand('planCacheClearFilters', - {query: {a: 'foo', b: 5}, collation: {locale: 'fr_CA'}})); - assert.eq(1, - coll.runCommand('planCacheListFilters').filters.length, - 'unexpected number of plan cache filters'); - - // Clearing a plan cache filter with different string locations should have no effect. - assert.commandWorked(coll.runCommand( - 'planCacheClearFilters', {query: {a: 'foo', b: 'bar', collation: {locale: 'en_US'}}})); - assert.eq(1, - coll.runCommand('planCacheListFilters').filters.length, - 'unexpected number of plan cache filters'); - - // Clear plan cache filter. - assert.commandWorked(coll.runCommand('planCacheClearFilters', - {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}})); - assert.eq(0, - coll.runCommand('planCacheListFilters').filters.length, - 'unexpected number of plan cache filters'); + indexes: [{a: 1, b: 1}] + }, + 'unexpected plan cache filter'); + +// planCacheClearFilters should fail if 'collation' is an empty object. 
+assert.commandFailed( + coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {}}), + 'planCacheClearFilters should fail on empty collation object'); + +// planCacheSetFilter should fail if 'collation' is an invalid object. +assert.commandFailed( + coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}, collation: {bad: 'value'}}), + 'planCacheClearFilters should fail on invalid collation object'); + +// Clearing a plan cache filter with no collation should have no effect. +assert.commandWorked(coll.runCommand('planCacheClearFilters', {query: {a: 'foo', b: 5}})); +assert.eq(1, + coll.runCommand('planCacheListFilters').filters.length, + 'unexpected number of plan cache filters'); + +// Clearing a plan cache filter with a different collation should have no effect. +assert.commandWorked(coll.runCommand('planCacheClearFilters', + {query: {a: 'foo', b: 5}, collation: {locale: 'fr_CA'}})); +assert.eq(1, + coll.runCommand('planCacheListFilters').filters.length, + 'unexpected number of plan cache filters'); + +// Clearing a plan cache filter with different string locations should have no effect. +assert.commandWorked(coll.runCommand('planCacheClearFilters', + {query: {a: 'foo', b: 'bar', collation: {locale: 'en_US'}}})); +assert.eq(1, + coll.runCommand('planCacheListFilters').filters.length, + 'unexpected number of plan cache filters'); + +// Clear plan cache filter. +assert.commandWorked(coll.runCommand('planCacheClearFilters', + {query: {a: 'foo', b: 5}, collation: {locale: 'en_US'}})); +assert.eq(0, + coll.runCommand('planCacheListFilters').filters.length, + 'unexpected number of plan cache filters'); })(); diff --git a/jstests/core/collation_update.js b/jstests/core/collation_update.js index 82098e51cd3..32538e15653 100644 --- a/jstests/core/collation_update.js +++ b/jstests/core/collation_update.js @@ -4,320 +4,325 @@ // Integration tests for collation-aware updates. (function() { - 'use strict'; - var coll = db.getCollection("collation_update_test"); - - const caseInsensitive = {collation: {locale: "en_US", strength: 2}}; - const caseSensitive = {collation: {locale: "en_US", strength: 3}}; - const numericOrdering = {collation: {locale: "en_US", numericOrdering: true}}; - - // Update modifiers respect collection default collation on simple _id query. +'use strict'; +var coll = db.getCollection("collation_update_test"); + +const caseInsensitive = { + collation: {locale: "en_US", strength: 2} +}; +const caseSensitive = { + collation: {locale: "en_US", strength: 3} +}; +const numericOrdering = { + collation: {locale: "en_US", numericOrdering: true} +}; + +// Update modifiers respect collection default collation on simple _id query. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); +assert.writeOK(coll.insert({_id: 1, a: "124"})); +assert.writeOK(coll.update({_id: 1}, {$min: {a: "1234"}})); +assert.eq(coll.find({a: "124"}).count(), 1); + +// $min respects query collation. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); - assert.writeOK(coll.insert({_id: 1, a: "124"})); - assert.writeOK(coll.update({_id: 1}, {$min: {a: "1234"}})); - assert.eq(coll.find({a: "124"}).count(), 1); - - // $min respects query collation. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // 1234 > 124, so no change should occur. 
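// Reminder for the $min/$max cases below: with {numericOrdering: true} runs of
// digits compare as numbers, so "124" < "1234"; under the default lexicographic
// comparison "1234" < "124". Illustrative sketch only (placeholder collection "c"):
//
//   db.c.find({a: {$lt: "1234"}}).collation({locale: "en_US", numericOrdering: true});  // matches "124"
//   db.c.find({a: {$lt: "1234"}}).collation({locale: "en_US", strength: 3});            // does not match "124"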
- assert.writeOK(coll.insert({a: "124"})); - assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering)); - assert.eq(coll.find({a: "124"}).count(), 1); - - // "1234" < "124" (non-numeric ordering), so an update should occur. - assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive)); - assert.eq(coll.find({a: "1234"}).count(), 1); - } - // $min respects collection default collation. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); + // 1234 > 124, so no change should occur. assert.writeOK(coll.insert({a: "124"})); - assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}})); + assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, numericOrdering)); assert.eq(coll.find({a: "124"}).count(), 1); - // $max respects query collation. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // "1234" < "124", so an update should not occur. - assert.writeOK(coll.insert({a: "124"})); - assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive)); - assert.eq(coll.find({a: "124"}).count(), 1); + // "1234" < "124" (non-numeric ordering), so an update should occur. + assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}}, caseSensitive)); + assert.eq(coll.find({a: "1234"}).count(), 1); +} - // 1234 > 124, so an update should occur. - assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering)); - assert.eq(coll.find({a: "1234"}).count(), 1); - } +// $min respects collection default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); +assert.writeOK(coll.insert({a: "124"})); +assert.writeOK(coll.update({a: "124"}, {$min: {a: "1234"}})); +assert.eq(coll.find({a: "124"}).count(), 1); - // $max respects collection default collation. +// $max respects query collation. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); + + // "1234" < "124", so an update should not occur. assert.writeOK(coll.insert({a: "124"})); - assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}})); + assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, caseSensitive)); + assert.eq(coll.find({a: "124"}).count(), 1); + + // 1234 > 124, so an update should occur. + assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}}, numericOrdering)); assert.eq(coll.find({a: "1234"}).count(), 1); +} - // $addToSet respects query collation. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // "foo" == "FOO" (case-insensitive), so set isn't extended. - assert.writeOK(coll.insert({a: ["foo"]})); - assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive)); - var set = coll.findOne().a; - assert.eq(set.length, 1); - - // "foo" != "FOO" (case-sensitive), so set is extended. - assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive)); - set = coll.findOne().a; - assert.eq(set.length, 2); - - coll.drop(); - - // $each and $addToSet respect collation - assert.writeOK(coll.insert({a: ["foo", "bar", "FOO"]})); - assert.writeOK( - coll.update({}, {$addToSet: {a: {$each: ["FOO", "BAR", "str"]}}}, caseInsensitive)); - set = coll.findOne().a; - assert.eq(set.length, 4); - assert(set.includes("foo")); - assert(set.includes("FOO")); - assert(set.includes("bar")); - assert(set.includes("str")); - } +// $max respects collection default collation. 
+coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); +assert.writeOK(coll.insert({a: "124"})); +assert.writeOK(coll.update({a: "124"}, {$max: {a: "1234"}})); +assert.eq(coll.find({a: "1234"}).count(), 1); +// $addToSet respects query collation. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); + // "foo" == "FOO" (case-insensitive), so set isn't extended. assert.writeOK(coll.insert({a: ["foo"]})); - assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}})); + assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseInsensitive)); var set = coll.findOne().a; assert.eq(set.length, 1); - // $pull respects query collation. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // "foo" != "FOO" (case-sensitive), so it is not pulled. - assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseSensitive)); - var arr = coll.findOne().a; - assert.eq(arr.length, 1); - assert(arr.includes("FOO")); - - // "foo" == "FOO" (case-insensitive), so "FOO" is pulled. - assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive)); - arr = coll.findOne().a; - assert.eq(arr.length, 0); - - // collation-aware $pull removes all instances that match. - coll.drop(); - assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive)); - arr = coll.findOne().a; - assert.eq(arr.length, 0); - - // collation-aware $pull with comparisons removes matching instances. - coll.drop(); - - // "124" > "1234" (case-sensitive), so it is not removed. - assert.writeOK(coll.insert({a: ["124", "1234"]})); - assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive)); - arr = coll.findOne().a; - assert.eq(arr.length, 2); - - // 124 < 1234 (numeric ordering), so it is removed. - assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering)); - arr = coll.findOne().a; - assert.eq(arr.length, 1); - assert(arr.includes("1234")); - } - - // $pull respects collection default collation. + // "foo" != "FOO" (case-sensitive), so set is extended. + assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}}, caseSensitive)); + set = coll.findOne().a; + assert.eq(set.length, 2); + coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); + + // $each and $addToSet respect collation + assert.writeOK(coll.insert({a: ["foo", "bar", "FOO"]})); + assert.writeOK( + coll.update({}, {$addToSet: {a: {$each: ["FOO", "BAR", "str"]}}}, caseInsensitive)); + set = coll.findOne().a; + assert.eq(set.length, 4); + assert(set.includes("foo")); + assert(set.includes("FOO")); + assert(set.includes("bar")); + assert(set.includes("str")); +} + +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +// "foo" == "FOO" (case-insensitive), so set isn't extended. +assert.writeOK(coll.insert({a: ["foo"]})); +assert.writeOK(coll.update({}, {$addToSet: {a: "FOO"}})); +var set = coll.findOne().a; +assert.eq(set.length, 1); + +// $pull respects query collation. +if (db.getMongo().writeMode() === "commands") { + coll.drop(); + + // "foo" != "FOO" (case-sensitive), so it is not pulled. 
assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({}, {$pull: {a: "foo"}})); + assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseSensitive)); var arr = coll.findOne().a; + assert.eq(arr.length, 1); + assert(arr.includes("FOO")); + + // "foo" == "FOO" (case-insensitive), so "FOO" is pulled. + assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive)); + arr = coll.findOne().a; assert.eq(arr.length, 0); - // $pullAll respects query collation. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); + // collation-aware $pull removes all instances that match. + coll.drop(); + assert.writeOK(coll.insert({a: ["foo", "FOO"]})); + assert.writeOK(coll.update({}, {$pull: {a: "foo"}}, caseInsensitive)); + arr = coll.findOne().a; + assert.eq(arr.length, 0); - // "foo" != "FOO" (case-sensitive), so no changes are made. - assert.writeOK(coll.insert({a: ["foo", "bar"]})); - assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive)); - var arr = coll.findOne().a; - assert.eq(arr.length, 2); + // collation-aware $pull with comparisons removes matching instances. + coll.drop(); - // "foo" == "FOO", "bar" == "BAR" (case-insensitive), so both are removed. - assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive)); - arr = coll.findOne().a; - assert.eq(arr.length, 0); - } + // "124" > "1234" (case-sensitive), so it is not removed. + assert.writeOK(coll.insert({a: ["124", "1234"]})); + assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, caseSensitive)); + arr = coll.findOne().a; + assert.eq(arr.length, 2); - // $pullAll respects collection default collation. + // 124 < 1234 (numeric ordering), so it is removed. + assert.writeOK(coll.update({}, {$pull: {a: {$lt: "1234"}}}, numericOrdering)); + arr = coll.findOne().a; + assert.eq(arr.length, 1); + assert(arr.includes("1234")); +} + +// $pull respects collection default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +assert.writeOK(coll.insert({a: ["foo", "FOO"]})); +assert.writeOK(coll.update({}, {$pull: {a: "foo"}})); +var arr = coll.findOne().a; +assert.eq(arr.length, 0); + +// $pullAll respects query collation. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); + + // "foo" != "FOO" (case-sensitive), so no changes are made. assert.writeOK(coll.insert({a: ["foo", "bar"]})); - assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}})); + assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseSensitive)); var arr = coll.findOne().a; - assert.eq(arr.length, 0); + assert.eq(arr.length, 2); - // $push with $sort respects query collation. 
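// For the $push-with-$sort cases below: the $sort modifier orders the resulting
// array using the update's collation, so pushing "1230" into ["1234", "124"] yields
// ["1230", "1234", "124"] under the case-sensitive comparison but
// ["124", "1230", "1234"] under numericOrdering.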
- if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // "1230" < "1234" < "124" (case-sensitive) - assert.writeOK(coll.insert({a: ["1234", "124"]})); - assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive)); - var arr = coll.findOne().a; - assert.eq(arr.length, 3); - assert.eq(arr[0], "1230"); - assert.eq(arr[1], "1234"); - assert.eq(arr[2], "124"); - - // "124" < "1230" < "1234" (numeric ordering) - coll.drop(); - assert.writeOK(coll.insert({a: ["1234", "124"]})); - assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering)); - arr = coll.findOne().a; - assert.eq(arr.length, 3); - assert.eq(arr[0], "124"); - assert.eq(arr[1], "1230"); - assert.eq(arr[2], "1234"); - } - - // $push with $sort respects collection default collation. + // "foo" == "FOO", "bar" == "BAR" (case-insensitive), so both are removed. + assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}}, caseInsensitive)); + arr = coll.findOne().a; + assert.eq(arr.length, 0); +} + +// $pullAll respects collection default collation. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +assert.writeOK(coll.insert({a: ["foo", "bar"]})); +assert.writeOK(coll.update({}, {$pullAll: {a: ["FOO", "BAR"]}})); +var arr = coll.findOne().a; +assert.eq(arr.length, 0); + +// $push with $sort respects query collation. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); + + // "1230" < "1234" < "124" (case-sensitive) assert.writeOK(coll.insert({a: ["1234", "124"]})); - assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}})); + assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, caseSensitive)); var arr = coll.findOne().a; assert.eq(arr.length, 3); + assert.eq(arr[0], "1230"); + assert.eq(arr[1], "1234"); + assert.eq(arr[2], "124"); + + // "124" < "1230" < "1234" (numeric ordering) + coll.drop(); + assert.writeOK(coll.insert({a: ["1234", "124"]})); + assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}}, numericOrdering)); + arr = coll.findOne().a; + assert.eq(arr.length, 3); assert.eq(arr[0], "124"); assert.eq(arr[1], "1230"); assert.eq(arr[2], "1234"); - - // $ positional operator respects query collation on $set. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - // "foo" != "FOO" (case-sensitive) so no update occurs. - assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive)); - var arr = coll.findOne().a; - assert.eq(arr.length, 2); - assert.eq(arr[0], "foo"); - assert.eq(arr[1], "FOO"); - - // "foo" == "FOO" (case-insensitive) so no update occurs. - assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive)); - var arr = coll.findOne().a; - assert.eq(arr.length, 2); - assert.eq(arr[0], "FOO"); - assert.eq(arr[1], "FOO"); - } - - // $ positional operator respects collection default collation on $set. +} + +// $push with $sort respects collection default collation. 
+coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), numericOrdering)); +assert.writeOK(coll.insert({a: ["1234", "124"]})); +assert.writeOK(coll.update({}, {$push: {a: {$each: ["1230"], $sort: 1}}})); +var arr = coll.findOne().a; +assert.eq(arr.length, 3); +assert.eq(arr[0], "124"); +assert.eq(arr[1], "1230"); +assert.eq(arr[2], "1234"); + +// $ positional operator respects query collation on $set. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); + + // "foo" != "FOO" (case-sensitive) so no update occurs. assert.writeOK(coll.insert({a: ["foo", "FOO"]})); - assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}})); + assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseSensitive)); var arr = coll.findOne().a; assert.eq(arr.length, 2); - assert.eq(arr[0], "FOO"); + assert.eq(arr[0], "foo"); assert.eq(arr[1], "FOO"); - // Pipeline-style update respects collection default collation. - // We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE - // which cannot differentiate an update object from an array. - if (db.getMongo().writeMode() === "commands") { - assert(coll.drop()); - assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked( - coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}])); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - } - - // Pipeline-style update respects query collation. - if (db.getMongo().writeMode() === "commands") { - // Case sensitive $indexOfArray on "B" matches "B". - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.update( - {}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); - assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked( - coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); - assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.update( - {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); - assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); - - // Case insensitive $indexOfArray on "B" matches "b". - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.update( - {}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.update( - {}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.update( - {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - - // Collation is respected for pipeline-style bulk update. 
- assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.bulkWrite([{ - updateOne: { - filter: {}, - update: [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive.collation - } - }])); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.bulkWrite([{ - updateOne: { - filter: {}, - update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive.collation - } - }])); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - - assert(coll.drop()); - assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); - assert.commandWorked(coll.bulkWrite([{ - updateOne: { - filter: {}, - update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], - collation: caseInsensitive.collation - } - }])); - assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); - } + // "foo" == "FOO" (case-insensitive) so no update occurs. + assert.writeOK(coll.insert({a: ["foo", "FOO"]})); + assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}}, caseInsensitive)); + var arr = coll.findOne().a; + assert.eq(arr.length, 2); + assert.eq(arr[0], "FOO"); + assert.eq(arr[1], "FOO"); +} + +// $ positional operator respects collection default collation on $set. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +assert.writeOK(coll.insert({a: ["foo", "FOO"]})); +assert.writeOK(coll.update({a: "FOO"}, {$set: {"a.$": "FOO"}})); +var arr = coll.findOne().a; +assert.eq(arr.length, 2); +assert.eq(arr[0], "FOO"); +assert.eq(arr[1], "FOO"); + +// Pipeline-style update respects collection default collation. +// We restrict testing pipeline-style update to commands as they are not supported for OP_UPDATE +// which cannot differentiate an update object from an array. +if (db.getMongo().writeMode() === "commands") { + assert(coll.drop()); + assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked(coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}])); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); +} + +// Pipeline-style update respects query collation. +if (db.getMongo().writeMode() === "commands") { + // Case sensitive $indexOfArray on "B" matches "B". + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked( + coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); + assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked( + coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); + assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked( + coll.update({}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseSensitive)); + assert.eq(coll.findOne().newField, 5, `actual=${coll.findOne()}`); + + // Case insensitive $indexOfArray on "B" matches "b". 
+ assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked( + coll.update({}, [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked( + coll.update({}, [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked(coll.update( + {}, [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], caseInsensitive)); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); + + // Collation is respected for pipeline-style bulk update. + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked(coll.bulkWrite([{ + updateOne: { + filter: {}, + update: [{$addFields: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive.collation + } + }])); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked(coll.bulkWrite([{ + updateOne: { + filter: {}, + update: [{$project: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive.collation + } + }])); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); + + assert(coll.drop()); + assert.commandWorked(coll.insert({x: [1, 2, "a", "b", "c", "B"]})); + assert.commandWorked(coll.bulkWrite([{ + updateOne: { + filter: {}, + update: [{$replaceWith: {newField: {$indexOfArray: ["$x", "B"]}}}], + collation: caseInsensitive.collation + } + }])); + assert.eq(coll.findOne().newField, 3, `actual=${coll.findOne()}`); +} })(); diff --git a/jstests/core/collation_with_reverse_index.js b/jstests/core/collation_with_reverse_index.js index af246187348..d586038b8b8 100644 --- a/jstests/core/collation_with_reverse_index.js +++ b/jstests/core/collation_with_reverse_index.js @@ -1,12 +1,12 @@ // Regression test for SERVER-34846. (function() { - const coll = db.collation_with_reverse_index; - coll.drop(); +const coll = db.collation_with_reverse_index; +coll.drop(); - coll.insertOne({int: 1, text: "hello world"}); - coll.createIndex({int: -1, text: -1}, {collation: {locale: "en", strength: 1}}); - const res = coll.find({int: 1}, {_id: 0, int: 1, text: 1}).toArray(); +coll.insertOne({int: 1, text: "hello world"}); +coll.createIndex({int: -1, text: -1}, {collation: {locale: "en", strength: 1}}); +const res = coll.find({int: 1}, {_id: 0, int: 1, text: 1}).toArray(); - assert.eq(res.length, 1); - assert.eq(res[0].text, "hello world"); +assert.eq(res.length, 1); +assert.eq(res[0].text, "hello world"); })(); diff --git a/jstests/core/collmod_bad_spec.js b/jstests/core/collmod_bad_spec.js index a41e3c4f46e..c0519f375f5 100644 --- a/jstests/core/collmod_bad_spec.js +++ b/jstests/core/collmod_bad_spec.js @@ -7,21 +7,21 @@ // Tests that a collMod with a bad specification does not cause any changes, and does not crash the // server. 
(function() { - "use strict"; +"use strict"; - var collName = "collModBadSpec"; - var coll = db.getCollection(collName); +var collName = "collModBadSpec"; +var coll = db.getCollection(collName); - coll.drop(); - assert.commandWorked(db.createCollection(collName)); +coll.drop(); +assert.commandWorked(db.createCollection(collName)); - // Get the original collection options for the collection. - var originalResult = db.getCollectionInfos({name: collName}); +// Get the original collection options for the collection. +var originalResult = db.getCollectionInfos({name: collName}); - // Issue an invalid command. - assert.commandFailed(coll.runCommand("collMod", {validationLevel: "off", unknownField: "x"})); +// Issue an invalid command. +assert.commandFailed(coll.runCommand("collMod", {validationLevel: "off", unknownField: "x"})); - // Make sure the options are unchanged. - var newResult = db.getCollectionInfos({name: collName}); - assert.eq(originalResult, newResult); +// Make sure the options are unchanged. +var newResult = db.getCollectionInfos({name: collName}); +assert.eq(originalResult, newResult); })(); diff --git a/jstests/core/collmod_without_uuid.js b/jstests/core/collmod_without_uuid.js index 5beb1864ad0..d3b2ca5a287 100644 --- a/jstests/core/collmod_without_uuid.js +++ b/jstests/core/collmod_without_uuid.js @@ -10,20 +10,20 @@ */ (function() { - "use strict"; - const collName = "collmod_without_uuid"; +"use strict"; +const collName = "collmod_without_uuid"; - function checkUUIDs() { - let infos = db.getCollectionInfos(); - assert(infos.every((coll) => coll.name != collName || coll.info.uuid != undefined), - "Not all collections have UUIDs: " + tojson({infos})); - } +function checkUUIDs() { + let infos = db.getCollectionInfos(); + assert(infos.every((coll) => coll.name != collName || coll.info.uuid != undefined), + "Not all collections have UUIDs: " + tojson({infos})); +} - db[collName].drop(); - assert.writeOK(db[collName].insert({})); - checkUUIDs(); - let cmd = {applyOps: [{ns: "test.$cmd", op: "c", o: {collMod: collName}}]}; - let res = db.runCommand(cmd); - assert.commandWorked(res, tojson(cmd)); - checkUUIDs(); +db[collName].drop(); +assert.writeOK(db[collName].insert({})); +checkUUIDs(); +let cmd = {applyOps: [{ns: "test.$cmd", op: "c", o: {collMod: collName}}]}; +let res = db.runCommand(cmd); +assert.commandWorked(res, tojson(cmd)); +checkUUIDs(); })(); diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js index 7a48c4df7a8..88431388eca 100644 --- a/jstests/core/commands_namespace_parsing.js +++ b/jstests/core/commands_namespace_parsing.js @@ -14,345 +14,336 @@ // Note that for each command, a properly formatted command object must be passed to the helper // function, regardless of the namespace used in the command object. (function() { - "use strict"; - - const isFullyQualified = true; - const isNotFullyQualified = false; - const isAdminCommand = true; - const isNotAdminCommand = false; - - // If the command expects the namespace to be fully qualified, set `isFullyQualified` to true. - // If the command must be run against the admin database, set `isAdminCommand` to true. - function assertFailsWithInvalidNamespacesForField( - field, command, isFullyQualified, isAdminCommand) { - const invalidNamespaces = []; - invalidNamespaces.push(isFullyQualified ? "mydb." : ""); - invalidNamespaces.push(isFullyQualified ? "mydb.\0" : "\0"); - invalidNamespaces.push(isFullyQualified ? 
"mydb.a\0b" : "a\0b"); - - const cmds = []; - for (let ns of invalidNamespaces) { - const cmd = Object.extend({}, command, /* deep copy */ true); - - const fieldNames = field.split("."); - const lastFieldNameIndex = fieldNames.length - 1; - let objToUpdate = cmd; - for (let i = 0; i < lastFieldNameIndex; i++) { - objToUpdate = objToUpdate[fieldNames[i]]; - } - objToUpdate[fieldNames[lastFieldNameIndex]] = ns; - - cmds.push(cmd); +"use strict"; + +const isFullyQualified = true; +const isNotFullyQualified = false; +const isAdminCommand = true; +const isNotAdminCommand = false; + +// If the command expects the namespace to be fully qualified, set `isFullyQualified` to true. +// If the command must be run against the admin database, set `isAdminCommand` to true. +function assertFailsWithInvalidNamespacesForField( + field, command, isFullyQualified, isAdminCommand) { + const invalidNamespaces = []; + invalidNamespaces.push(isFullyQualified ? "mydb." : ""); + invalidNamespaces.push(isFullyQualified ? "mydb.\0" : "\0"); + invalidNamespaces.push(isFullyQualified ? "mydb.a\0b" : "a\0b"); + + const cmds = []; + for (let ns of invalidNamespaces) { + const cmd = Object.extend({}, command, /* deep copy */ true); + + const fieldNames = field.split("."); + const lastFieldNameIndex = fieldNames.length - 1; + let objToUpdate = cmd; + for (let i = 0; i < lastFieldNameIndex; i++) { + objToUpdate = objToUpdate[fieldNames[i]]; } + objToUpdate[fieldNames[lastFieldNameIndex]] = ns; - const dbCmd = isAdminCommand ? db.adminCommand : db.runCommand; - for (let cmd of cmds) { - assert.commandFailedWithCode(dbCmd.apply(db, [cmd]), ErrorCodes.InvalidNamespace); - } + cmds.push(cmd); } - const isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - const isMongos = (isMaster.msg === "isdbgrid"); - - db.commands_namespace_parsing.drop(); - assert.writeOK(db.commands_namespace_parsing.insert({a: 1})); - - // Test aggregate fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "aggregate", {aggregate: "", pipeline: []}, isNotFullyQualified, isNotAdminCommand); - - // Test count fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "count", {count: ""}, isNotFullyQualified, isNotAdminCommand); - - // Test distinct fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "distinct", {distinct: "", key: "a"}, isNotFullyQualified, isNotAdminCommand); - - // Test mapReduce fails with an invalid input collection name. - assertFailsWithInvalidNamespacesForField("mapreduce", - { - mapreduce: "", - map: function() { - emit(this.a, 1); - }, - reduce: function(key, values) { - return Array.sum(values); - }, - out: "commands_namespace_parsing_out" - }, - isNotFullyQualified, - isNotAdminCommand); - // Test mapReduce fails with an invalid output collection name. - assertFailsWithInvalidNamespacesForField("out", - { - mapreduce: "commands_namespace_parsing", - map: function() { - emit(this.a, 1); - }, - reduce: function(key, values) { - return Array.sum(values); - }, - out: "" - }, - isNotFullyQualified, - isNotAdminCommand); - - if (!isMongos) { - // Test geoSearch fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "geoSearch", - {geoSearch: "", search: {}, near: [0.0, 0.0], maxDistance: 10}, - isNotFullyQualified, - isNotAdminCommand); + const dbCmd = isAdminCommand ? 
db.adminCommand : db.runCommand; + for (let cmd of cmds) { + assert.commandFailedWithCode(dbCmd.apply(db, [cmd]), ErrorCodes.InvalidNamespace); } +} - // Test find fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "find", {find: ""}, isNotFullyQualified, isNotAdminCommand); - - // Test insert fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("insert", - {insert: "", documents: [{q: {a: 1}, u: {a: 2}}]}, - isNotFullyQualified, - isNotAdminCommand); - - // Test update fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("update", - {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}, - isNotFullyQualified, - isNotAdminCommand); - - // Test delete fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("delete", - {delete: "", deletes: [{q: {a: 1}, limit: 1}]}, - isNotFullyQualified, - isNotAdminCommand); +const isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +const isMongos = (isMaster.msg === "isdbgrid"); - // Test findAndModify fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("findAndModify", - {findAndModify: "", update: {a: 2}}, - isNotFullyQualified, - isNotAdminCommand); +db.commands_namespace_parsing.drop(); +assert.writeOK(db.commands_namespace_parsing.insert({a: 1})); - // Test getMore fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("collection", - {getMore: NumberLong("123456"), collection: ""}, - isNotFullyQualified, - isNotAdminCommand); +// Test aggregate fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "aggregate", {aggregate: "", pipeline: []}, isNotFullyQualified, isNotAdminCommand); - if (!isMongos) { - // Test godinsert fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand); - } +// Test count fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "count", {count: ""}, isNotFullyQualified, isNotAdminCommand); - // Test planCacheListFilters fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "planCacheListFilters", {planCacheListFilters: ""}, isNotFullyQualified, isNotAdminCommand); +// Test distinct fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "distinct", {distinct: "", key: "a"}, isNotFullyQualified, isNotAdminCommand); - // Test planCacheSetFilter fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("planCacheSetFilter", - {planCacheSetFilter: "", query: {}, indexes: [{a: 1}]}, - isNotFullyQualified, - isNotAdminCommand); - - // Test planCacheClearFilters fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("planCacheClearFilters", - {planCacheClearFilters: ""}, - isNotFullyQualified, - isNotAdminCommand); - - // Test planCacheListQueryShapes fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("planCacheListQueryShapes", - {planCacheListQueryShapes: ""}, - isNotFullyQualified, - isNotAdminCommand); - - // Test planCacheListPlans fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("planCacheListPlans", - {planCacheListPlans: "", query: {}}, - isNotFullyQualified, - isNotAdminCommand); - - // Test planCacheClear fails with an invalid collection name. 
- assertFailsWithInvalidNamespacesForField( - "planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand); - - if (!isMongos) { - // Test cleanupOrphaned fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand); - } - - if (isMongos) { - // Test enableSharding fails with an invalid database name. - assertFailsWithInvalidNamespacesForField( - "enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand); - - // Test mergeChunks fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "mergeChunks", - {mergeChunks: "", bounds: [{_id: MinKey()}, {_id: MaxKey()}]}, - isFullyQualified, - isAdminCommand); - - // Test shardCollection fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("shardCollection", - {shardCollection: "", key: {_id: 1}}, - isFullyQualified, - isAdminCommand); - - // Test split fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "split", {split: "", find: {}}, isFullyQualified, isAdminCommand); - - // Test moveChunk fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "moveChunk", - {moveChunk: "", find: {}, to: "commands_namespace_parsing_out"}, - isNotFullyQualified, - isAdminCommand); - - // Test movePrimary fails with an invalid database name. - assertFailsWithInvalidNamespacesForField( - "movePrimary", {movePrimary: "", to: "dummy"}, isNotFullyQualified, isAdminCommand); - - // Test updateZoneKeyRange fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "updateZoneKeyRange", - {updateZoneKeyRange: "", min: {_id: MinKey()}, max: {_id: MaxKey()}, zone: "3"}, - isNotFullyQualified, - isAdminCommand); - } +// Test mapReduce fails with an invalid input collection name. +assertFailsWithInvalidNamespacesForField("mapreduce", + { + mapreduce: "", + map: function() { + emit(this.a, 1); + }, + reduce: function(key, values) { + return Array.sum(values); + }, + out: "commands_namespace_parsing_out" + }, + isNotFullyQualified, + isNotAdminCommand); +// Test mapReduce fails with an invalid output collection name. +assertFailsWithInvalidNamespacesForField("out", + { + mapreduce: "commands_namespace_parsing", + map: function() { + emit(this.a, 1); + }, + reduce: function(key, values) { + return Array.sum(values); + }, + out: "" + }, + isNotFullyQualified, + isNotAdminCommand); - // Test renameCollection fails with an invalid source collection name. +if (!isMongos) { + // Test geoSearch fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "renameCollection", {renameCollection: "", to: "test.b"}, isFullyQualified, isAdminCommand); - // Test renameCollection fails with an invalid target collection name. + "geoSearch", + {geoSearch: "", search: {}, near: [0.0, 0.0], maxDistance: 10}, + isNotFullyQualified, + isNotAdminCommand); +} + +// Test find fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "find", {find: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test insert fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("insert", + {insert: "", documents: [{q: {a: 1}, u: {a: 2}}]}, + isNotFullyQualified, + isNotAdminCommand); + +// Test update fails with an invalid collection name. 
+assertFailsWithInvalidNamespacesForField("update", + {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}, + isNotFullyQualified, + isNotAdminCommand); + +// Test delete fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("delete", + {delete: "", deletes: [{q: {a: 1}, limit: 1}]}, + isNotFullyQualified, + isNotAdminCommand); + +// Test findAndModify fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "findAndModify", {findAndModify: "", update: {a: 2}}, isNotFullyQualified, isNotAdminCommand); + +// Test getMore fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("collection", + {getMore: NumberLong("123456"), collection: ""}, + isNotFullyQualified, + isNotAdminCommand); + +if (!isMongos) { + // Test godinsert fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "to", {renameCollection: "test.b", to: ""}, isFullyQualified, isAdminCommand); - - // Test drop fails with an invalid collection name. + "godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand); +} + +// Test planCacheListFilters fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "planCacheListFilters", {planCacheListFilters: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test planCacheSetFilter fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("planCacheSetFilter", + {planCacheSetFilter: "", query: {}, indexes: [{a: 1}]}, + isNotFullyQualified, + isNotAdminCommand); + +// Test planCacheClearFilters fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "planCacheClearFilters", {planCacheClearFilters: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test planCacheListQueryShapes fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("planCacheListQueryShapes", + {planCacheListQueryShapes: ""}, + isNotFullyQualified, + isNotAdminCommand); + +// Test planCacheListPlans fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("planCacheListPlans", + {planCacheListPlans: "", query: {}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test planCacheClear fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand); + +if (!isMongos) { + // Test cleanupOrphaned fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "drop", {drop: ""}, isNotFullyQualified, isNotAdminCommand); + "cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand); +} - // Test create fails with an invalid collection name. +if (isMongos) { + // Test enableSharding fails with an invalid database name. assertFailsWithInvalidNamespacesForField( - "create", {create: ""}, isNotFullyQualified, isNotAdminCommand); - - if (!isMongos) { - // Test cloneCollectionAsCapped fails with an invalid source collection name. - assertFailsWithInvalidNamespacesForField( - "cloneCollectionAsCapped", - {cloneCollectionAsCapped: "", toCollection: "b", size: 1024}, - isNotFullyQualified, - isNotAdminCommand); - // Test cloneCollectionAsCapped fails with an invalid target collection name. - assertFailsWithInvalidNamespacesForField( - "toCollection", - {cloneCollectionAsCapped: "commands_namespace_parsing", toCollection: "", size: 1024}, - isNotFullyQualified, - isNotAdminCommand); - - // Test convertToCapped fails with an invalid collection name. 
- assertFailsWithInvalidNamespacesForField("convertToCapped", - {convertToCapped: "", size: 1024}, - isNotFullyQualified, - isNotAdminCommand); - } + "enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand); - // Test filemd5 fails with an invalid collection name. - // Note: for this command, it is OK to pass 'root: ""', so do not use the helper function. - assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "\0"}), - ErrorCodes.InvalidNamespace); - assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "a\0b"}), - ErrorCodes.InvalidNamespace); - - // Test createIndexes fails with an invalid collection name. + // Test mergeChunks fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "createIndexes", - {createIndexes: "", indexes: [{key: {a: 1}, name: "a1"}]}, - isNotFullyQualified, - isNotAdminCommand); + "mergeChunks", + {mergeChunks: "", bounds: [{_id: MinKey()}, {_id: MaxKey()}]}, + isFullyQualified, + isAdminCommand); - // Test listIndexes fails with an invalid collection name. + // Test shardCollection fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "listIndexes", {listIndexes: ""}, isNotFullyQualified, isNotAdminCommand); + "shardCollection", {shardCollection: "", key: {_id: 1}}, isFullyQualified, isAdminCommand); - // Test dropIndexes fails with an invalid collection name. + // Test split fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand); - - if (!isMongos) { - // Test compact fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand); - } + "split", {split: "", find: {}}, isFullyQualified, isAdminCommand); - // Test collMod fails with an invalid collection name. + // Test moveChunk fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "collMod", - {collMod: "", index: {keyPattern: {a: 1}, expireAfterSeconds: 60}}, + "moveChunk", + {moveChunk: "", find: {}, to: "commands_namespace_parsing_out"}, isNotFullyQualified, - isNotAdminCommand); + isAdminCommand); - // Test reIndex fails with an invalid collection name. - if (!isMongos) { - assertFailsWithInvalidNamespacesForField( - "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand); - } - - // Test collStats fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "collStats", {collStats: ""}, isNotFullyQualified, isNotAdminCommand); - - // Test dataSize fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField( - "dataSize", {dataSize: ""}, isFullyQualified, isNotAdminCommand); - - // Test explain of aggregate fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("aggregate", - {aggregate: "", pipeline: [], explain: true}, - isNotFullyQualified, - isNotAdminCommand); - - // Test explain of count fails with an invalid collection name. + // Test movePrimary fails with an invalid database name. assertFailsWithInvalidNamespacesForField( - "explain.count", {explain: {count: ""}}, isNotFullyQualified, isNotAdminCommand); - - // Test explain of distinct fails with an invalid collection name. 
- assertFailsWithInvalidNamespacesForField("explain.distinct", - {explain: {distinct: "", key: "a"}}, - isNotFullyQualified, - isNotAdminCommand); + "movePrimary", {movePrimary: "", to: "dummy"}, isNotFullyQualified, isAdminCommand); - // Test explain of find fails with an invalid collection name. + // Test updateZoneKeyRange fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( - "explain.find", {explain: {find: ""}}, isNotFullyQualified, isNotAdminCommand); - - // Test explain of findAndModify fails with an invalid collection name. - assertFailsWithInvalidNamespacesForField("explain.findAndModify", - {explain: {findAndModify: "", update: {a: 2}}}, - isNotFullyQualified, - isNotAdminCommand); - - // Test explain of delete fails with an invalid collection name. + "updateZoneKeyRange", + {updateZoneKeyRange: "", min: {_id: MinKey()}, max: {_id: MaxKey()}, zone: "3"}, + isNotFullyQualified, + isAdminCommand); +} + +// Test renameCollection fails with an invalid source collection name. +assertFailsWithInvalidNamespacesForField( + "renameCollection", {renameCollection: "", to: "test.b"}, isFullyQualified, isAdminCommand); +// Test renameCollection fails with an invalid target collection name. +assertFailsWithInvalidNamespacesForField( + "to", {renameCollection: "test.b", to: ""}, isFullyQualified, isAdminCommand); + +// Test drop fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "drop", {drop: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test create fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "create", {create: ""}, isNotFullyQualified, isNotAdminCommand); + +if (!isMongos) { + // Test cloneCollectionAsCapped fails with an invalid source collection name. assertFailsWithInvalidNamespacesForField( - "explain.delete", - {explain: {delete: "", deletes: [{q: {a: 1}, limit: 1}]}}, + "cloneCollectionAsCapped", + {cloneCollectionAsCapped: "", toCollection: "b", size: 1024}, isNotFullyQualified, isNotAdminCommand); - - // Test explain of update fails with an invalid collection name. + // Test cloneCollectionAsCapped fails with an invalid target collection name. assertFailsWithInvalidNamespacesForField( - "explain.update", - {explain: {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}}, + "toCollection", + {cloneCollectionAsCapped: "commands_namespace_parsing", toCollection: "", size: 1024}, isNotFullyQualified, isNotAdminCommand); - // Test validate fails with an invalid collection name. + // Test convertToCapped fails with an invalid collection name. + assertFailsWithInvalidNamespacesForField("convertToCapped", + {convertToCapped: "", size: 1024}, + isNotFullyQualified, + isNotAdminCommand); +} + +// Test filemd5 fails with an invalid collection name. +// Note: for this command, it is OK to pass 'root: ""', so do not use the helper function. +assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "\0"}), + ErrorCodes.InvalidNamespace); +assert.commandFailedWithCode(db.runCommand({filemd5: ObjectId(), root: "a\0b"}), + ErrorCodes.InvalidNamespace); + +// Test createIndexes fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("createIndexes", + {createIndexes: "", indexes: [{key: {a: 1}, name: "a1"}]}, + isNotFullyQualified, + isNotAdminCommand); + +// Test listIndexes fails with an invalid collection name. 
+assertFailsWithInvalidNamespacesForField( + "listIndexes", {listIndexes: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test dropIndexes fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand); + +if (!isMongos) { + // Test compact fails with an invalid collection name. + assertFailsWithInvalidNamespacesForField( + "compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand); +} + +// Test collMod fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "collMod", + {collMod: "", index: {keyPattern: {a: 1}, expireAfterSeconds: 60}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test reIndex fails with an invalid collection name. +if (!isMongos) { assertFailsWithInvalidNamespacesForField( - "validate", {validate: ""}, isNotFullyQualified, isNotAdminCommand); + "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand); +} + +// Test collStats fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "collStats", {collStats: ""}, isNotFullyQualified, isNotAdminCommand); + +// Test dataSize fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "dataSize", {dataSize: ""}, isFullyQualified, isNotAdminCommand); + +// Test explain of aggregate fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("aggregate", + {aggregate: "", pipeline: [], explain: true}, + isNotFullyQualified, + isNotAdminCommand); + +// Test explain of count fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "explain.count", {explain: {count: ""}}, isNotFullyQualified, isNotAdminCommand); + +// Test explain of distinct fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("explain.distinct", + {explain: {distinct: "", key: "a"}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test explain of find fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField( + "explain.find", {explain: {find: ""}}, isNotFullyQualified, isNotAdminCommand); + +// Test explain of findAndModify fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("explain.findAndModify", + {explain: {findAndModify: "", update: {a: 2}}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test explain of delete fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("explain.delete", + {explain: {delete: "", deletes: [{q: {a: 1}, limit: 1}]}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test explain of update fails with an invalid collection name. +assertFailsWithInvalidNamespacesForField("explain.update", + {explain: {update: "", updates: [{q: {a: 1}, u: {a: 2}}]}}, + isNotFullyQualified, + isNotAdminCommand); + +// Test validate fails with an invalid collection name. 
+assertFailsWithInvalidNamespacesForField( + "validate", {validate: ""}, isNotFullyQualified, isNotAdminCommand); })(); diff --git a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js index 17396961a74..bb471851bb5 100644 --- a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js +++ b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js @@ -10,52 +10,52 @@ */ (function() { - "use strict"; - var collName = 'leaves'; +"use strict"; +var collName = 'leaves'; - var commands = []; +var commands = []; - commands.push({find: collName, query: {_id: 1}}); +commands.push({find: collName, query: {_id: 1}}); - commands.push({distinct: collName, key: "_id"}); +commands.push({distinct: collName, key: "_id"}); - commands.push({count: collName, query: {type: 'oak'}}); +commands.push({count: collName, query: {type: 'oak'}}); - commands.push({ - mapReduce: collName, - map: function() { - this.tags.forEach(function(z) { - emit(z, 1); - }); - }, - reduce: function(key, values) { - return {count: values.length}; - }, - out: {inline: 1} - }); - - function assertWriteConcernNotSupportedError(res) { - assert.commandFailed(res); - assert.eq(res.code, ErrorCodes.InvalidOptions); - assert(!res.writeConcernError); - } - - // Test a variety of valid and invalid writeConcerns to confirm that they still all get - // the correct error. - var writeConcerns = [{w: 'invalid'}, {w: 1}]; - - function testUnsupportedWriteConcern(wc, cmd) { - cmd.writeConcern = wc; - jsTest.log("Testing " + tojson(cmd)); - - var res = db.runCommand(cmd); - assertWriteConcernNotSupportedError(res); - } - - // Verify that each command gets a writeConcernNotSupported error. - commands.forEach(function(cmd) { - writeConcerns.forEach(function(wc) { - testUnsupportedWriteConcern(wc, cmd); +commands.push({ + mapReduce: collName, + map: function() { + this.tags.forEach(function(z) { + emit(z, 1); }); + }, + reduce: function(key, values) { + return {count: values.length}; + }, + out: {inline: 1} +}); + +function assertWriteConcernNotSupportedError(res) { + assert.commandFailed(res); + assert.eq(res.code, ErrorCodes.InvalidOptions); + assert(!res.writeConcernError); +} + +// Test a variety of valid and invalid writeConcerns to confirm that they still all get +// the correct error. +var writeConcerns = [{w: 'invalid'}, {w: 1}]; + +function testUnsupportedWriteConcern(wc, cmd) { + cmd.writeConcern = wc; + jsTest.log("Testing " + tojson(cmd)); + + var res = db.runCommand(cmd); + assertWriteConcernNotSupportedError(res); +} + +// Verify that each command gets a writeConcernNotSupported error. +commands.forEach(function(cmd) { + writeConcerns.forEach(function(wc) { + testUnsupportedWriteConcern(wc, cmd); }); +}); })(); diff --git a/jstests/core/commands_with_uuid.js b/jstests/core/commands_with_uuid.js index 69b889b394a..c64384675c9 100644 --- a/jstests/core/commands_with_uuid.js +++ b/jstests/core/commands_with_uuid.js @@ -1,102 +1,108 @@ /** -* Tests that using a UUID as an argument to commands will retrieve results from the correct -* collection. -* -* @tags: [ -* requires_fastcount, -* -* incompatible_with_embedded, -* ] -*/ + * Tests that using a UUID as an argument to commands will retrieve results from the correct + * collection. 
+ * + * @tags: [ + * requires_fastcount, + * + * incompatible_with_embedded, + * ] + */ (function() { - 'use strict'; - const mainCollName = 'main_coll'; - const subCollName = 'sub_coll'; - const kOtherDbName = 'commands_with_uuid_db'; - db.runCommand({drop: mainCollName}); - db.runCommand({drop: subCollName}); - assert.commandWorked(db.runCommand({create: mainCollName})); - assert.commandWorked(db.runCommand({create: subCollName})); +'use strict'; +const mainCollName = 'main_coll'; +const subCollName = 'sub_coll'; +const kOtherDbName = 'commands_with_uuid_db'; +db.runCommand({drop: mainCollName}); +db.runCommand({drop: subCollName}); +assert.commandWorked(db.runCommand({create: mainCollName})); +assert.commandWorked(db.runCommand({create: subCollName})); - // Check if UUIDs are enabled / supported. - let collectionInfos = db.getCollectionInfos({name: mainCollName}); - let uuid = collectionInfos[0].info.uuid; - if (uuid == null) { - return; - } +// Check if UUIDs are enabled / supported. +let collectionInfos = db.getCollectionInfos({name: mainCollName}); +let uuid = collectionInfos[0].info.uuid; +if (uuid == null) { + return; +} - // No support for UUIDs on mongos. - const isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - const isMongos = (isMaster.msg === "isdbgrid"); - if (isMongos) { - return; - } +// No support for UUIDs on mongos. +const isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +const isMongos = (isMaster.msg === "isdbgrid"); +if (isMongos) { + return; +} - assert.commandWorked(db.runCommand({insert: mainCollName, documents: [{fooField: 'FOO'}]})); - assert.commandWorked( - db.runCommand({insert: subCollName, documents: [{fooField: 'BAR'}, {fooField: 'FOOBAR'}]})); +assert.commandWorked(db.runCommand({insert: mainCollName, documents: [{fooField: 'FOO'}]})); +assert.commandWorked( + db.runCommand({insert: subCollName, documents: [{fooField: 'BAR'}, {fooField: 'FOOBAR'}]})); - // Ensure passing a UUID to find retrieves results from the correct collection. - let cmd = {find: uuid}; - let res = db.runCommand(cmd); - assert.commandWorked(res, 'could not run ' + tojson(cmd)); - let cursor = new DBCommandCursor(db, res); - let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); - assert(cursor.hasNext(), errMsg); - let doc = cursor.next(); - assert.eq(doc.fooField, 'FOO'); - assert(!cursor.hasNext(), 'expected to have exhausted cursor for results ' + tojson(res)); +// Ensure passing a UUID to find retrieves results from the correct collection. +let cmd = {find: uuid}; +let res = db.runCommand(cmd); +assert.commandWorked(res, 'could not run ' + tojson(cmd)); +let cursor = new DBCommandCursor(db, res); +let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); +assert(cursor.hasNext(), errMsg); +let doc = cursor.next(); +assert.eq(doc.fooField, 'FOO'); +assert(!cursor.hasNext(), 'expected to have exhausted cursor for results ' + tojson(res)); - // Although we check for both string type and BinData type for the collection identifier - // argument to a find command to accomodate for searching both by name and by UUID, if an - // invalid type is passed, the parsing error message should say the expected type is string and - // not BinData to avoid confusing the user. 
- cmd = {find: 1.0}; - res = db.runCommand(cmd); - assert.commandFailed(res, 'expected ' + tojson(cmd) + ' to fail.'); - assert(res.errmsg.includes('field must be of BSON type string'), - 'expected the error message of ' + tojson(res) + ' to include string type'); +// Although we check for both string type and BinData type for the collection identifier +// argument to a find command to accomodate for searching both by name and by UUID, if an +// invalid type is passed, the parsing error message should say the expected type is string and +// not BinData to avoid confusing the user. +cmd = { + find: 1.0 +}; +res = db.runCommand(cmd); +assert.commandFailed(res, 'expected ' + tojson(cmd) + ' to fail.'); +assert(res.errmsg.includes('field must be of BSON type string'), + 'expected the error message of ' + tojson(res) + ' to include string type'); - // Ensure passing a missing UUID to commands taking UUIDs uasserts that the UUID is not found. - const missingUUID = UUID(); - for (cmd of[{count: missingUUID}, {find: missingUUID}, {listIndexes: missingUUID}]) { - assert.commandFailedWithCode( - db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd)); - } +// Ensure passing a missing UUID to commands taking UUIDs uasserts that the UUID is not found. +const missingUUID = UUID(); +for (cmd of [{count: missingUUID}, {find: missingUUID}, {listIndexes: missingUUID}]) { + assert.commandFailedWithCode( + db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd)); +} - // Ensure passing a UUID to listIndexes retrieves results from the correct collection. - cmd = {listIndexes: uuid}; - res = db.runCommand(cmd); - assert.commandWorked(res, 'could not run ' + tojson(cmd)); - cursor = new DBCommandCursor(db, res); - cursor.forEach(function(doc) { - assert.eq(doc.ns, 'test.' + mainCollName); - }); +// Ensure passing a UUID to listIndexes retrieves results from the correct collection. +cmd = { + listIndexes: uuid +}; +res = db.runCommand(cmd); +assert.commandWorked(res, 'could not run ' + tojson(cmd)); +cursor = new DBCommandCursor(db, res); +cursor.forEach(function(doc) { + assert.eq(doc.ns, 'test.' + mainCollName); +}); - // Ensure passing a UUID to count retrieves results from the correct collection. - cmd = {count: uuid}; - res = db.runCommand(cmd); - assert.commandWorked(res, 'could not run ' + tojson(cmd)); - assert.eq(res.n, 1, "expected to count a single document with command: " + tojson(cmd)); +// Ensure passing a UUID to count retrieves results from the correct collection. +cmd = { + count: uuid +}; +res = db.runCommand(cmd); +assert.commandWorked(res, 'could not run ' + tojson(cmd)); +assert.eq(res.n, 1, "expected to count a single document with command: " + tojson(cmd)); - // Test that UUID resolution fails when the UUID belongs to a different database. First, we - // create a collection in another database. - const dbWithUUID = db.getSiblingDB(kOtherDbName); - dbWithUUID.getCollection(mainCollName).drop(); - assert.commandWorked(dbWithUUID.runCommand({create: mainCollName})); - collectionInfos = dbWithUUID.getCollectionInfos({name: mainCollName}); - uuid = collectionInfos[0].info.uuid; - assert.neq(null, uuid); - assert.commandWorked(dbWithUUID.runCommand({find: uuid})); +// Test that UUID resolution fails when the UUID belongs to a different database. First, we +// create a collection in another database. 
+const dbWithUUID = db.getSiblingDB(kOtherDbName); +dbWithUUID.getCollection(mainCollName).drop(); +assert.commandWorked(dbWithUUID.runCommand({create: mainCollName})); +collectionInfos = dbWithUUID.getCollectionInfos({name: mainCollName}); +uuid = collectionInfos[0].info.uuid; +assert.neq(null, uuid); +assert.commandWorked(dbWithUUID.runCommand({find: uuid})); - // Run read commands supporting UUIDs against the original database, passing the UUID from a - // different database, and verify that the UUID resolution fails with the correct error code. We - // also test that the same command succeeds when there is no database mismatch. - for (cmd of[{count: uuid}, {distinct: uuid, key: "a"}, {find: uuid}, {listIndexes: uuid}]) { - assert.commandWorked(dbWithUUID.runCommand(cmd)); - assert.commandFailedWithCode( - db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd)); - } +// Run read commands supporting UUIDs against the original database, passing the UUID from a +// different database, and verify that the UUID resolution fails with the correct error code. We +// also test that the same command succeeds when there is no database mismatch. +for (cmd of [{count: uuid}, {distinct: uuid, key: "a"}, {find: uuid}, {listIndexes: uuid}]) { + assert.commandWorked(dbWithUUID.runCommand(cmd)); + assert.commandFailedWithCode( + db.runCommand(cmd), ErrorCodes.NamespaceNotFound, "command: " + tojson(cmd)); +} }()); diff --git a/jstests/core/compact_keeps_indexes.js b/jstests/core/compact_keeps_indexes.js index 1a050d27469..25b3909df1c 100644 --- a/jstests/core/compact_keeps_indexes.js +++ b/jstests/core/compact_keeps_indexes.js @@ -8,39 +8,38 @@ // ] (function() { - 'use strict'; +'use strict'; - var coll = db.compact_keeps_indexes; +var coll = db.compact_keeps_indexes; - coll.drop(); - coll.insert({_id: 1, x: 1}); - coll.ensureIndex({x: 1}); +coll.drop(); +coll.insert({_id: 1, x: 1}); +coll.ensureIndex({x: 1}); - assert.eq(coll.getIndexes().length, 2); +assert.eq(coll.getIndexes().length, 2); - // force:true is for replset passthroughs - var res = coll.runCommand('compact', {force: true}); - // Some storage engines (for example, inMemoryExperiment) do not support the compact command. - if (res.code == 115) { // CommandNotSupported - return; - } - assert.commandWorked(res); +// force:true is for replset passthroughs +var res = coll.runCommand('compact', {force: true}); +// Some storage engines (for example, inMemoryExperiment) do not support the compact command. 
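The numeric literal in the check that follows is the CommandNotSupported error code; assuming the shell's ErrorCodes map is available here, as it is in the other tests in this directory, an equivalent guard would be:

if (res.code === ErrorCodes.CommandNotSupported) {
    return;  // This storage engine does not implement compact, so there is nothing to verify.
}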
+if (res.code == 115) { // CommandNotSupported + return; +} +assert.commandWorked(res); - assert.eq(coll.getIndexes().length, 2); - assert.eq(coll.find({_id: 1}).itcount(), 1); - assert.eq(coll.find({x: 1}).itcount(), 1); +assert.eq(coll.getIndexes().length, 2); +assert.eq(coll.find({_id: 1}).itcount(), 1); +assert.eq(coll.find({x: 1}).itcount(), 1); - var dropCollectionShell = startParallelShell(function() { - var t = db.getSiblingDB('test_compact_keeps_indexes_drop').testcoll; +var dropCollectionShell = startParallelShell(function() { + var t = db.getSiblingDB('test_compact_keeps_indexes_drop').testcoll; + t.drop(); + for (var i = 0; i < 100; i++) { + t.save({a: 1}); t.drop(); - for (var i = 0; i < 100; i++) { - t.save({a: 1}); - t.drop(); - } - }); - for (var i = 0; i < 10; i++) { - coll.runCommand('compact'); } - dropCollectionShell(); - +}); +for (var i = 0; i < 10; i++) { + coll.runCommand('compact'); +} +dropCollectionShell(); }()); diff --git a/jstests/core/compare_timestamps.js b/jstests/core/compare_timestamps.js index 2440fac3fe1..b88bb003483 100644 --- a/jstests/core/compare_timestamps.js +++ b/jstests/core/compare_timestamps.js @@ -1,9 +1,9 @@ // SERVER-21160: Check that timestamp comparisons are unsigned (function() { - 'use strict'; - var t = db.compare_timestamps; - t.drop(); - assert.writeOK(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"})); - assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"})); - assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned"); +'use strict'; +var t = db.compare_timestamps; +t.drop(); +assert.writeOK(t.insert({a: new Timestamp(0xffffffff, 3), b: "non-zero"})); +assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"})); +assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned"); }()); diff --git a/jstests/core/connection_status.js b/jstests/core/connection_status.js index efb3eb3fd4f..e50122f7cee 100644 --- a/jstests/core/connection_status.js +++ b/jstests/core/connection_status.js @@ -8,90 +8,85 @@ // Tests the connectionStatus command (function() { - "use strict"; - var dbName = 'connection_status'; - var myDB = db.getSiblingDB(dbName); - myDB.dropAllUsers(); +"use strict"; +var dbName = 'connection_status'; +var myDB = db.getSiblingDB(dbName); +myDB.dropAllUsers(); - /** - * Test that the output of connectionStatus makes sense. - */ - function validateConnectionStatus(expectedUser, expectedRole, showPrivileges) { - var connectionStatus = - myDB.runCommand({"connectionStatus": 1, "showPrivileges": showPrivileges}); - assert.commandWorked(connectionStatus); - var authInfo = connectionStatus.authInfo; +/** + * Test that the output of connectionStatus makes sense. + */ +function validateConnectionStatus(expectedUser, expectedRole, showPrivileges) { + var connectionStatus = + myDB.runCommand({"connectionStatus": 1, "showPrivileges": showPrivileges}); + assert.commandWorked(connectionStatus); + var authInfo = connectionStatus.authInfo; - // Test that authenticated users are properly returned. 
- var users = authInfo.authenticatedUsers; - var matches = 0; - var infoStr = tojson(authInfo); - for (var i = 0; i < users.length; i++) { - var user = users[i].user; - var db = users[i].db; - assert(isString(user), - "each authenticatedUsers should have a 'user' string:" + infoStr); - assert(isString(db), "each authenticatedUsers should have a 'db' string:" + infoStr); - if (user === expectedUser.user && db === expectedUser.db) { - matches++; - } + // Test that authenticated users are properly returned. + var users = authInfo.authenticatedUsers; + var matches = 0; + var infoStr = tojson(authInfo); + for (var i = 0; i < users.length; i++) { + var user = users[i].user; + var db = users[i].db; + assert(isString(user), "each authenticatedUsers should have a 'user' string:" + infoStr); + assert(isString(db), "each authenticatedUsers should have a 'db' string:" + infoStr); + if (user === expectedUser.user && db === expectedUser.db) { + matches++; } - assert.eq( - matches, 1, "expected user should be present once in authenticatedUsers:" + infoStr); + } + assert.eq(matches, 1, "expected user should be present once in authenticatedUsers:" + infoStr); - // Test that authenticated roles are properly returned. - var roles = authInfo.authenticatedUserRoles; - matches = 0; - for (var i = 0; i < roles.length; i++) { - var role = roles[i].role; - var db = roles[i].db; - assert(isString(role), - "each authenticatedUserRole should have a 'role' string:" + infoStr); - assert(isString(db), "each authenticatedUserRole should have a 'db' string:" + infoStr); - if (role === expectedRole.role && db === expectedRole.db) { - matches++; - } + // Test that authenticated roles are properly returned. + var roles = authInfo.authenticatedUserRoles; + matches = 0; + for (var i = 0; i < roles.length; i++) { + var role = roles[i].role; + var db = roles[i].db; + assert(isString(role), "each authenticatedUserRole should have a 'role' string:" + infoStr); + assert(isString(db), "each authenticatedUserRole should have a 'db' string:" + infoStr); + if (role === expectedRole.role && db === expectedRole.db) { + matches++; } - // Role will be duplicated when users with the same role are logged in at the same time. - assert.gte( - matches, 1, "expected role should be present in authenticatedUserRoles:" + infoStr); + } + // Role will be duplicated when users with the same role are logged in at the same time. 
+ assert.gte(matches, 1, "expected role should be present in authenticatedUserRoles:" + infoStr); - var privileges = authInfo.authenticatedUserPrivileges; - if (showPrivileges) { - for (var i = 0; i < privileges.length; i++) { - assert( - isObject(privileges[i].resource), - "each authenticatedUserPrivilege should have a 'resource' object:" + infoStr); - var actions = privileges[i].actions; - for (var j = 0; j < actions.length; j++) { - assert(isString(actions[j]), - "each authenticatedUserPrivilege action should be a string:" + infoStr); - } + var privileges = authInfo.authenticatedUserPrivileges; + if (showPrivileges) { + for (var i = 0; i < privileges.length; i++) { + assert(isObject(privileges[i].resource), + "each authenticatedUserPrivilege should have a 'resource' object:" + infoStr); + var actions = privileges[i].actions; + for (var j = 0; j < actions.length; j++) { + assert(isString(actions[j]), + "each authenticatedUserPrivilege action should be a string:" + infoStr); } - - } else { - // Test that privileges are not returned without asking - assert.eq(privileges, - undefined, - "authenticatedUserPrivileges should not be returned by default:" + infoStr); } + + } else { + // Test that privileges are not returned without asking + assert.eq(privileges, + undefined, + "authenticatedUserPrivileges should not be returned by default:" + infoStr); } +} - function test(userName) { - var user = {user: userName, db: dbName}; - var role = {role: "root", db: "admin"}; - myDB.createUser({user: userName, pwd: "weak password", roles: [role]}); - myDB.auth(userName, "weak password"); +function test(userName) { + var user = {user: userName, db: dbName}; + var role = {role: "root", db: "admin"}; + myDB.createUser({user: userName, pwd: "weak password", roles: [role]}); + myDB.auth(userName, "weak password"); - // Validate with and without showPrivileges - validateConnectionStatus(user, role, true); - validateConnectionStatus(user, role, false); + // Validate with and without showPrivileges + validateConnectionStatus(user, role, true); + validateConnectionStatus(user, role, false); - // Clean up. - myDB.dropAllUsers(); - myDB.logout(); - } + // Clean up. 
+ myDB.dropAllUsers(); + myDB.logout(); +} - test("someone"); - test("someone else"); +test("someone"); +test("someone else"); })(); diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js index 27b0b7f7406..0e28150e701 100644 --- a/jstests/core/constructors.js +++ b/jstests/core/constructors.js @@ -26,7 +26,7 @@ function clientEvalConstructorTest(constructorList) { try { eval(constructor); } catch (e) { - throw("valid constructor: " + constructor + " failed in eval context: " + e); + throw ("valid constructor: " + constructor + " failed in eval context: " + e); } }); constructorList.invalid.forEach(function(constructor) { @@ -56,7 +56,7 @@ function mapReduceConstructorTest(constructorList) { res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}}); } catch (e) { - throw("valid constructor: " + constructor + " failed in mapReduce context: " + e); + throw ("valid constructor: " + constructor + " failed in mapReduce context: " + e); } }); constructorList.invalid.forEach(function(constructor) { @@ -83,7 +83,7 @@ function whereConstructorTest(constructorList) { try { t.findOne({$where: constructor}); } catch (e) { - throw("valid constructor: " + constructor + " failed in $where query: " + e); + throw ("valid constructor: " + constructor + " failed in $where query: " + e); } }); constructorList.invalid.forEach(function(constructor) { diff --git a/jstests/core/contained_or_with_nested_or.js b/jstests/core/contained_or_with_nested_or.js index b0add6fdeb1..7407498e1ce 100644 --- a/jstests/core/contained_or_with_nested_or.js +++ b/jstests/core/contained_or_with_nested_or.js @@ -1,41 +1,41 @@ // This test was designed to reproduce a memory leak that was fixed by SERVER-35455. (function() { - "use strict"; +"use strict"; - const coll = db.contained_or_with_nested_or; - coll.drop(); - assert.commandWorked(coll.insert([ - // Should not match the query: - {_id: 0, active: false, loc: "USA", agency: "FBI", vip: false}, - {_id: 1, active: false, loc: "RUS", agency: "OTHER", vip: true}, - {_id: 2, active: true, loc: "RUS", agency: "OTHER", vip: false}, - {_id: 3, active: true, loc: "USA", agency: "OTHER", vip: false}, - {_id: 4, active: true, loc: "UK", agency: "OTHER", vip: false}, - {_id: 5, active: true, loc: "UK", agency: "OTHER", vip: true}, - {_id: 6, active: true}, - // Should match the query: - {_id: 7, active: true, loc: "USA", agency: "FBI", vip: false}, - {_id: 8, active: true, loc: "USA", agency: "CIA", vip: true}, - {_id: 9, active: true, loc: "RUS", agency: "OTHER", vip: true}, - {_id: 10, active: true, loc: "RUS", agency: "KGB"}, - ])); - assert.commandWorked(coll.createIndexes([{loc: 1}, {agency: 1}, {vip: 1}])); +const coll = db.contained_or_with_nested_or; +coll.drop(); +assert.commandWorked(coll.insert([ + // Should not match the query: + {_id: 0, active: false, loc: "USA", agency: "FBI", vip: false}, + {_id: 1, active: false, loc: "RUS", agency: "OTHER", vip: true}, + {_id: 2, active: true, loc: "RUS", agency: "OTHER", vip: false}, + {_id: 3, active: true, loc: "USA", agency: "OTHER", vip: false}, + {_id: 4, active: true, loc: "UK", agency: "OTHER", vip: false}, + {_id: 5, active: true, loc: "UK", agency: "OTHER", vip: true}, + {_id: 6, active: true}, + // Should match the query: + {_id: 7, active: true, loc: "USA", agency: "FBI", vip: false}, + {_id: 8, active: true, loc: "USA", agency: "CIA", vip: true}, + {_id: 9, active: true, loc: "RUS", agency: "OTHER", vip: true}, + {_id: 10, active: true, loc: "RUS", agency: "KGB"}, +])); 
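As a plain-JavaScript cross-check of the contained-$or predicate exercised below, the documents labeled "Should match" above are exactly those that are active and satisfy either the USA branch (agency FBI or vip) or the RUS branch (agency KGB or vip); an illustrative aside, not part of the test itself:

const clientSideIds =
    coll.find()
        .toArray()
        .filter(d => d.active === true &&
                     ((d.loc === "USA" && (d.agency === "FBI" || d.vip === true)) ||
                      (d.loc === "RUS" && (d.agency === "KGB" || d.vip === true))))
        .map(d => d._id);
assert.setEq(new Set(clientSideIds), new Set([7, 8, 9, 10]));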
+assert.commandWorked(coll.createIndexes([{loc: 1}, {agency: 1}, {vip: 1}])); - // The following query reproduced the memory leak described in SERVER-38601. To catch a - // regression, we would only expect this test to fail on ASAN variants. Before SERVER-35455 we - // would construct a plan for one clause of the $or, then realize that the other clause could - // not be indexed and discard the plan for the first clause in a way that leaks memory. - const results = coll.find({ - active: true, - $or: [ - {loc: "USA", $or: [{agency: "FBI"}, {vip: true}]}, - {loc: "RUS", $or: [{agency: "KGB"}, {vip: true}]} - ] - }) - .toArray(); +// The following query reproduced the memory leak described in SERVER-38601. To catch a +// regression, we would only expect this test to fail on ASAN variants. Before SERVER-35455 we +// would construct a plan for one clause of the $or, then realize that the other clause could +// not be indexed and discard the plan for the first clause in a way that leaks memory. +const results = coll.find({ + active: true, + $or: [ + {loc: "USA", $or: [{agency: "FBI"}, {vip: true}]}, + {loc: "RUS", $or: [{agency: "KGB"}, {vip: true}]} + ] + }) + .toArray(); - // Just assert on the matching _ids. We avoid adding a sort to the query above to avoid - // restricting the plans the query planner can consider. - const matchingIds = results.map(result => result._id); - assert.setEq(new Set(matchingIds), new Set([7, 8, 9, 10])); +// Just assert on the matching _ids. We avoid adding a sort to the query above to avoid +// restricting the plans the query planner can consider. +const matchingIds = results.map(result => result._id); +assert.setEq(new Set(matchingIds), new Set([7, 8, 9, 10])); }()); diff --git a/jstests/core/convert_to_capped.js b/jstests/core/convert_to_capped.js index e9a05f09450..58731299dae 100644 --- a/jstests/core/convert_to_capped.js +++ b/jstests/core/convert_to_capped.js @@ -8,20 +8,20 @@ */ (function() { - "use strict"; +"use strict"; - let testDb = db.getSiblingDB("convert_to_capped"); - let coll = testDb.coll; - testDb.dropDatabase(); +let testDb = db.getSiblingDB("convert_to_capped"); +let coll = testDb.coll; +testDb.dropDatabase(); - // Create a collection with some data. - let num = 10; - for (let i = 0; i < num; ++i) { - assert.writeOK(coll.insert({_id: i})); - } +// Create a collection with some data. +let num = 10; +for (let i = 0; i < num; ++i) { + assert.writeOK(coll.insert({_id: i})); +} - // Ensure we do not allow overflowing the size long long on the server (SERVER-33078). - assert.commandFailedWithCode( - testDb.runCommand({convertToCapped: coll.getName(), size: 5308156746568725891247}), - ErrorCodes.BadValue); +// Ensure we do not allow overflowing the size long long on the server (SERVER-33078). 
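The size requested below, 5308156746568725891247, is roughly 5.3e21, while the largest value a signed 64-bit integer can hold is 9223372036854775807 (about 9.2e18), so the server has to reject the option outright rather than let it wrap; the comparison is easy to confirm from the shell:

assert.gt(5308156746568725891247, Math.pow(2, 63) - 1);  // requested size exceeds the int64 maximum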
+assert.commandFailedWithCode( + testDb.runCommand({convertToCapped: coll.getName(), size: 5308156746568725891247}), + ErrorCodes.BadValue); })(); diff --git a/jstests/core/count_hint.js b/jstests/core/count_hint.js index d508a46fd1a..9bb485410ef 100644 --- a/jstests/core/count_hint.js +++ b/jstests/core/count_hint.js @@ -8,55 +8,55 @@ * @tags: [requires_fastcount] */ (function() { - "use strict"; +"use strict"; - var coll = db.jstests_count_hint; - coll.drop(); +var coll = db.jstests_count_hint; +coll.drop(); - assert.writeOK(coll.insert({i: 1})); - assert.writeOK(coll.insert({i: 2})); +assert.writeOK(coll.insert({i: 1})); +assert.writeOK(coll.insert({i: 2})); - assert.eq(2, coll.find().count()); +assert.eq(2, coll.find().count()); - assert.commandWorked(coll.ensureIndex({i: 1})); +assert.commandWorked(coll.ensureIndex({i: 1})); - assert.eq(2, coll.find().hint("i_1").count()); - assert.eq(2, coll.find().hint({i: 1}).count()); +assert.eq(2, coll.find().hint("i_1").count()); +assert.eq(2, coll.find().hint({i: 1}).count()); - assert.eq(1, coll.find({i: 1}).hint("_id_").count()); - assert.eq(1, coll.find({i: 1}).hint({_id: 1}).count()); +assert.eq(1, coll.find({i: 1}).hint("_id_").count()); +assert.eq(1, coll.find({i: 1}).hint({_id: 1}).count()); - assert.eq(2, coll.find().hint("_id_").count()); - assert.eq(2, coll.find().hint({_id: 1}).count()); +assert.eq(2, coll.find().hint("_id_").count()); +assert.eq(2, coll.find().hint({_id: 1}).count()); - // Create a sparse index which should have no entries. - assert.commandWorked(coll.ensureIndex({x: 1}, {sparse: true})); +// Create a sparse index which should have no entries. +assert.commandWorked(coll.ensureIndex({x: 1}, {sparse: true})); - // A hint should be respected, even if it results in the wrong answer. - assert.eq(0, coll.find().hint("x_1").count()); - assert.eq(0, coll.find().hint({x: 1}).count()); +// A hint should be respected, even if it results in the wrong answer. +assert.eq(0, coll.find().hint("x_1").count()); +assert.eq(0, coll.find().hint({x: 1}).count()); - assert.eq(0, coll.find({i: 1}).hint("x_1").count()); - assert.eq(0, coll.find({i: 1}).hint({x: 1}).count()); +assert.eq(0, coll.find({i: 1}).hint("x_1").count()); +assert.eq(0, coll.find({i: 1}).hint({x: 1}).count()); - // SERVER-14792: bad hints should cause the count to fail, even if there is no query predicate. - assert.throws(function() { - coll.find().hint({bad: 1, hint: 1}).count(); - }); - assert.throws(function() { - coll.find({i: 1}).hint({bad: 1, hint: 1}).count(); - }); +// SERVER-14792: bad hints should cause the count to fail, even if there is no query predicate. +assert.throws(function() { + coll.find().hint({bad: 1, hint: 1}).count(); +}); +assert.throws(function() { + coll.find({i: 1}).hint({bad: 1, hint: 1}).count(); +}); - assert.throws(function() { - coll.find().hint("BAD HINT").count(); - }); - assert.throws(function() { - coll.find({i: 1}).hint("BAD HINT").count(); - }); +assert.throws(function() { + coll.find().hint("BAD HINT").count(); +}); +assert.throws(function() { + coll.find({i: 1}).hint("BAD HINT").count(); +}); - // Test that a bad hint fails with the correct error code. - let cmdRes = db.runCommand({count: coll.getName(), hint: {bad: 1, hint: 1}}); - assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, tojson(cmdRes)); - var regex = new RegExp("hint provided does not correspond to an existing index"); - assert(regex.test(cmdRes.errmsg)); +// Test that a bad hint fails with the correct error code. 
+let cmdRes = db.runCommand({count: coll.getName(), hint: {bad: 1, hint: 1}}); +assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, tojson(cmdRes)); +var regex = new RegExp("hint provided does not correspond to an existing index"); +assert(regex.test(cmdRes.errmsg)); })(); diff --git a/jstests/core/counta.js b/jstests/core/counta.js index 027402af3ee..8d7df953e3d 100644 --- a/jstests/core/counta.js +++ b/jstests/core/counta.js @@ -3,28 +3,28 @@ // @tags: [requires_fastcount] (function() { - 'use strict'; +'use strict'; - var t = db.jstests_counta; - t.drop(); +var t = db.jstests_counta; +t.drop(); - for (var i = 0; i < 10; ++i) { - t.save({a: i}); - } +for (var i = 0; i < 10; ++i) { + t.save({a: i}); +} - // f() is undefined, causing an assertion - assert.throws(function() { - t.count({ - $where: function() { - if (this.a < 5) { - return true; - } else { - f(); - } +// f() is undefined, causing an assertion +assert.throws(function() { + t.count({ + $where: function() { + if (this.a < 5) { + return true; + } else { + f(); } - }); + } }); +}); - // count must return error if collection name is absent - assert.commandFailedWithCode(db.runCommand("count"), ErrorCodes.InvalidNamespace); +// count must return error if collection name is absent +assert.commandFailedWithCode(db.runCommand("count"), ErrorCodes.InvalidNamespace); })(); diff --git a/jstests/core/coveredIndex1.js b/jstests/core/coveredIndex1.js index 7776a48c014..2be0cae4bf9 100644 --- a/jstests/core/coveredIndex1.js +++ b/jstests/core/coveredIndex1.js @@ -7,82 +7,82 @@ * @tags: [assumes_unsharded_collection] */ (function() { - "use strict"; +"use strict"; - const coll = db["jstests_coveredIndex1"]; - coll.drop(); +const coll = db["jstests_coveredIndex1"]; +coll.drop(); - // Include helpers for analyzing explain output. - load("jstests/libs/analyze_plan.js"); +// Include helpers for analyzing explain output. +load("jstests/libs/analyze_plan.js"); - assert.writeOK(coll.insert({order: 0, fn: "john", ln: "doe"})); - assert.writeOK(coll.insert({order: 1, fn: "jack", ln: "doe"})); - assert.writeOK(coll.insert({order: 2, fn: "john", ln: "smith"})); - assert.writeOK(coll.insert({order: 3, fn: "jack", ln: "black"})); - assert.writeOK(coll.insert({order: 4, fn: "bob", ln: "murray"})); - assert.writeOK(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}})); +assert.writeOK(coll.insert({order: 0, fn: "john", ln: "doe"})); +assert.writeOK(coll.insert({order: 1, fn: "jack", ln: "doe"})); +assert.writeOK(coll.insert({order: 2, fn: "john", ln: "smith"})); +assert.writeOK(coll.insert({order: 3, fn: "jack", ln: "black"})); +assert.writeOK(coll.insert({order: 4, fn: "bob", ln: "murray"})); +assert.writeOK(coll.insert({order: 5, fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}})); - /** - * Asserts that running the find command with query 'query' and projection 'projection' is - * covered if 'isCovered' is true, or not covered otherwise. - * - * If 'hint' is specified, use 'hint' as the suggested index. - */ - function assertIfQueryIsCovered(query, projection, isCovered, hint) { - let cursor = coll.find(query, projection); - if (hint) { - cursor = cursor.hint(hint); - } - const explain = cursor.explain(); - assert.commandWorked(explain); +/** + * Asserts that running the find command with query 'query' and projection 'projection' is + * covered if 'isCovered' is true, or not covered otherwise. + * + * If 'hint' is specified, use 'hint' as the suggested index. 
+ */ +function assertIfQueryIsCovered(query, projection, isCovered, hint) { + let cursor = coll.find(query, projection); + if (hint) { + cursor = cursor.hint(hint); + } + const explain = cursor.explain(); + assert.commandWorked(explain); - assert(explain.hasOwnProperty("queryPlanner"), tojson(explain)); - assert(explain.queryPlanner.hasOwnProperty("winningPlan"), tojson(explain)); - const winningPlan = explain.queryPlanner.winningPlan; - if (isCovered) { - assert(isIndexOnly(db, winningPlan), - "Query " + tojson(query) + " with projection " + tojson(projection) + - " should have been covered, but got this plan: " + tojson(winningPlan)); - } else { - assert(!isIndexOnly(db, winningPlan), - "Query " + tojson(query) + " with projection " + tojson(projection) + - " should not have been covered, but got this plan: " + tojson(winningPlan)); - } + assert(explain.hasOwnProperty("queryPlanner"), tojson(explain)); + assert(explain.queryPlanner.hasOwnProperty("winningPlan"), tojson(explain)); + const winningPlan = explain.queryPlanner.winningPlan; + if (isCovered) { + assert(isIndexOnly(db, winningPlan), + "Query " + tojson(query) + " with projection " + tojson(projection) + + " should have been covered, but got this plan: " + tojson(winningPlan)); + } else { + assert(!isIndexOnly(db, winningPlan), + "Query " + tojson(query) + " with projection " + tojson(projection) + + " should not have been covered, but got this plan: " + tojson(winningPlan)); } +} - // Create an index on one field. - assert.commandWorked(coll.createIndex({ln: 1})); - assertIfQueryIsCovered({}, {}, false); - assertIfQueryIsCovered({ln: "doe"}, {}, false); - assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false); - assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true, {ln: 1}); +// Create an index on one field. +assert.commandWorked(coll.createIndex({ln: 1})); +assertIfQueryIsCovered({}, {}, false); +assertIfQueryIsCovered({ln: "doe"}, {}, false); +assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false); +assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true, {ln: 1}); - // Create a compound index. - assert.commandWorked(coll.dropIndex({ln: 1})); - assert.commandWorked(coll.createIndex({ln: 1, fn: 1})); - assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true); - assertIfQueryIsCovered({ln: "doe"}, {ln: 1, fn: 1, _id: 0}, true); - assertIfQueryIsCovered({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}, true); - assertIfQueryIsCovered({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}, true); - assertIfQueryIsCovered({fn: "john"}, {fn: 1, _id: 0}, false); +// Create a compound index. +assert.commandWorked(coll.dropIndex({ln: 1})); +assert.commandWorked(coll.createIndex({ln: 1, fn: 1})); +assertIfQueryIsCovered({ln: "doe"}, {ln: 1, _id: 0}, true); +assertIfQueryIsCovered({ln: "doe"}, {ln: 1, fn: 1, _id: 0}, true); +assertIfQueryIsCovered({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}, true); +assertIfQueryIsCovered({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}, true); +assertIfQueryIsCovered({fn: "john"}, {fn: 1, _id: 0}, false); - // Repeat the above test, but with a compound index involving _id. - assert.commandWorked(coll.dropIndex({ln: 1, fn: 1})); - assert.commandWorked(coll.createIndex({_id: 1, ln: 1})); - assertIfQueryIsCovered({_id: 123, ln: "doe"}, {_id: 1}, true); - assertIfQueryIsCovered({_id: 123, ln: "doe"}, {ln: 1}, true); - assertIfQueryIsCovered({ln: "doe", _id: 123}, {ln: 1, _id: 1}, true); - assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false); +// Repeat the above test, but with a compound index involving _id. 
+assert.commandWorked(coll.dropIndex({ln: 1, fn: 1})); +assert.commandWorked(coll.createIndex({_id: 1, ln: 1})); +assertIfQueryIsCovered({_id: 123, ln: "doe"}, {_id: 1}, true); +assertIfQueryIsCovered({_id: 123, ln: "doe"}, {ln: 1}, true); +assertIfQueryIsCovered({ln: "doe", _id: 123}, {ln: 1, _id: 1}, true); +assertIfQueryIsCovered({ln: "doe"}, {ln: 1}, false); - // Create an index on an embedded object. - assert.commandWorked(coll.dropIndex({_id: 1, ln: 1})); - assert.commandWorked(coll.createIndex({obj: 1})); - assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false); - assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, false); - assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true); +// Create an index on an embedded object. +assert.commandWorked(coll.dropIndex({_id: 1, ln: 1})); +assert.commandWorked(coll.createIndex({obj: 1})); +assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false); +assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, false); +assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true); - // Create indexes on fields inside an embedded object. - assert.commandWorked(coll.dropIndex({obj: 1})); - assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1})); - assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false); +// Create indexes on fields inside an embedded object. +assert.commandWorked(coll.dropIndex({obj: 1})); +assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1})); +assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false); }()); diff --git a/jstests/core/coveredIndex3.js b/jstests/core/coveredIndex3.js index 4bfedda888b..3a6621a2a72 100644 --- a/jstests/core/coveredIndex3.js +++ b/jstests/core/coveredIndex3.js @@ -12,13 +12,15 @@ if (0) { // SERVER-4975 // Insert an array, which will make the { a:1 } index multikey and should disable covered // index // matching. - p1 = startParallelShell('for( i = 0; i < 60; ++i ) { \ + p1 = startParallelShell( + 'for( i = 0; i < 60; ++i ) { \ db.jstests_coveredIndex3.save( { a:[ 2000, 2001 ] } ); \ sleep( 300 ); \ }'); // Frequent writes cause the find operation to yield. - p2 = startParallelShell('for( i = 0; i < 1800; ++i ) { \ + p2 = startParallelShell( + 'for( i = 0; i < 1800; ++i ) { \ db.jstests_coveredIndex3_other.save( {} ); \ sleep( 10 ); \ }'); diff --git a/jstests/core/covered_index_sort_no_fetch_optimization.js b/jstests/core/covered_index_sort_no_fetch_optimization.js index d7545b49761..81853b0f02b 100644 --- a/jstests/core/covered_index_sort_no_fetch_optimization.js +++ b/jstests/core/covered_index_sort_no_fetch_optimization.js @@ -6,229 +6,233 @@ // must be fetched to support the SHARDING_FILTER stage. 
// @tags: [assumes_unsharded_collection] (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const collName = "covered_index_sort_no_fetch_optimization"; - const coll = db.getCollection(collName); - coll.drop(); - - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - - assert.commandWorked(coll.insert([ - {a: 1, b: 1, c: 1}, - {a: 1, b: 2, c: 2}, - {a: 2, b: 1, c: 3}, - {a: 2, b: 2, c: 4}, - {a: -1, b: 1, c: 5} - ])); - - const kIsCovered = true; - const kNotCovered = false; - const kBlockingSort = true; - const kNonBlockingSort = false; - - function assertExpectedResult(findCmd, expectedResult, isCovered, isBlockingSort) { - const result = assert.commandWorked(db.runCommand(findCmd)); - assert.eq(result.cursor.firstBatch, expectedResult, result); - - const explainResult = - assert.commandWorked(db.runCommand({explain: findCmd, verbosity: "executionStats"})); - assert.eq( - isCovered, isIndexOnly(db, explainResult.queryPlanner.winningPlan), explainResult); - assert.eq(isBlockingSort, - planHasStage(db, explainResult.queryPlanner.winningPlan, "SORT"), - explainResult); - } - - // Test correctness of basic covered queries. Here, the sort predicate is not the same order - // as the index order, but uses the same keys. - let findCmd = {find: collName, filter: {a: {$lt: 2}}, projection: {b: 1, _id: 0}, sort: {b: 1}}; - let expected = [{"b": 1}, {"b": 1}, {"b": 2}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - findCmd = { - find: collName, - filter: {a: {$gt: 0}}, - projection: {a: 1, b: 1, _id: 0}, - sort: {b: 1, a: 1} - }; - expected = [{"a": 1, "b": 1}, {"a": 2, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 2}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - findCmd = { - find: collName, - filter: {a: {$gt: 0}}, - projection: {a: 1, b: 1, _id: 0}, - sort: {b: 1, a: -1} - }; - expected = [{"a": 2, "b": 1}, {"a": 1, "b": 1}, {"a": 2, "b": 2}, {"a": 1, "b": 2}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - // Test correctness of queries where sort is not covered because not all sort keys are in the - // index. - findCmd = { - find: collName, - filter: {a: {$gt: 0}}, - projection: {b: 1, c: 1, _id: 0}, - sort: {c: 1, b: 1} - }; - expected = [{"b": 1, "c": 1}, {"b": 2, "c": 2}, {"b": 1, "c": 3}, {"b": 2, "c": 4}]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - findCmd = - {find: collName, filter: {a: {$gt: 0}}, projection: {b: 1, _id: 0}, sort: {c: 1, b: 1}}; - expected = [{"b": 1}, {"b": 2}, {"b": 1}, {"b": 2}]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - // When the sort key is multikey, we cannot cover the sort using the index. - assert.commandWorked(coll.insert({a: 1, b: [4, 5, 6]})); - assert.commandWorked(coll.insert({a: 1, b: [-1, 11, 12]})); - findCmd = {find: collName, filter: {a: {$gt: 0}}, projection: {b: 1, _id: 0}, sort: {b: 1}}; - expected = [{"b": [-1, 11, 12]}, {"b": 1}, {"b": 1}, {"b": 2}, {"b": 2}, {"b": [4, 5, 6]}]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - // Collation Tests. - - // If you have an index with the same index key pattern and the same collation as the sort key, - // then no blocking sort is required. - assert(coll.drop()); - // Note that {locale: "en_US", strength: 3} differ from the simple collation with respect to - // case ordering. "en_US" collation puts lowercase letters first, whereas the simple collation - // puts uppercase first. 
- assert.commandWorked( - coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}})); - assert.commandWorked( - coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}])); - - findCmd = { - find: collName, - filter: {}, - projection: {a: 1, b: 1, _id: 0}, - collation: {locale: "en_US", strength: 3}, - sort: {a: 1, b: 1}, - hint: {a: 1, b: 1} - }; - expected = [ - {"a": 1, "b": 1}, - {"a": 1, "b": 2}, - {"a": 1, "b": "a"}, - {"a": 1, "b": "A"}, - {"a": 2, "b": 2} - ]; - assertExpectedResult(findCmd, expected, kNotCovered, kNonBlockingSort); - - // This tests the case where there is a collation, and we need to do a blocking SORT, but that - // SORT could be computed using the index keys. However, this query cannot be covered due the - // index having a non-simple collation. - findCmd = { - find: collName, - filter: {a: {$lt: 2}}, - projection: {b: 1, _id: 0}, - collation: {locale: "en_US", strength: 3}, - sort: {b: 1}, - hint: {a: 1, b: 1} - }; - expected = [ - {"b": 1}, - {"b": 2}, - {"b": "a"}, - {"b": "A"}, - ]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - // The index has the same key pattern as the sort but a different collation. - // We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order. - assert.commandWorked(coll.dropIndex({a: 1, b: 1})); - assert.commandWorked( - coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 1}})); - - findCmd = { - find: collName, - filter: {}, - projection: {a: 1, b: 1, _id: 0}, - collation: {locale: "en_US", strength: 3}, - sort: {a: 1, b: 1}, - hint: {a: 1, b: 1} - }; - expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - // The index has a collation but the query sort does not. - // We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order. - assert.commandWorked(coll.dropIndex({a: 1, b: 1})); - assert.commandWorked( - coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}})); - findCmd = { - find: collName, - filter: {}, - projection: {a: 1, b: 1, _id: 0}, - sort: {a: 1, b: 1}, - hint: {a: 1, b: 1} - }; - expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]; - assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); - - // The index has a collation but the query does not. However, our index bounds do not contain - // strings, so we can apply the no-fetch optimization. - findCmd = { - find: collName, - filter: {a: {$gte: 1}, b: 2}, - projection: {a: 1, b: 1, _id: 0}, - sort: {b: 1, a: 1}, - hint: {a: 1, b: 1} - }; - expected = [{a: 1, b: 2}, {a: 2, b: 2}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - // The index does not have a special collation, but the query asks for one. The no-fetch - // optimization will be applied in this case. The server must correctly respect the collation - // when sorting the index keys, as the index keys do not already reflect the collation. 
- assert.commandWorked(coll.dropIndex({a: 1, b: 1})); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - - findCmd = { - find: collName, - filter: {}, - projection: {a: 1, b: 1, _id: 0}, - collation: {locale: "en_US", strength: 3}, - sort: {a: 1, b: 1}, - hint: {a: 1, b: 1} - }; - - expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - // Test covered sort plan possible with non-multikey dotted field in sort key. - assert(coll.drop()); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); - assert.commandWorked(coll.insert([ - {a: 0, b: {c: 1}}, - {a: 1, b: {c: 2}}, - {a: 2, b: {c: "A"}}, - {a: 3, b: {c: "a"}}, - {a: 4, b: {c: 3}} - ])); - - findCmd = { - find: collName, - filter: {a: {$gt: 0}}, - projection: {a: 1, "b.c": 1, _id: 0}, - sort: {"b.c": 1} - }; - expected = [ - {"a": 1, "b": {"c": 2}}, - {"a": 4, "b": {"c": 3}}, - {"a": 2, "b": {"c": "A"}}, - {"a": 3, "b": {"c": "a"}} - ]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); - - assert.commandWorked(coll.insert({a: [1], b: {c: 1}})); - findCmd = - {find: collName, filter: {a: {$gt: 0}}, projection: {"b.c": 1, _id: 0}, sort: {"b.c": 1}}; - expected = - [{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}]; - assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const collName = "covered_index_sort_no_fetch_optimization"; +const coll = db.getCollection(collName); +coll.drop(); + +assert.commandWorked(coll.createIndex({a: 1, b: 1})); + +assert.commandWorked(coll.insert([ + {a: 1, b: 1, c: 1}, + {a: 1, b: 2, c: 2}, + {a: 2, b: 1, c: 3}, + {a: 2, b: 2, c: 4}, + {a: -1, b: 1, c: 5} +])); + +const kIsCovered = true; +const kNotCovered = false; +const kBlockingSort = true; +const kNonBlockingSort = false; + +function assertExpectedResult(findCmd, expectedResult, isCovered, isBlockingSort) { + const result = assert.commandWorked(db.runCommand(findCmd)); + assert.eq(result.cursor.firstBatch, expectedResult, result); + + const explainResult = + assert.commandWorked(db.runCommand({explain: findCmd, verbosity: "executionStats"})); + assert.eq(isCovered, isIndexOnly(db, explainResult.queryPlanner.winningPlan), explainResult); + assert.eq(isBlockingSort, + planHasStage(db, explainResult.queryPlanner.winningPlan, "SORT"), + explainResult); +} + +// Test correctness of basic covered queries. Here, the sort predicate is not the same order +// as the index order, but uses the same keys. +let findCmd = {find: collName, filter: {a: {$lt: 2}}, projection: {b: 1, _id: 0}, sort: {b: 1}}; +let expected = [{"b": 1}, {"b": 1}, {"b": 2}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {a: 1, b: 1, _id: 0}, + sort: {b: 1, a: 1} +}; +expected = [{"a": 1, "b": 1}, {"a": 2, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 2}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {a: 1, b: 1, _id: 0}, + sort: {b: 1, a: -1} +}; +expected = [{"a": 2, "b": 1}, {"a": 1, "b": 1}, {"a": 2, "b": 2}, {"a": 1, "b": 2}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +// Test correctness of queries where sort is not covered because not all sort keys are in the +// index. 
+findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {b: 1, c: 1, _id: 0}, + sort: {c: 1, b: 1} +}; +expected = [{"b": 1, "c": 1}, {"b": 2, "c": 2}, {"b": 1, "c": 3}, {"b": 2, "c": 4}]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {b: 1, _id: 0}, + sort: {c: 1, b: 1} +}; +expected = [{"b": 1}, {"b": 2}, {"b": 1}, {"b": 2}]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +// When the sort key is multikey, we cannot cover the sort using the index. +assert.commandWorked(coll.insert({a: 1, b: [4, 5, 6]})); +assert.commandWorked(coll.insert({a: 1, b: [-1, 11, 12]})); +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {b: 1, _id: 0}, + sort: {b: 1} +}; +expected = [{"b": [-1, 11, 12]}, {"b": 1}, {"b": 1}, {"b": 2}, {"b": 2}, {"b": [4, 5, 6]}]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +// Collation Tests. + +// If you have an index with the same index key pattern and the same collation as the sort key, +// then no blocking sort is required. +assert(coll.drop()); +// Note that {locale: "en_US", strength: 3} differ from the simple collation with respect to +// case ordering. "en_US" collation puts lowercase letters first, whereas the simple collation +// puts uppercase first. +assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}})); +assert.commandWorked( + coll.insert([{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}])); + +findCmd = { + find: collName, + filter: {}, + projection: {a: 1, b: 1, _id: 0}, + collation: {locale: "en_US", strength: 3}, + sort: {a: 1, b: 1}, + hint: {a: 1, b: 1} +}; +expected = + [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 1, "b": "a"}, {"a": 1, "b": "A"}, {"a": 2, "b": 2}]; +assertExpectedResult(findCmd, expected, kNotCovered, kNonBlockingSort); + +// This tests the case where there is a collation, and we need to do a blocking SORT, but that +// SORT could be computed using the index keys. However, this query cannot be covered due the +// index having a non-simple collation. +findCmd = { + find: collName, + filter: {a: {$lt: 2}}, + projection: {b: 1, _id: 0}, + collation: {locale: "en_US", strength: 3}, + sort: {b: 1}, + hint: {a: 1, b: 1} +}; +expected = [ + {"b": 1}, + {"b": 2}, + {"b": "a"}, + {"b": "A"}, +]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +// The index has the same key pattern as the sort but a different collation. +// We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order. +assert.commandWorked(coll.dropIndex({a: 1, b: 1})); +assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 1}})); + +findCmd = { + find: collName, + filter: {}, + projection: {a: 1, b: 1, _id: 0}, + collation: {locale: "en_US", strength: 3}, + sort: {a: 1, b: 1}, + hint: {a: 1, b: 1} +}; +expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +// The index has a collation but the query sort does not. +// We expect to add a fetch stage here as 'b' is not guaranteed to be in the correct order. 
+assert.commandWorked(coll.dropIndex({a: 1, b: 1})); +assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: "en_US", strength: 3}})); +findCmd = { + find: collName, + filter: {}, + projection: {a: 1, b: 1, _id: 0}, + sort: {a: 1, b: 1}, + hint: {a: 1, b: 1} +}; +expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "A"}, {a: 1, b: "a"}, {a: 2, b: 2}]; +assertExpectedResult(findCmd, expected, kNotCovered, kBlockingSort); + +// The index has a collation but the query does not. However, our index bounds do not contain +// strings, so we can apply the no-fetch optimization. +findCmd = { + find: collName, + filter: {a: {$gte: 1}, b: 2}, + projection: {a: 1, b: 1, _id: 0}, + sort: {b: 1, a: 1}, + hint: {a: 1, b: 1} +}; +expected = [{a: 1, b: 2}, {a: 2, b: 2}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +// The index does not have a special collation, but the query asks for one. The no-fetch +// optimization will be applied in this case. The server must correctly respect the collation +// when sorting the index keys, as the index keys do not already reflect the collation. +assert.commandWorked(coll.dropIndex({a: 1, b: 1})); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); + +findCmd = { + find: collName, + filter: {}, + projection: {a: 1, b: 1, _id: 0}, + collation: {locale: "en_US", strength: 3}, + sort: {a: 1, b: 1}, + hint: {a: 1, b: 1} +}; + +expected = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 1, b: "a"}, {a: 1, b: "A"}, {a: 2, b: 2}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +// Test covered sort plan possible with non-multikey dotted field in sort key. +assert(coll.drop()); +assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); +assert.commandWorked(coll.insert([ + {a: 0, b: {c: 1}}, + {a: 1, b: {c: 2}}, + {a: 2, b: {c: "A"}}, + {a: 3, b: {c: "a"}}, + {a: 4, b: {c: 3}} +])); + +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {a: 1, "b.c": 1, _id: 0}, + sort: {"b.c": 1} +}; +expected = [ + {"a": 1, "b": {"c": 2}}, + {"a": 4, "b": {"c": 3}}, + {"a": 2, "b": {"c": "A"}}, + {"a": 3, "b": {"c": "a"}} +]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); + +assert.commandWorked(coll.insert({a: [1], b: {c: 1}})); +findCmd = { + find: collName, + filter: {a: {$gt: 0}}, + projection: {"b.c": 1, _id: 0}, + sort: {"b.c": 1} +}; +expected = + [{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}]; +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); })(); diff --git a/jstests/core/covered_multikey.js b/jstests/core/covered_multikey.js index ec4ed0d5c0b..cb5e97d8dbb 100644 --- a/jstests/core/covered_multikey.js +++ b/jstests/core/covered_multikey.js @@ -7,103 +7,103 @@ * Test covering behavior for queries over a multikey index. */ (function() { - "use strict"; +"use strict"; - // For making assertions about explain output. - load("jstests/libs/analyze_plan.js"); +// For making assertions about explain output. 
+load("jstests/libs/analyze_plan.js"); - let coll = db.covered_multikey; - coll.drop(); +let coll = db.covered_multikey; +coll.drop(); - assert.writeOK(coll.insert({a: 1, b: [2, 3, 4]})); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 1, b: [2, 3, 4]})); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.eq(1, coll.find({a: 1, b: 2}, {_id: 0, a: 1}).itcount()); - assert.eq({a: 1}, coll.findOne({a: 1, b: 2}, {_id: 0, a: 1})); - let explainRes = coll.explain("queryPlanner").find({a: 1, b: 2}, {_id: 0, a: 1}).finish(); - assert(isIxscan(db, explainRes.queryPlanner.winningPlan)); - assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); +assert.eq(1, coll.find({a: 1, b: 2}, {_id: 0, a: 1}).itcount()); +assert.eq({a: 1}, coll.findOne({a: 1, b: 2}, {_id: 0, a: 1})); +let explainRes = coll.explain("queryPlanner").find({a: 1, b: 2}, {_id: 0, a: 1}).finish(); +assert(isIxscan(db, explainRes.queryPlanner.winningPlan)); +assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - coll.drop(); - assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5})); - assert.writeOK(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6})); - assert.commandWorked(coll.createIndex({a: 1, b: 1, c: -1, d: -1})); +coll.drop(); +assert.writeOK(coll.insert({a: 1, b: [1, 2, 3], c: 3, d: 5})); +assert.writeOK(coll.insert({a: [1, 2, 3], b: 1, c: 4, d: 6})); +assert.commandWorked(coll.createIndex({a: 1, b: 1, c: -1, d: -1})); - let cursor = coll.find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}).sort({c: -1, d: -1}); - assert.eq(cursor.next(), {c: 4, d: 6}); - assert.eq(cursor.next(), {c: 3, d: 5}); - assert(!cursor.hasNext()); - explainRes = coll.explain("queryPlanner") - .find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}) - .sort({c: -1, d: -1}) - .finish(); - assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); +let cursor = coll.find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}).sort({c: -1, d: -1}); +assert.eq(cursor.next(), {c: 4, d: 6}); +assert.eq(cursor.next(), {c: 3, d: 5}); +assert(!cursor.hasNext()); +explainRes = coll.explain("queryPlanner") + .find({a: 1, b: 1}, {_id: 0, c: 1, d: 1}) + .sort({c: -1, d: -1}) + .finish(); +assert(!planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - // Verify that a query cannot be covered over a path which is multikey due to an empty array. - coll.drop(); - assert.writeOK(coll.insert({a: []})); - assert.commandWorked(coll.createIndex({a: 1})); - assert.eq({a: []}, coll.findOne({a: []}, {_id: 0, a: 1})); - explainRes = coll.explain("queryPlanner").find({a: []}, {_id: 0, a: 1}).finish(); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - let ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a query cannot be covered over a path which is multikey due to an empty array. 
+coll.drop(); +assert.writeOK(coll.insert({a: []})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.eq({a: []}, coll.findOne({a: []}, {_id: 0, a: 1})); +explainRes = coll.explain("queryPlanner").find({a: []}, {_id: 0, a: 1}).finish(); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); +let ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(true, ixscanStage.isMultiKey); - // Verify that a query cannot be covered over a path which is multikey due to a single-element - // array. - coll.drop(); - assert.writeOK(coll.insert({a: [2]})); - assert.commandWorked(coll.createIndex({a: 1})); - assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1})); - explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish(); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a query cannot be covered over a path which is multikey due to a single-element +// array. +coll.drop(); +assert.writeOK(coll.insert({a: [2]})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1})); +explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish(); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(true, ixscanStage.isMultiKey); - // Verify that a query cannot be covered over a path which is multikey due to a single-element - // array, where the path is made multikey by an update rather than an insert. - coll.drop(); - assert.writeOK(coll.insert({a: 2})); - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.update({}, {$set: {a: [2]}})); - assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1})); - explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish(); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); - assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a query cannot be covered over a path which is multikey due to a single-element +// array, where the path is made multikey by an update rather than an insert. +coll.drop(); +assert.writeOK(coll.insert({a: 2})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.update({}, {$set: {a: [2]}})); +assert.eq({a: [2]}, coll.findOne({a: 2}, {_id: 0, a: 1})); +explainRes = coll.explain("queryPlanner").find({a: 2}, {_id: 0, a: 1}).finish(); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "IXSCAN")); +assert(planHasStage(db, explainRes.queryPlanner.winningPlan, "FETCH")); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(true, ixscanStage.isMultiKey); - // Verify that a trailing empty array makes a 2dsphere index multikey. 
- coll.drop(); - assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); - assert.writeOK(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}})); - explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.neq(null, ixscanStage); - assert.eq(false, ixscanStage.isMultiKey); - assert.writeOK(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}})); - explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.neq(null, ixscanStage); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a trailing empty array makes a 2dsphere index multikey. +coll.drop(); +assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); +assert.writeOK(coll.insert({a: {b: 1}, c: {type: "Point", coordinates: [0, 0]}})); +explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.neq(null, ixscanStage); +assert.eq(false, ixscanStage.isMultiKey); +assert.writeOK(coll.insert({a: {b: []}, c: {type: "Point", coordinates: [0, 0]}})); +explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.neq(null, ixscanStage); +assert.eq(true, ixscanStage.isMultiKey); - // Verify that a mid-path empty array makes a 2dsphere index multikey. - coll.drop(); - assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); - assert.writeOK(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}})); - explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.neq(null, ixscanStage); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a mid-path empty array makes a 2dsphere index multikey. +coll.drop(); +assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); +assert.writeOK(coll.insert({a: [], c: {type: "Point", coordinates: [0, 0]}})); +explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.neq(null, ixscanStage); +assert.eq(true, ixscanStage.isMultiKey); - // Verify that a single-element array makes a 2dsphere index multikey. - coll.drop(); - assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); - assert.writeOK(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}})); - explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); - ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); - assert.neq(null, ixscanStage); - assert.eq(true, ixscanStage.isMultiKey); +// Verify that a single-element array makes a 2dsphere index multikey. 
+coll.drop(); +assert.commandWorked(coll.createIndex({"a.b": 1, c: "2dsphere"})); +assert.writeOK(coll.insert({a: {b: [3]}, c: {type: "Point", coordinates: [0, 0]}})); +explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); +ixscanStage = getPlanStage(explainRes.queryPlanner.winningPlan, "IXSCAN"); +assert.neq(null, ixscanStage); +assert.eq(true, ixscanStage.isMultiKey); }()); diff --git a/jstests/core/create_collection.js b/jstests/core/create_collection.js index 00368b38462..6c8e5169dac 100644 --- a/jstests/core/create_collection.js +++ b/jstests/core/create_collection.js @@ -5,169 +5,166 @@ // Tests for the "create" command. (function() { - "use strict"; - - load("jstests/libs/get_index_helpers.js"); - - // "create" command rejects invalid options. - db.create_collection.drop(); - assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415); - - // Cannot create a collection with null characters. - assert.commandFailedWithCode(db.createCollection("\0ab"), ErrorCodes.InvalidNamespace); - assert.commandFailedWithCode(db.createCollection("a\0b"), ErrorCodes.InvalidNamespace); - assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidNamespace); - - // The collection name length limit was removed in 4.4, try creating a collection with a longer - // name than previously allowed. - const longCollName = 'a'.repeat(8192); - db[longCollName].drop(); - assert.commandWorked(db.createCollection(longCollName)); - - // - // Tests for "idIndex" field. - // - - // "idIndex" field not allowed with "viewOn". - db.create_collection.drop(); - assert.commandWorked(db.createCollection("create_collection")); - assert.commandFailedWithCode(db.runCommand({ - create: "create_view", - viewOn: "create_collection", - idIndex: {key: {_id: 1}, name: "_id_"} - }), - ErrorCodes.InvalidOptions); - - // "idIndex" field not allowed with "autoIndexId". - db.create_collection.drop(); - assert.commandFailedWithCode( - db.createCollection("create_collection", - {autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}), - ErrorCodes.InvalidOptions); - - // "idIndex" field must be an object. - db.create_collection.drop(); - assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}), - ErrorCodes.TypeMismatch); - - // "idIndex" field cannot be empty. - db.create_collection.drop(); - assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}), - ErrorCodes.FailedToParse); - - // "idIndex" field must be a specification for an _id index. - db.create_collection.drop(); - assert.commandFailedWithCode( - db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}), - ErrorCodes.BadValue); - - // "idIndex" field must have "key" equal to {_id: 1}. - db.create_collection.drop(); - assert.commandFailedWithCode( - db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}), - ErrorCodes.BadValue); - - // The name of an _id index gets corrected to "_id_". - db.create_collection.drop(); - assert.commandWorked( - db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}})); - var indexSpec = GetIndexHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1}); - assert.neq(indexSpec, null); - assert.eq(indexSpec.name, "_id_", tojson(indexSpec)); - - // "idIndex" field must only contain fields that are allowed for an _id index. 
- db.create_collection.drop(); - assert.commandFailedWithCode( - db.createCollection("create_collection", - {idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}), - ErrorCodes.InvalidIndexSpecificationOption); - - // "create" creates v=2 _id index when "v" is not specified in "idIndex". - db.create_collection.drop(); - assert.commandWorked( - db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}})); - indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); - assert.neq(indexSpec, null); - assert.eq(indexSpec.v, 2, tojson(indexSpec)); - - // "create" creates v=1 _id index when "idIndex" has "v" equal to 1. - db.create_collection.drop(); - assert.commandWorked( - db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}})); - indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); - assert.neq(indexSpec, null); - assert.eq(indexSpec.v, 1, tojson(indexSpec)); - - // "create" creates v=2 _id index when "idIndex" has "v" equal to 2. - db.create_collection.drop(); - assert.commandWorked( - db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}})); - indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); - assert.neq(indexSpec, null); - assert.eq(indexSpec.v, 2, tojson(indexSpec)); - - // "collation" field of "idIndex" must match collection default collation. - db.create_collection.drop(); - assert.commandFailedWithCode( - db.createCollection("create_collection", - {idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}), - ErrorCodes.BadValue); - - db.create_collection.drop(); - assert.commandFailedWithCode(db.createCollection("create_collection", { - collation: {locale: "fr_CA"}, - idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} - }), - ErrorCodes.BadValue); - - db.create_collection.drop(); - assert.commandFailedWithCode(db.createCollection("create_collection", { - collation: {locale: "fr_CA"}, - idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}} - }), - ErrorCodes.BadValue); - - db.create_collection.drop(); - assert.commandWorked(db.createCollection("create_collection", { - collation: {locale: "en_US", strength: 3}, - idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} - })); - indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); - assert.neq(indexSpec, null); - assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec)); - - // If "collation" field is not present in "idIndex", _id index inherits collection default - // collation. - db.create_collection.drop(); - assert.commandWorked(db.createCollection( - "create_collection", - {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}})); - indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); - assert.neq(indexSpec, null); - assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec)); - - // - // Tests the combination of the "capped", "size" and "max" fields in createCollection(). - // - - // When "capped" is true, the "size" field needs to be present. 
- assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {capped: true}), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}), - ErrorCodes.InvalidOptions); - db.no_capped.drop(); - assert.commandWorked(db.createCollection('no_capped'), {capped: false}); - db.capped_no_max.drop(); - assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256})); - db.capped_with_max_and_size.drop(); - assert.commandWorked( - db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256})); - - // When the "size" field is present, "capped" needs to be true. - assert.commandFailedWithCode(db.createCollection('size_no_capped', {size: 256}), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode( - db.createCollection('size_capped_false', {capped: false, size: 256}), - ErrorCodes.InvalidOptions); - +"use strict"; + +load("jstests/libs/get_index_helpers.js"); + +// "create" command rejects invalid options. +db.create_collection.drop(); +assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415); + +// Cannot create a collection with null characters. +assert.commandFailedWithCode(db.createCollection("\0ab"), ErrorCodes.InvalidNamespace); +assert.commandFailedWithCode(db.createCollection("a\0b"), ErrorCodes.InvalidNamespace); +assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidNamespace); + +// The collection name length limit was removed in 4.4, try creating a collection with a longer +// name than previously allowed. +const longCollName = 'a'.repeat(8192); +db[longCollName].drop(); +assert.commandWorked(db.createCollection(longCollName)); + +// +// Tests for "idIndex" field. +// + +// "idIndex" field not allowed with "viewOn". +db.create_collection.drop(); +assert.commandWorked(db.createCollection("create_collection")); +assert.commandFailedWithCode(db.runCommand({ + create: "create_view", + viewOn: "create_collection", + idIndex: {key: {_id: 1}, name: "_id_"} +}), + ErrorCodes.InvalidOptions); + +// "idIndex" field not allowed with "autoIndexId". +db.create_collection.drop(); +assert.commandFailedWithCode( + db.createCollection("create_collection", + {autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}), + ErrorCodes.InvalidOptions); + +// "idIndex" field must be an object. +db.create_collection.drop(); +assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}), + ErrorCodes.TypeMismatch); + +// "idIndex" field cannot be empty. +db.create_collection.drop(); +assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}), + ErrorCodes.FailedToParse); + +// "idIndex" field must be a specification for an _id index. +db.create_collection.drop(); +assert.commandFailedWithCode( + db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}), + ErrorCodes.BadValue); + +// "idIndex" field must have "key" equal to {_id: 1}. +db.create_collection.drop(); +assert.commandFailedWithCode( + db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}), + ErrorCodes.BadValue); + +// The name of an _id index gets corrected to "_id_". 
+db.create_collection.drop(); +assert.commandWorked( + db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}})); +var indexSpec = GetIndexHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1}); +assert.neq(indexSpec, null); +assert.eq(indexSpec.name, "_id_", tojson(indexSpec)); + +// "idIndex" field must only contain fields that are allowed for an _id index. +db.create_collection.drop(); +assert.commandFailedWithCode( + db.createCollection("create_collection", + {idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}), + ErrorCodes.InvalidIndexSpecificationOption); + +// "create" creates v=2 _id index when "v" is not specified in "idIndex". +db.create_collection.drop(); +assert.commandWorked( + db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}})); +indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); +assert.neq(indexSpec, null); +assert.eq(indexSpec.v, 2, tojson(indexSpec)); + +// "create" creates v=1 _id index when "idIndex" has "v" equal to 1. +db.create_collection.drop(); +assert.commandWorked( + db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}})); +indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); +assert.neq(indexSpec, null); +assert.eq(indexSpec.v, 1, tojson(indexSpec)); + +// "create" creates v=2 _id index when "idIndex" has "v" equal to 2. +db.create_collection.drop(); +assert.commandWorked( + db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}})); +indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); +assert.neq(indexSpec, null); +assert.eq(indexSpec.v, 2, tojson(indexSpec)); + +// "collation" field of "idIndex" must match collection default collation. +db.create_collection.drop(); +assert.commandFailedWithCode( + db.createCollection("create_collection", + {idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}), + ErrorCodes.BadValue); + +db.create_collection.drop(); +assert.commandFailedWithCode(db.createCollection("create_collection", { + collation: {locale: "fr_CA"}, + idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} +}), + ErrorCodes.BadValue); + +db.create_collection.drop(); +assert.commandFailedWithCode(db.createCollection("create_collection", { + collation: {locale: "fr_CA"}, + idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}} +}), + ErrorCodes.BadValue); + +db.create_collection.drop(); +assert.commandWorked(db.createCollection("create_collection", { + collation: {locale: "en_US", strength: 3}, + idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} +})); +indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); +assert.neq(indexSpec, null); +assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec)); + +// If "collation" field is not present in "idIndex", _id index inherits collection default +// collation. +db.create_collection.drop(); +assert.commandWorked(db.createCollection( + "create_collection", {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}})); +indexSpec = GetIndexHelpers.findByName(db.create_collection.getIndexes(), "_id_"); +assert.neq(indexSpec, null); +assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec)); + +// +// Tests the combination of the "capped", "size" and "max" fields in createCollection(). +// + +// When "capped" is true, the "size" field needs to be present. 
+assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {capped: true}), + ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}), + ErrorCodes.InvalidOptions); +db.no_capped.drop(); +assert.commandWorked(db.createCollection('no_capped'), {capped: false}); +db.capped_no_max.drop(); +assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256})); +db.capped_with_max_and_size.drop(); +assert.commandWorked( + db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256})); + +// When the "size" field is present, "capped" needs to be true. +assert.commandFailedWithCode(db.createCollection('size_no_capped', {size: 256}), + ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(db.createCollection('size_capped_false', {capped: false, size: 256}), + ErrorCodes.InvalidOptions); })(); diff --git a/jstests/core/create_index_same_spec_different_name.js b/jstests/core/create_index_same_spec_different_name.js index 660a2a714fc..7b08f9f55ca 100644 --- a/jstests/core/create_index_same_spec_different_name.js +++ b/jstests/core/create_index_same_spec_different_name.js @@ -3,15 +3,14 @@ * 'IndexOptionsConflict' error. */ (function() { - 'use strict'; +'use strict'; - const coll = "create_index_same_spec_different_name"; - db.coll.drop(); +const coll = "create_index_same_spec_different_name"; +db.coll.drop(); - assert.commandWorked( - db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_1"}]})); +assert.commandWorked(db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_1"}]})); - assert.commandFailedWithCode( - db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_2"}]}), - ErrorCodes.IndexOptionsConflict); +assert.commandFailedWithCode( + db.runCommand({createIndexes: coll, indexes: [{key: {x: 1}, name: "x_2"}]}), + ErrorCodes.IndexOptionsConflict); }()); diff --git a/jstests/core/create_indexes.js b/jstests/core/create_indexes.js index 9d9f7b536bf..c3f2e0bfdd2 100644 --- a/jstests/core/create_indexes.js +++ b/jstests/core/create_indexes.js @@ -4,185 +4,183 @@ * ] */ (function() { - 'use strict'; - - var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg); - - var extractResult = function(obj) { - if (!isMongos) - return obj; - - // Sample mongos format: - // { - // raw: { - // "localhost:30000": { - // createdCollectionAutomatically: false, - // numIndexesBefore: 3, - // numIndexesAfter: 5, - // ok: 1 - // } - // }, - // ok: 1 - // } - - var numFields = 0; - var result = null; - for (var field in obj.raw) { - result = obj.raw[field]; - numFields++; - } - - assert.neq(null, result); - assert.eq(1, numFields); - return result; - }; - - var checkImplicitCreate = function(createIndexResult, isMongos) { - let allowImplicit = !isMongos; - assert.eq(allowImplicit, createIndexResult.createdCollectionAutomatically); - }; - - var dbTest = db.getSisterDB('create_indexes_db'); - dbTest.dropDatabase(); - - // Database does not exist - var collDbNotExist = dbTest.create_indexes_no_db; - var res = assert.commandWorked( - collDbNotExist.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); - res = extractResult(res); - checkImplicitCreate(res, isMongos); - assert.eq(1, res.numIndexesBefore); - assert.eq(2, res.numIndexesAfter); - assert.isnull(res.note, - 'createIndexes.note should not be present in results when adding a new index: ' + - tojson(res)); - - // Collection does not exist, but database does - var t = 
dbTest.create_indexes; - var res = assert.commandWorked( - t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); - res = extractResult(res); - checkImplicitCreate(res, isMongos); - assert.eq(1, res.numIndexesBefore); - assert.eq(2, res.numIndexesAfter); - assert.isnull(res.note, - 'createIndexes.note should not be present in results when adding a new index: ' + - tojson(res)); - - // Both database and collection exist - res = assert.commandWorked( - t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); - res = extractResult(res); - assert(!res.createdCollectionAutomatically); - assert.eq(2, res.numIndexesBefore); - assert.eq(2, - res.numIndexesAfter, - 'numIndexesAfter missing from createIndexes result when adding a duplicate index: ' + - tojson(res)); - assert(res.note, - 'createIndexes.note should be present in results when adding a duplicate index: ' + - tojson(res)); - - res = t.runCommand("createIndexes", - {indexes: [{key: {"x": 1}, name: "x_1"}, {key: {"y": 1}, name: "y_1"}]}); - res = extractResult(res); - assert(!res.createdCollectionAutomatically); - assert.eq(2, res.numIndexesBefore); - assert.eq(3, res.numIndexesAfter); - - res = assert.commandWorked(t.runCommand( - 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]})); - res = extractResult(res); - assert(!res.createdCollectionAutomatically); - assert.eq(3, res.numIndexesBefore); - assert.eq(5, res.numIndexesAfter); - assert.isnull(res.note, - 'createIndexes.note should not be present in results when adding new indexes: ' + - tojson(res)); - - res = assert.commandWorked(t.runCommand( - 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]})); - - res = extractResult(res); - assert.eq(5, res.numIndexesBefore); - assert.eq(5, - res.numIndexesAfter, - 'numIndexesAfter missing from createIndexes result when adding duplicate indexes: ' + - tojson(res)); - assert(res.note, - 'createIndexes.note should be present in results when adding a duplicate index: ' + - tojson(res)); - - res = t.runCommand("createIndexes", {indexes: [{}]}); - assert(!res.ok); - - res = t.runCommand("createIndexes", {indexes: [{}, {key: {m: 1}, name: "asd"}]}); - assert(!res.ok); - - assert.eq(5, t.getIndexes().length); - - res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]}); - assert.eq(6, t.getIndexes().length); - assert.eq(1, - t.getIndexes() - .filter(function(z) { - return z.sparse; - }) - .length); - - res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]}); - assert(!res.ok); - - assert.eq(6, t.getIndexes().length); - - res = t.runCommand("createIndexes", {indexes: [{key: {"x": 1}, name: ""}]}); - assert(!res.ok); - - assert.eq(6, t.getIndexes().length); - - // Test that v0 indexes cannot be created. - res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 0}]}); - assert.commandFailed(res, 'v0 index creation should fail'); - - // Test that v1 indexes can be created explicitly. - res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 1}]}); - assert.commandWorked(res, 'v1 index creation should succeed'); - - // Test that index creation fails with an invalid top-level field. - res = t.runCommand('createIndexes', {indexes: [{key: {e: 1}, name: 'e_1'}], 'invalidField': 1}); - assert.commandFailedWithCode(res, ErrorCodes.BadValue); - - // Test that index creation fails with an invalid field in the index spec for index version V2. 
- res = t.runCommand('createIndexes', - {indexes: [{key: {e: 1}, name: 'e_1', 'v': 2, 'invalidField': 1}]}); - assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption); - - // Test that index creation fails with an invalid field in the index spec for index version V1. - res = t.runCommand('createIndexes', - {indexes: [{key: {e: 1}, name: 'e_1', 'v': 1, 'invalidField': 1}]}); - assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption); - - // Test that index creation fails with an index named '*'. - res = t.runCommand('createIndexes', {indexes: [{key: {star: 1}, name: '*'}]}); - assert.commandFailedWithCode(res, ErrorCodes.BadValue); - - // Test that index creation fails with an index value of empty string. - res = t.runCommand('createIndexes', {indexes: [{key: {f: ""}, name: 'f_1'}]}); - assert.commandFailedWithCode(res, ErrorCodes.CannotCreateIndex); - - // Test that index creation fails with duplicate index names in the index specs. - res = t.runCommand('createIndexes', { - indexes: [ - {key: {g: 1}, name: 'myidx'}, - {key: {h: 1}, name: 'myidx'}, - ], - }); - assert.commandFailedWithCode(res, ErrorCodes.IndexKeySpecsConflict); - - // Test that user is not allowed to create indexes in config.transactions. - var configDB = db.getSiblingDB('config'); - res = configDB.runCommand( - {createIndexes: 'transactions', indexes: [{key: {star: 1}, name: 'star'}]}); - assert.commandFailedWithCode(res, ErrorCodes.IllegalOperation); - +'use strict'; + +var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg); + +var extractResult = function(obj) { + if (!isMongos) + return obj; + + // Sample mongos format: + // { + // raw: { + // "localhost:30000": { + // createdCollectionAutomatically: false, + // numIndexesBefore: 3, + // numIndexesAfter: 5, + // ok: 1 + // } + // }, + // ok: 1 + // } + + var numFields = 0; + var result = null; + for (var field in obj.raw) { + result = obj.raw[field]; + numFields++; + } + + assert.neq(null, result); + assert.eq(1, numFields); + return result; +}; + +var checkImplicitCreate = function(createIndexResult, isMongos) { + let allowImplicit = !isMongos; + assert.eq(allowImplicit, createIndexResult.createdCollectionAutomatically); +}; + +var dbTest = db.getSisterDB('create_indexes_db'); +dbTest.dropDatabase(); + +// Database does not exist +var collDbNotExist = dbTest.create_indexes_no_db; +var res = assert.commandWorked( + collDbNotExist.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); +res = extractResult(res); +checkImplicitCreate(res, isMongos); +assert.eq(1, res.numIndexesBefore); +assert.eq(2, res.numIndexesAfter); +assert.isnull( + res.note, + 'createIndexes.note should not be present in results when adding a new index: ' + tojson(res)); + +// Collection does not exist, but database does +var t = dbTest.create_indexes; +var res = + assert.commandWorked(t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); +res = extractResult(res); +checkImplicitCreate(res, isMongos); +assert.eq(1, res.numIndexesBefore); +assert.eq(2, res.numIndexesAfter); +assert.isnull( + res.note, + 'createIndexes.note should not be present in results when adding a new index: ' + tojson(res)); + +// Both database and collection exist +res = assert.commandWorked(t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]})); +res = extractResult(res); +assert(!res.createdCollectionAutomatically); +assert.eq(2, res.numIndexesBefore); +assert.eq(2, + res.numIndexesAfter, + 'numIndexesAfter missing from 
createIndexes result when adding a duplicate index: ' + + tojson(res)); +assert(res.note, + 'createIndexes.note should be present in results when adding a duplicate index: ' + + tojson(res)); + +res = t.runCommand("createIndexes", + {indexes: [{key: {"x": 1}, name: "x_1"}, {key: {"y": 1}, name: "y_1"}]}); +res = extractResult(res); +assert(!res.createdCollectionAutomatically); +assert.eq(2, res.numIndexesBefore); +assert.eq(3, res.numIndexesAfter); + +res = assert.commandWorked(t.runCommand( + 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]})); +res = extractResult(res); +assert(!res.createdCollectionAutomatically); +assert.eq(3, res.numIndexesBefore); +assert.eq(5, res.numIndexesAfter); +assert.isnull( + res.note, + 'createIndexes.note should not be present in results when adding new indexes: ' + tojson(res)); + +res = assert.commandWorked(t.runCommand( + 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]})); + +res = extractResult(res); +assert.eq(5, res.numIndexesBefore); +assert.eq(5, + res.numIndexesAfter, + 'numIndexesAfter missing from createIndexes result when adding duplicate indexes: ' + + tojson(res)); +assert(res.note, + 'createIndexes.note should be present in results when adding a duplicate index: ' + + tojson(res)); + +res = t.runCommand("createIndexes", {indexes: [{}]}); +assert(!res.ok); + +res = t.runCommand("createIndexes", {indexes: [{}, {key: {m: 1}, name: "asd"}]}); +assert(!res.ok); + +assert.eq(5, t.getIndexes().length); + +res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]}); +assert.eq(6, t.getIndexes().length); +assert.eq(1, + t.getIndexes() + .filter(function(z) { + return z.sparse; + }) + .length); + +res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]}); +assert(!res.ok); + +assert.eq(6, t.getIndexes().length); + +res = t.runCommand("createIndexes", {indexes: [{key: {"x": 1}, name: ""}]}); +assert(!res.ok); + +assert.eq(6, t.getIndexes().length); + +// Test that v0 indexes cannot be created. +res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 0}]}); +assert.commandFailed(res, 'v0 index creation should fail'); + +// Test that v1 indexes can be created explicitly. +res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 1}]}); +assert.commandWorked(res, 'v1 index creation should succeed'); + +// Test that index creation fails with an invalid top-level field. +res = t.runCommand('createIndexes', {indexes: [{key: {e: 1}, name: 'e_1'}], 'invalidField': 1}); +assert.commandFailedWithCode(res, ErrorCodes.BadValue); + +// Test that index creation fails with an invalid field in the index spec for index version V2. +res = t.runCommand('createIndexes', + {indexes: [{key: {e: 1}, name: 'e_1', 'v': 2, 'invalidField': 1}]}); +assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption); + +// Test that index creation fails with an invalid field in the index spec for index version V1. +res = t.runCommand('createIndexes', + {indexes: [{key: {e: 1}, name: 'e_1', 'v': 1, 'invalidField': 1}]}); +assert.commandFailedWithCode(res, ErrorCodes.InvalidIndexSpecificationOption); + +// Test that index creation fails with an index named '*'. +res = t.runCommand('createIndexes', {indexes: [{key: {star: 1}, name: '*'}]}); +assert.commandFailedWithCode(res, ErrorCodes.BadValue); + +// Test that index creation fails with an index value of empty string. 
+res = t.runCommand('createIndexes', {indexes: [{key: {f: ""}, name: 'f_1'}]}); +assert.commandFailedWithCode(res, ErrorCodes.CannotCreateIndex); + +// Test that index creation fails with duplicate index names in the index specs. +res = t.runCommand('createIndexes', { + indexes: [ + {key: {g: 1}, name: 'myidx'}, + {key: {h: 1}, name: 'myidx'}, + ], +}); +assert.commandFailedWithCode(res, ErrorCodes.IndexKeySpecsConflict); + +// Test that user is not allowed to create indexes in config.transactions. +var configDB = db.getSiblingDB('config'); +res = + configDB.runCommand({createIndexes: 'transactions', indexes: [{key: {star: 1}, name: 'star'}]}); +assert.commandFailedWithCode(res, ErrorCodes.IllegalOperation); }()); diff --git a/jstests/core/create_indexes_with_unknown_field_names.js b/jstests/core/create_indexes_with_unknown_field_names.js index c5cfefff3a5..2a3a0cbc9bc 100644 --- a/jstests/core/create_indexes_with_unknown_field_names.js +++ b/jstests/core/create_indexes_with_unknown_field_names.js @@ -3,40 +3,40 @@ * if 'ignoreUnknownIndexOptions: true' is set on the createIndexes command. */ (function() { - "use strict"; +"use strict"; - db.unknown_field_names_create_indexes.drop(); - assert.commandFailedWithCode(db.runCommand({ - createIndexes: "unknown_field_names_create_indexes", - indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}] - }), - ErrorCodes.InvalidIndexSpecificationOption); +db.unknown_field_names_create_indexes.drop(); +assert.commandFailedWithCode(db.runCommand({ + createIndexes: "unknown_field_names_create_indexes", + indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}] +}), + ErrorCodes.InvalidIndexSpecificationOption); - assert.commandFailedWithCode(db.runCommand({ - createIndexes: "unknown_field_names_create_indexes", - indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], - ignoreUnknownIndexOptions: false - }), - ErrorCodes.InvalidIndexSpecificationOption); +assert.commandFailedWithCode(db.runCommand({ + createIndexes: "unknown_field_names_create_indexes", + indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], + ignoreUnknownIndexOptions: false +}), + ErrorCodes.InvalidIndexSpecificationOption); - assert.commandFailedWithCode(db.runCommand({ - createIndexes: "unknown_field_names_create_indexes", - indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], - ignoreUnknownIndexOptions: "badValue" - }), - ErrorCodes.TypeMismatch); +assert.commandFailedWithCode(db.runCommand({ + createIndexes: "unknown_field_names_create_indexes", + indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], + ignoreUnknownIndexOptions: "badValue" +}), + ErrorCodes.TypeMismatch); - assert.commandWorked(db.runCommand({ - createIndexes: "unknown_field_names_create_indexes", - indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], - ignoreUnknownIndexOptions: true - })); +assert.commandWorked(db.runCommand({ + createIndexes: "unknown_field_names_create_indexes", + indexes: [{key: {x: 1}, name: "myindex", someField: "someValue"}], + ignoreUnknownIndexOptions: true +})); - // Make sure 'someField' is not in the index spec. - let indexes = db.unknown_field_names_create_indexes.getIndexes(); - for (let index in indexes) { - if (0 === bsonWoCompare(indexes[index].key, {x: 1})) { - assert.eq(indexes[index].someField, undefined); - } +// Make sure 'someField' is not in the index spec. 
+let indexes = db.unknown_field_names_create_indexes.getIndexes(); +for (let index in indexes) { + if (0 === bsonWoCompare(indexes[index].key, {x: 1})) { + assert.eq(indexes[index].someField, undefined); } +} })(); diff --git a/jstests/core/crud_api.js b/jstests/core/crud_api.js index 560930dbe99..4b37300e9b1 100644 --- a/jstests/core/crud_api.js +++ b/jstests/core/crud_api.js @@ -9,738 +9,729 @@ // ] (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - - var crudAPISpecTests = function crudAPISpecTests() { - "use strict"; - - // Get the colllection - var coll = db.crud_tests; - - // Setup - function createTestExecutor(coll, method, verifyResult) { - return function(args) { - // Drop collection - coll.drop(); - // Insert test data - var r = coll.insertMany(args.insert); - assert.eq(args.insert.length, r.insertedIds.length); - - // Execute the method with arguments - r = coll[method].apply(coll, args.params); - verifyResult(args.result, r); - - // Get all the results - assert.soonNoExcept( - function() { - var results = coll.find({}).sort({_id: 1}).toArray(); - assert.docEq(args.expected, results); - return true; - }, - function() { - return "collection never contained expected documents"; - }); - }; - } +load("jstests/aggregation/extras/utils.js"); // For arrayEq. - function checkResultObject(first, second) { - // Only assert on the "modifiedCount" property when write commands are enabled - if (db.getMongo().writeMode() === 'commands') { - assert.docEq(first, second); - } else { - var overrideModifiedCount = {modifiedCount: undefined}; - assert.docEq(Object.merge(first, overrideModifiedCount), - Object.merge(second, overrideModifiedCount)); - } - } +var crudAPISpecTests = function crudAPISpecTests() { + "use strict"; - // Setup executors - var deleteManyExecutor = createTestExecutor(coll, 'deleteMany', checkResultObject); - var deleteOneExecutor = createTestExecutor(coll, 'deleteOne', checkResultObject); - var bulkWriteExecutor = createTestExecutor(coll, 'bulkWrite', checkResultObject); - var findOneAndDeleteExecutor = - createTestExecutor(coll, 'findOneAndDelete', checkResultObject); - var findOneAndReplaceExecutor = - createTestExecutor(coll, 'findOneAndReplace', checkResultObject); - var findOneAndUpdateExecutor = - createTestExecutor(coll, 'findOneAndUpdate', checkResultObject); - var insertManyExecutor = createTestExecutor(coll, 'insertMany', checkResultObject); - var insertOneExecutor = createTestExecutor(coll, 'insertOne', checkResultObject); - var replaceOneExecutor = createTestExecutor(coll, 'replaceOne', checkResultObject); - var updateManyExecutor = createTestExecutor(coll, 'updateMany', checkResultObject); - var updateOneExecutor = createTestExecutor(coll, 'updateOne', checkResultObject); - var countExecutor = createTestExecutor(coll, 'count', assert.eq); - var distinctExecutor = - createTestExecutor(coll, 'distinct', (a, b) => assert(arrayEq(a, b))); - - // - // BulkWrite - // - - bulkWriteExecutor({ - insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}], - params: [[ + // Get the colllection + var coll = db.crud_tests; + + // Setup + function createTestExecutor(coll, method, verifyResult) { + return function(args) { + // Drop collection + coll.drop(); + // Insert test data + var r = coll.insertMany(args.insert); + assert.eq(args.insert.length, r.insertedIds.length); + + // Execute the method with arguments + r = coll[method].apply(coll, args.params); + verifyResult(args.result, r); + + // Get all the results + 
assert.soonNoExcept( + function() { + var results = coll.find({}).sort({_id: 1}).toArray(); + assert.docEq(args.expected, results); + return true; + }, + function() { + return "collection never contained expected documents"; + }); + }; + } + + function checkResultObject(first, second) { + // Only assert on the "modifiedCount" property when write commands are enabled + if (db.getMongo().writeMode() === 'commands') { + assert.docEq(first, second); + } else { + var overrideModifiedCount = {modifiedCount: undefined}; + assert.docEq(Object.merge(first, overrideModifiedCount), + Object.merge(second, overrideModifiedCount)); + } + } + + // Setup executors + var deleteManyExecutor = createTestExecutor(coll, 'deleteMany', checkResultObject); + var deleteOneExecutor = createTestExecutor(coll, 'deleteOne', checkResultObject); + var bulkWriteExecutor = createTestExecutor(coll, 'bulkWrite', checkResultObject); + var findOneAndDeleteExecutor = createTestExecutor(coll, 'findOneAndDelete', checkResultObject); + var findOneAndReplaceExecutor = + createTestExecutor(coll, 'findOneAndReplace', checkResultObject); + var findOneAndUpdateExecutor = createTestExecutor(coll, 'findOneAndUpdate', checkResultObject); + var insertManyExecutor = createTestExecutor(coll, 'insertMany', checkResultObject); + var insertOneExecutor = createTestExecutor(coll, 'insertOne', checkResultObject); + var replaceOneExecutor = createTestExecutor(coll, 'replaceOne', checkResultObject); + var updateManyExecutor = createTestExecutor(coll, 'updateMany', checkResultObject); + var updateOneExecutor = createTestExecutor(coll, 'updateOne', checkResultObject); + var countExecutor = createTestExecutor(coll, 'count', assert.eq); + var distinctExecutor = createTestExecutor(coll, 'distinct', (a, b) => assert(arrayEq(a, b))); + + // + // BulkWrite + // + + bulkWriteExecutor({ + insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}], + params: [[ + {insertOne: {document: {_id: 4, a: 1}}}, + {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}}, + {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}}, + {deleteOne: {filter: {c: 1}}}, + {insertOne: {document: {_id: 7, c: 2}}}, + {deleteMany: {filter: {c: 2}}}, + {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}} + ]], + result: { + acknowledged: true, + insertedCount: 2, + matchedCount: 1, + deletedCount: 3, + upsertedCount: 2, + insertedIds: {'0': 4, '4': 7}, + upsertedIds: {'1': 5, '2': 6} + }, + expected: [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}] + }); + + bulkWriteExecutor({ + insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}], + params: [ + [ {insertOne: {document: {_id: 4, a: 1}}}, {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}}, {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}}, {deleteOne: {filter: {c: 1}}}, - {insertOne: {document: {_id: 7, c: 2}}}, {deleteMany: {filter: {c: 2}}}, {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}} - ]], - result: { - acknowledged: true, - insertedCount: 2, - matchedCount: 1, - deletedCount: 3, - upsertedCount: 2, - insertedIds: {'0': 4, '4': 7}, - upsertedIds: {'1': 5, '2': 6} - }, - expected: - [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}] - }); - - bulkWriteExecutor({ - insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}], - params: [ - [ - {insertOne: {document: {_id: 4, a: 1}}}, - {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, 
upsert: true}}, - {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}}, - {deleteOne: {filter: {c: 1}}}, - {deleteMany: {filter: {c: 2}}}, - {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}} - ], - {ordered: false} - ], - result: { - acknowledged: true, - insertedCount: 1, - matchedCount: 1, - deletedCount: 2, - upsertedCount: 2, - insertedIds: {'0': 4}, - upsertedIds: {'1': 5, '2': 6} - }, - expected: - [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}] - }); - - // DeleteMany - // - - // DeleteMany when many documents match - deleteManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}], - result: {acknowledged: true, deletedCount: 2}, - expected: [{_id: 1, x: 11}] - }); - // DeleteMany when no document matches - deleteManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}], - result: {acknowledged: true, deletedCount: 0}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // DeleteMany when many documents match, no write concern - deleteManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}] - }); - - // - // DeleteOne - // - - // DeleteOne when many documents match - deleteOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}], - result: {acknowledged: true, deletedCount: 1}, - expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] - }); - // DeleteOne when one document matches - deleteOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 2}], - result: {acknowledged: true, deletedCount: 1}, - expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] - }); - // DeleteOne when no documents match - deleteOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}], - result: {acknowledged: true, deletedCount: 0}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // DeleteOne when many documents match, no write concern - deleteOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] - }); - - // - // FindOneAndDelete - // - - // FindOneAndDelete when one document matches - findOneAndDeleteExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 2}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: {x: 33}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] - }); - // FindOneAndDelete when one document matches - findOneAndDeleteExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 2}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: {x: 22}, - expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] - }); - // FindOneAndDelete when no documents match - findOneAndDeleteExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - - // - // FindOneAndReplace - // - - // FindOneAndReplace when many documents match returning the document before modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {x: 32}, {projection: {x: 
1, _id: 0}, sort: {x: 1}}], - result: {x: 22}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when many documents match returning the document after modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: {$gt: 1}}, - {x: 32}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} - ], - result: {x: 32}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when one document matches returning the document before modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 2}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: {x: 22}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when one document matches returning the document after modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 2}, - {x: 32}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} - ], - result: {x: 32}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when no documents match returning the document before modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when no documents match with upsert returning the document before - // modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}] - }); - // FindOneAndReplace when no documents match returning the document after modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 4}, - {x: 44}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} - ], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // FindOneAndReplace when no documents match with upsert returning the document after - // modification - findOneAndReplaceExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 4}, - {x: 44}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true} - ], - result: {x: 44}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}] - }); - - assert.throws(function() { - coll.findOneAndReplace({a: 1}, {$set: {b: 1}}); - }); - - // - // FindOneAndUpdate - // - - // FindOneAndUpdate when many documents match returning the document before modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: {x: 22}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when many documents match returning the document after modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: {$gt: 1}}, - {$inc: {x: 1}}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, 
returnNewDocument: true} - ], - result: {x: 23}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when one document matches returning the document before modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 2}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: {x: 22}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when one document matches returning the document after modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 2}, - {$inc: {x: 1}}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} - ], - result: {x: 23}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when no documents match returning the document before modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when no documents match with upsert returning the document before - // modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 4}, - {$inc: {x: 1}}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true} - ], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // FindOneAndUpdate when no documents match returning the document after modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 4}, - {$inc: {x: 1}}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} - ], - result: null, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // FindOneAndUpdate when no documents match with upsert returning the document after - // modification - findOneAndUpdateExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [ - {_id: 4}, - {$inc: {x: 1}}, - {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true} ], - result: {x: 1}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - - assert.throws(function() { - coll.findOneAndUpdate({a: 1}, {}); - }); - - assert.throws(function() { - coll.findOneAndUpdate({a: 1}, {b: 1}); - }); - - // - // InsertMany - // - - // InsertMany with non-existing documents - insertManyExecutor({ - insert: [{_id: 1, x: 11}], - params: [[{_id: 2, x: 22}, {_id: 3, x: 33}]], - result: {acknowledged: true, insertedIds: [2, 3]}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // InsertMany with non-existing documents, no write concern - insertManyExecutor({ - insert: [{_id: 1, x: 11}], - params: [[{_id: 2, x: 22}, {_id: 3, x: 33}], {w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - - // - // InsertOne - // - - // InsertOne with non-existing documents - insertOneExecutor({ - insert: [{_id: 1, x: 11}], - params: [{_id: 2, x: 22}], - result: {acknowledged: true, insertedId: 2}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] - }); - // InsertOne with non-existing documents, no write concern - insertOneExecutor({ - insert: [{_id: 1, x: 11}], - params: [{_id: 2, x: 22}, {w: 0}], - 
result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] - }); - - // - // ReplaceOne - // - - // ReplaceOne when many documents match - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {x: 111}], - result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 111}, {_id: 3, x: 33}] - }); - // ReplaceOne when one document matches - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 1}, {_id: 1, x: 111}], - result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, - expected: [{_id: 1, x: 111}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // ReplaceOne when no documents match - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {_id: 4, x: 1}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // ReplaceOne with upsert when no documents match without an id specified - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {x: 1}, {upsert: true}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // ReplaceOne with upsert when no documents match with an id specified - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // ReplaceOne with upsert when no documents match with an id specified, no write concern - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // ReplaceOne with upsert when no documents match with an id specified, no write concern - replaceOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, writeConcern: {w: 0}}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - - assert.throws(function() { - coll.replaceOne({a: 1}, {$set: {b: 1}}); - }); - - // - // UpdateMany - // - - // UpdateMany when many documents match - updateManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 2, modifiedCount: 2}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 34}] - }); - // UpdateMany when one document matches - updateManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 1}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, - expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // UpdateMany when no documents match - updateManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // UpdateMany 
with upsert when no documents match - updateManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // UpdateMany with upsert when no documents match, no write concern - updateManyExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true, w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - - assert.throws(function() { - coll.updateMany({a: 1}, {}); - }); - - assert.throws(function() { - coll.updateMany({a: 1}, {b: 1}); - }); - - // - // UpdateOne - // - - // UpdateOne when many documents match - updateOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - // UpdateOne when one document matches - updateOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 1}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, - expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // UpdateOne when no documents match - updateOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - - // UpdateOne with upsert when no documents match - updateOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}], - result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] - }); - // UpdateOne when many documents match, no write concern - updateOneExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {w: 0}], - result: {acknowledged: false}, - expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] - }); - - assert.throws(function() { - coll.updateOne({a: 1}, {}); - }); - - assert.throws(function() { - coll.updateOne({a: 1}, {b: 1}); - }); - - // - // Count - // - - // Simple count of all elements - countExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{}], - result: 3, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple count no arguments - countExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [], - result: 3, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple count filtered - countExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{_id: {$gt: 1}}], - result: 2, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple count of all elements, applying limit - countExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{}, {limit: 1}], - result: 1, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple count of all elements, applying skip - countExecutor({ - insert: [{_id: 1, x: 
11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{}, {skip: 1}], - result: 2, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple count no arguments, applying hint - countExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: [{}, {hint: {"_id": 1}}], - result: 3, - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - - // - // Distinct - // - - // Simple distinct of field x no filter - distinctExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: ['x'], - result: [11, 22, 33], - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple distinct of field x - distinctExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: ['x', {}], - result: [11, 22, 33], - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple distinct of field x filtered - distinctExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: ['x', {x: {$gt: 11}}], - result: [22, 33], - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - // Simple distinct of field x filtered with maxTimeMS - distinctExecutor({ - insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], - params: ['x', {x: {$gt: 11}}, {maxTimeMS: 100000}], - result: [22, 33], - expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] - }); - - // - // Find - // - - coll.deleteMany({}); - // Insert all of them - coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]); - - // Simple projection - var result = - coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray(); - assert.docEq(result, [{a: 1}]); - - // Simple tailable cursor - var cursor = coll.find({}).sort({a: 1}).tailable(); - assert.eq(34, (cursor._options & ~DBQuery.Option.slaveOk)); - var cursor = coll.find({}).sort({a: 1}).tailable(false); - assert.eq(2, (cursor._options & ~DBQuery.Option.slaveOk)); - - // Check modifiers - var cursor = coll.find({}).modifiers({$hint: 'a_1'}); - assert.eq('a_1', cursor._query['$hint']); - - // allowPartialResults - var cursor = coll.find({}).allowPartialResults(); - assert.eq(128, (cursor._options & ~DBQuery.Option.slaveOk)); - - // noCursorTimeout - var cursor = coll.find({}).noCursorTimeout(); - assert.eq(16, (cursor._options & ~DBQuery.Option.slaveOk)); - - // - // Aggregation - // - - coll.deleteMany({}); - // Insert all of them - coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]); - - // Simple aggregation with useCursor - var result = coll.aggregate([{$match: {}}], {useCursor: true}).toArray(); - assert.eq(2, result.length); - - // Simple aggregation with batchSize - var result = coll.aggregate([{$match: {}}], {batchSize: 2}).toArray(); - assert.eq(2, result.length); - - // Drop collection - coll.drop(); - coll.ensureIndex({a: 1}, {unique: true}); - - // Should throw duplicate key error - assert.throws(function() { - coll.insertMany([{a: 0, b: 0}, {a: 0, b: 1}]); - }); - - assert(coll.findOne({a: 0, b: 0}) != null); - assert.throws(function() { - coll.insertOne({a: 0, b: 0}); - }); - - assert.throws(function() { - coll.updateOne({b: 2}, {$set: {a: 0}}, {upsert: true}); - }); - - assert.throws(function() { - coll.updateMany({b: 2}, {$set: {a: 0}}, {upsert: true}); - }); - - assert.throws(function() { - coll.deleteOne({$invalidFieldName: {a: 1}}); - }); - - assert.throws(function() { - coll.deleteMany({$set: {a: 1}}); - }); - - assert.throws(function() { - coll.bulkWrite([{insertOne: {document: {_id: 4, a: 0}}}]); - }); - 
}; - - crudAPISpecTests(); + {ordered: false} + ], + result: { + acknowledged: true, + insertedCount: 1, + matchedCount: 1, + deletedCount: 2, + upsertedCount: 2, + insertedIds: {'0': 4}, + upsertedIds: {'1': 5, '2': 6} + }, + expected: [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}] + }); + + // DeleteMany + // + + // DeleteMany when many documents match + deleteManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}], + result: {acknowledged: true, deletedCount: 2}, + expected: [{_id: 1, x: 11}] + }); + // DeleteMany when no document matches + deleteManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}], + result: {acknowledged: true, deletedCount: 0}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // DeleteMany when many documents match, no write concern + deleteManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}] + }); + + // + // DeleteOne + // + + // DeleteOne when many documents match + deleteOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}], + result: {acknowledged: true, deletedCount: 1}, + expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] + }); + // DeleteOne when one document matches + deleteOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 2}], + result: {acknowledged: true, deletedCount: 1}, + expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] + }); + // DeleteOne when no documents match + deleteOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}], + result: {acknowledged: true, deletedCount: 0}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // DeleteOne when many documents match, no write concern + deleteOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] + }); + + // + // FindOneAndDelete + // + + // FindOneAndDelete when one document matches + findOneAndDeleteExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 2}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 33}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] + }); + // FindOneAndDelete when one document matches + findOneAndDeleteExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 2}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 22}, + expected: [{_id: 1, x: 11}, {_id: 3, x: 33}] + }); + // FindOneAndDelete when no documents match + findOneAndDeleteExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + + // + // FindOneAndReplace + // + + // FindOneAndReplace when many documents match returning the document before modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 22}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when many documents match returning the document after modification + 
findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: {$gt: 1}}, + {x: 32}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: {x: 32}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when one document matches returning the document before modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 2}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 22}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when one document matches returning the document after modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 2}, + {x: 32}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: {x: 32}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when no documents match returning the document before modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when no documents match with upsert returning the document before + // modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}] + }); + // FindOneAndReplace when no documents match returning the document after modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 4}, + {x: 44}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // FindOneAndReplace when no documents match with upsert returning the document after + // modification + findOneAndReplaceExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 4}, + {x: 44}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true} + ], + result: {x: 44}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}] + }); + + assert.throws(function() { + coll.findOneAndReplace({a: 1}, {$set: {b: 1}}); + }); + + // + // FindOneAndUpdate + // + + // FindOneAndUpdate when many documents match returning the document before modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 22}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when many documents match returning the document after modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: {$gt: 1}}, + {$inc: {x: 1}}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: {x: 23}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when one document matches returning the document before modification + 
findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 2}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: {x: 22}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when one document matches returning the document after modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 2}, + {$inc: {x: 1}}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: {x: 23}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when no documents match returning the document before modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when no documents match with upsert returning the document before + // modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: + [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // FindOneAndUpdate when no documents match returning the document after modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 4}, + {$inc: {x: 1}}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true} + ], + result: null, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // FindOneAndUpdate when no documents match with upsert returning the document after + // modification + findOneAndUpdateExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [ + {_id: 4}, + {$inc: {x: 1}}, + {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true} + ], + result: {x: 1}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + + assert.throws(function() { + coll.findOneAndUpdate({a: 1}, {}); + }); + + assert.throws(function() { + coll.findOneAndUpdate({a: 1}, {b: 1}); + }); + + // + // InsertMany + // + + // InsertMany with non-existing documents + insertManyExecutor({ + insert: [{_id: 1, x: 11}], + params: [[{_id: 2, x: 22}, {_id: 3, x: 33}]], + result: {acknowledged: true, insertedIds: [2, 3]}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // InsertMany with non-existing documents, no write concern + insertManyExecutor({ + insert: [{_id: 1, x: 11}], + params: [[{_id: 2, x: 22}, {_id: 3, x: 33}], {w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + + // + // InsertOne + // + + // InsertOne with non-existing documents + insertOneExecutor({ + insert: [{_id: 1, x: 11}], + params: [{_id: 2, x: 22}], + result: {acknowledged: true, insertedId: 2}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] + }); + // InsertOne with non-existing documents, no write concern + insertOneExecutor({ + insert: [{_id: 1, x: 11}], + params: [{_id: 2, x: 22}, {w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}] + }); + + // + // ReplaceOne + // + + // ReplaceOne when many documents match + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 
22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {x: 111}], + result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 111}, {_id: 3, x: 33}] + }); + // ReplaceOne when one document matches + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 1}, {_id: 1, x: 111}], + result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, + expected: [{_id: 1, x: 111}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // ReplaceOne when no documents match + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {_id: 4, x: 1}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // ReplaceOne with upsert when no documents match without an id specified + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {x: 1}, {upsert: true}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // ReplaceOne with upsert when no documents match with an id specified + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // ReplaceOne with upsert when no documents match with an id specified, no write concern + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // ReplaceOne with upsert when no documents match with an id specified, no write concern + replaceOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, writeConcern: {w: 0}}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + + assert.throws(function() { + coll.replaceOne({a: 1}, {$set: {b: 1}}); + }); + + // + // UpdateMany + // + + // UpdateMany when many documents match + updateManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 2, modifiedCount: 2}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 34}] + }); + // UpdateMany when one document matches + updateManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 1}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, + expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // UpdateMany when no documents match + updateManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // UpdateMany with upsert when no documents match + updateManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}], + result: {acknowledged: true, matchedCount: 
0, modifiedCount: 0, upsertedId: 4}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // UpdateMany with upsert when no documents match, no write concern + updateManyExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true, w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + + assert.throws(function() { + coll.updateMany({a: 1}, {}); + }); + + assert.throws(function() { + coll.updateMany({a: 1}, {b: 1}); + }); + + // + // UpdateOne + // + + // UpdateOne when many documents match + updateOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + // UpdateOne when one document matches + updateOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 1}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 1, modifiedCount: 1}, + expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // UpdateOne when no documents match + updateOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + + // UpdateOne with upsert when no documents match + updateOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}], + result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}] + }); + // UpdateOne when many documents match, no write concern + updateOneExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {w: 0}], + result: {acknowledged: false}, + expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}] + }); + + assert.throws(function() { + coll.updateOne({a: 1}, {}); + }); + + assert.throws(function() { + coll.updateOne({a: 1}, {b: 1}); + }); + + // + // Count + // + + // Simple count of all elements + countExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{}], + result: 3, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple count no arguments + countExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [], + result: 3, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple count filtered + countExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{_id: {$gt: 1}}], + result: 2, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple count of all elements, applying limit + countExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{}, {limit: 1}], + result: 1, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple count of all elements, applying skip + countExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{}, {skip: 1}], + result: 2, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple count no arguments, applying hint + countExecutor({ + 
insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: [{}, {hint: {"_id": 1}}], + result: 3, + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + + // + // Distinct + // + + // Simple distinct of field x no filter + distinctExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: ['x'], + result: [11, 22, 33], + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple distinct of field x + distinctExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: ['x', {}], + result: [11, 22, 33], + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple distinct of field x filtered + distinctExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: ['x', {x: {$gt: 11}}], + result: [22, 33], + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + // Simple distinct of field x filtered with maxTimeMS + distinctExecutor({ + insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}], + params: ['x', {x: {$gt: 11}}, {maxTimeMS: 100000}], + result: [22, 33], + expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}] + }); + + // + // Find + // + + coll.deleteMany({}); + // Insert all of them + coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]); + + // Simple projection + var result = coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray(); + assert.docEq(result, [{a: 1}]); + + // Simple tailable cursor + var cursor = coll.find({}).sort({a: 1}).tailable(); + assert.eq(34, (cursor._options & ~DBQuery.Option.slaveOk)); + var cursor = coll.find({}).sort({a: 1}).tailable(false); + assert.eq(2, (cursor._options & ~DBQuery.Option.slaveOk)); + + // Check modifiers + var cursor = coll.find({}).modifiers({$hint: 'a_1'}); + assert.eq('a_1', cursor._query['$hint']); + + // allowPartialResults + var cursor = coll.find({}).allowPartialResults(); + assert.eq(128, (cursor._options & ~DBQuery.Option.slaveOk)); + + // noCursorTimeout + var cursor = coll.find({}).noCursorTimeout(); + assert.eq(16, (cursor._options & ~DBQuery.Option.slaveOk)); + + // + // Aggregation + // + + coll.deleteMany({}); + // Insert all of them + coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]); + + // Simple aggregation with useCursor + var result = coll.aggregate([{$match: {}}], {useCursor: true}).toArray(); + assert.eq(2, result.length); + + // Simple aggregation with batchSize + var result = coll.aggregate([{$match: {}}], {batchSize: 2}).toArray(); + assert.eq(2, result.length); + + // Drop collection + coll.drop(); + coll.ensureIndex({a: 1}, {unique: true}); + + // Should throw duplicate key error + assert.throws(function() { + coll.insertMany([{a: 0, b: 0}, {a: 0, b: 1}]); + }); + + assert(coll.findOne({a: 0, b: 0}) != null); + assert.throws(function() { + coll.insertOne({a: 0, b: 0}); + }); + + assert.throws(function() { + coll.updateOne({b: 2}, {$set: {a: 0}}, {upsert: true}); + }); + + assert.throws(function() { + coll.updateMany({b: 2}, {$set: {a: 0}}, {upsert: true}); + }); + + assert.throws(function() { + coll.deleteOne({$invalidFieldName: {a: 1}}); + }); + + assert.throws(function() { + coll.deleteMany({$set: {a: 1}}); + }); + + assert.throws(function() { + coll.bulkWrite([{insertOne: {document: {_id: 4, a: 0}}}]); + }); +}; + +crudAPISpecTests(); })(); diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js index 9345f900596..636fdee2cb3 100644 --- a/jstests/core/currentop.js +++ b/jstests/core/currentop.js @@ -9,48 +9,48 @@ 
*/ (function() { - "use strict"; - const coll = db.jstests_currentop; - coll.drop(); +"use strict"; +const coll = db.jstests_currentop; +coll.drop(); - // We fsync+lock the server to cause all subsequent write operations to block. - assert.commandWorked(db.fsyncLock()); +// We fsync+lock the server to cause all subsequent write operations to block. +assert.commandWorked(db.fsyncLock()); - const awaitInsertShell = startParallelShell(function() { - assert.writeOK(db.jstests_currentop.insert({})); - }); +const awaitInsertShell = startParallelShell(function() { + assert.writeOK(db.jstests_currentop.insert({})); +}); - // Wait until the write appears in the currentOp output reporting that it is waiting for a lock. - assert.soon( - function() { - var lock_type = ""; - if (jsTest.options().storageEngine === "mobile") { - lock_type = "W"; - } else { - lock_type = "w"; - } - const ops = db.currentOp({ - $and: [ - {"locks.Global": lock_type, waitingForLock: true}, - // Depending on whether CurOp::setNS_inlock() has been called, the "ns" field - // may either be the full collection name or the command namespace. - { - $or: [ - {ns: coll.getFullName()}, - {ns: db.$cmd.getFullName(), "command.insert": coll.getName()} - ] - }, - {type: "op"} - ] - }); - return ops.inprog.length === 1; - }, - function() { - return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp()); +// Wait until the write appears in the currentOp output reporting that it is waiting for a lock. +assert.soon( + function() { + var lock_type = ""; + if (jsTest.options().storageEngine === "mobile") { + lock_type = "W"; + } else { + lock_type = "w"; + } + const ops = db.currentOp({ + $and: [ + {"locks.Global": lock_type, waitingForLock: true}, + // Depending on whether CurOp::setNS_inlock() has been called, the "ns" field + // may either be the full collection name or the command namespace. + { + $or: [ + {ns: coll.getFullName()}, + {ns: db.$cmd.getFullName(), "command.insert": coll.getName()} + ] + }, + {type: "op"} + ] }); + return ops.inprog.length === 1; + }, + function() { + return "Failed to find blocked insert in currentOp() output: " + tojson(db.currentOp()); + }); - // Unlock the server and make sure the write finishes. - const fsyncResponse = assert.commandWorked(db.fsyncUnlock()); - assert.eq(fsyncResponse.lockCount, 0); - awaitInsertShell(); +// Unlock the server and make sure the write finishes. +const fsyncResponse = assert.commandWorked(db.fsyncUnlock()); +assert.eq(fsyncResponse.lockCount, 0); +awaitInsertShell(); }()); diff --git a/jstests/core/currentop_cursors.js b/jstests/core/currentop_cursors.js index 5db0c413f85..9cc7e37dcb2 100644 --- a/jstests/core/currentop_cursors.js +++ b/jstests/core/currentop_cursors.js @@ -6,256 +6,241 @@ */ (function() { - "use strict"; - const coll = db.jstests_currentop_cursors; - // Will skip lsid tests if not in commands read mode. - const commandReadMode = db.getMongo().readMode() == "commands"; - - load("jstests/libs/fixture_helpers.js"); // for FixtureHelpers - - // Avoiding using the shell helper to avoid the implicit collection recreation. - db.runCommand({drop: coll.getName()}); - assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1000})); - for (let i = 0; i < 30; ++i) { - assert.commandWorked(coll.insert({"val": i})); - } - /** - * runTest creates a new collection called jstests_currentop_cursors and then runs the provided - * find query. 
It calls $currentOp and does some basic assertions to make sure idleCursors is - * behaving as intended in each case. - * findFunc: A function that runs a find query. Is expected to return a cursorID. - * Arbitrary code can be run in findFunc as long as it returns a cursorID. - * assertFunc: A function that runs assertions against the results of the $currentOp. - * Takes the following arguments - * 'findOut': The cursorID returned from findFunc. - * 'result': The results from running $currenpOp as an array of JSON objects. - * Arbitrary code can be run in assertFunc, and there is no return value needed. - */ - function runTest({findFunc, assertFunc}) { - const adminDB = db.getSiblingDB("admin"); - const findOut = findFunc(); - const result = - adminDB - .aggregate([ - {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, - {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}} - ]) - .toArray(); - assert.eq(result[0].ns, coll.getFullName(), result); - assert.eq(result[0].cursor.originatingCommand.find, coll.getName(), result); - assertFunc(findOut, result); - const noIdle = - adminDB - .aggregate([ - {$currentOp: {allUsers: false, idleCursors: false}}, - {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}} - ]) - .toArray(); - assert.eq(noIdle.length, 0, tojson(noIdle)); - const noFlag = - adminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: {type: "idleCursor"}}]) - .toArray(); - - assert.eq(noIdle.length, 0, tojson(noFlag)); - } - - // Basic test with default values. - runTest({ - findFunc: function() { - return assert - .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) - .cursor.id; - }, - assertFunc: function(cursorId, result) { - assert.eq(result.length, 1, result); - // Plan summary does not exist on mongos, so skip this test on mongos. - if (!FixtureHelpers.isMongos(db)) { - assert.eq(result[0].planSummary, "COLLSCAN", result); - } else { - assert(!result[0].hasOwnProperty("planSummary"), result); - } - // Lsid will not exist if not in command read mode. - if (commandReadMode) { - assert(result[0].lsid.hasOwnProperty('id'), result); - assert(result[0].lsid.hasOwnProperty('uid'), result); - } - const uri = new MongoURI(db.getMongo().host); - assert(uri.servers.some((server) => { - return result[0].host == getHostName() + ":" + server.port; - })); - const idleCursor = result[0].cursor; - assert.eq(idleCursor.nDocsReturned, 2, result); - assert.eq(idleCursor.nBatchesReturned, 1, result); - assert.eq(idleCursor.tailable, false, result); - assert.eq(idleCursor.awaitData, false, result); - assert.eq(idleCursor.noCursorTimeout, false, result); - assert.eq(idleCursor.originatingCommand.batchSize, 2, result); - assert.lte(idleCursor.createdDate, idleCursor.lastAccessDate, result); - // Make sure that the top level fields do not also appear in the cursor subobject. - assert(!idleCursor.hasOwnProperty("planSummary"), result); - assert(!idleCursor.hasOwnProperty('host'), result); - assert(!idleCursor.hasOwnProperty('lsid'), result); +"use strict"; +const coll = db.jstests_currentop_cursors; +// Will skip lsid tests if not in commands read mode. +const commandReadMode = db.getMongo().readMode() == "commands"; + +load("jstests/libs/fixture_helpers.js"); // for FixtureHelpers + +// Avoiding using the shell helper to avoid the implicit collection recreation. 
+db.runCommand({drop: coll.getName()}); +assert.commandWorked(db.createCollection(coll.getName(), {capped: true, size: 1000})); +for (let i = 0; i < 30; ++i) { + assert.commandWorked(coll.insert({"val": i})); +} +/** + * runTest creates a new collection called jstests_currentop_cursors and then runs the provided + * find query. It calls $currentOp and does some basic assertions to make sure idleCursors is + * behaving as intended in each case. + * findFunc: A function that runs a find query. Is expected to return a cursorID. + * Arbitrary code can be run in findFunc as long as it returns a cursorID. + * assertFunc: A function that runs assertions against the results of the $currentOp. + * Takes the following arguments + * 'findOut': The cursorID returned from findFunc. + * 'result': The results from running $currenpOp as an array of JSON objects. + * Arbitrary code can be run in assertFunc, and there is no return value needed. + */ +function runTest({findFunc, assertFunc}) { + const adminDB = db.getSiblingDB("admin"); + const findOut = findFunc(); + const result = adminDB + .aggregate([ + {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, + {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}} + ]) + .toArray(); + assert.eq(result[0].ns, coll.getFullName(), result); + assert.eq(result[0].cursor.originatingCommand.find, coll.getName(), result); + assertFunc(findOut, result); + const noIdle = adminDB + .aggregate([ + {$currentOp: {allUsers: false, idleCursors: false}}, + {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": findOut}]}} + ]) + .toArray(); + assert.eq(noIdle.length, 0, tojson(noIdle)); + const noFlag = + adminDB.aggregate([{$currentOp: {allUsers: false}}, {$match: {type: "idleCursor"}}]) + .toArray(); + + assert.eq(noIdle.length, 0, tojson(noFlag)); +} + +// Basic test with default values. +runTest({ + findFunc: function() { + return assert + .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) + .cursor.id; + }, + assertFunc: function(cursorId, result) { + assert.eq(result.length, 1, result); + // Plan summary does not exist on mongos, so skip this test on mongos. + if (!FixtureHelpers.isMongos(db)) { + assert.eq(result[0].planSummary, "COLLSCAN", result); + } else { + assert(!result[0].hasOwnProperty("planSummary"), result); } - }); - - // Test that tailable, awaitData, and noCursorTimeout are set. - runTest({ - findFunc: function() { - return assert - .commandWorked(db.runCommand({ - find: "jstests_currentop_cursors", - batchSize: 2, - tailable: true, - awaitData: true, - noCursorTimeout: true - })) - .cursor.id; - }, - assertFunc: function(cursorId, result) { - - assert.eq(result.length, 1, result); - const idleCursor = result[0].cursor; - assert.eq(idleCursor.tailable, true, result); - assert.eq(idleCursor.awaitData, true, result); - assert.eq(idleCursor.noCursorTimeout, true, result); - assert.eq(idleCursor.originatingCommand.batchSize, 2, result); + // Lsid will not exist if not in command read mode. + if (commandReadMode) { + assert(result[0].lsid.hasOwnProperty('id'), result); + assert(result[0].lsid.hasOwnProperty('uid'), result); } - }); - - // Test that dates are set correctly. - runTest({ - findFunc: function() { - return assert - .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) - .cursor.id; - }, - assertFunc: function(cursorId, result) { - const adminDB = db.getSiblingDB("admin"); - // Make sure the two cursors have different creation times. 
- assert.soon(() => { - const secondCursor = assert.commandWorked( - db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})); + const uri = new MongoURI(db.getMongo().host); + assert(uri.servers.some((server) => { + return result[0].host == getHostName() + ":" + server.port; + })); + const idleCursor = result[0].cursor; + assert.eq(idleCursor.nDocsReturned, 2, result); + assert.eq(idleCursor.nBatchesReturned, 1, result); + assert.eq(idleCursor.tailable, false, result); + assert.eq(idleCursor.awaitData, false, result); + assert.eq(idleCursor.noCursorTimeout, false, result); + assert.eq(idleCursor.originatingCommand.batchSize, 2, result); + assert.lte(idleCursor.createdDate, idleCursor.lastAccessDate, result); + // Make sure that the top level fields do not also appear in the cursor subobject. + assert(!idleCursor.hasOwnProperty("planSummary"), result); + assert(!idleCursor.hasOwnProperty('host'), result); + assert(!idleCursor.hasOwnProperty('lsid'), result); + } +}); + +// Test that tailable, awaitData, and noCursorTimeout are set. +runTest({ + findFunc: function() { + return assert + .commandWorked(db.runCommand({ + find: "jstests_currentop_cursors", + batchSize: 2, + tailable: true, + awaitData: true, + noCursorTimeout: true + })) + .cursor.id; + }, + assertFunc: function(cursorId, result) { + assert.eq(result.length, 1, result); + const idleCursor = result[0].cursor; + assert.eq(idleCursor.tailable, true, result); + assert.eq(idleCursor.awaitData, true, result); + assert.eq(idleCursor.noCursorTimeout, true, result); + assert.eq(idleCursor.originatingCommand.batchSize, 2, result); + } +}); + +// Test that dates are set correctly. +runTest({ + findFunc: function() { + return assert + .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) + .cursor.id; + }, + assertFunc: function(cursorId, result) { + const adminDB = db.getSiblingDB("admin"); + // Make sure the two cursors have different creation times. + assert.soon(() => { + const secondCursor = assert.commandWorked( + db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})); - const secondResult = - adminDB - .aggregate([ - {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, - { - $match: { - $and: [ - {type: "idleCursor"}, - {"cursor.cursorId": secondCursor.cursor.id} - ] - } + const secondResult = + adminDB + .aggregate([ + {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, + { + $match: { + $and: [ + {type: "idleCursor"}, + {"cursor.cursorId": secondCursor.cursor.id} + ] } - ]) - .toArray(); - return result[0].cursor.createdDate < secondResult[0].cursor.createdDate; - }); - } - }); + } + ]) + .toArray(); + return result[0].cursor.createdDate < secondResult[0].cursor.createdDate; + }); + } +}); + +// Test larger batch size. +runTest({ + findFunc: function() { + return assert + .commandWorked(db.runCommand( + {find: "jstests_currentop_cursors", batchSize: 4, noCursorTimeout: true})) + .cursor.id; + }, + assertFunc: function(cursorId, result) { + const idleCursor = result[0].cursor; + assert.eq(result.length, 1, result); + assert.eq(idleCursor.nDocsReturned, 4, result); + assert.eq(idleCursor.nBatchesReturned, 1, result); + assert.eq(idleCursor.noCursorTimeout, true, result); + assert.eq(idleCursor.originatingCommand.batchSize, 4, result); + } +}); + +// Test batchSize and nDocs are incremented correctly. 
+runTest({ + findFunc: function() { + return assert + .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) + .cursor.id; + }, + assertFunc: function(cursorId, result) { + const adminDB = db.getSiblingDB("admin"); + const originalAccess = result[0].cursor.lastAccessDate; + assert.commandWorked(db.runCommand( + {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2})); + result = adminDB + .aggregate([ + {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, + {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}} + ]) + .toArray(); + let idleCursor = result[0].cursor; + assert.eq(idleCursor.nDocsReturned, 4, result); + assert.eq(idleCursor.nBatchesReturned, 2, result); + assert.eq(idleCursor.originatingCommand.batchSize, 2, result); + // Make sure that the getMore will not finish running in the same milli as the cursor + // creation. + assert.soon(() => { + assert.commandWorked(db.runCommand( + {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2})); + result = adminDB + .aggregate([ + {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, + {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}} + ]) + .toArray(); + idleCursor = result[0].cursor; + return idleCursor.createdDate < idleCursor.lastAccessDate && + originalAccess < idleCursor.lastAccessDate; + }); + } +}); - // Test larger batch size. +// planSummary does not exist on Mongos, so skip this test. +if (!FixtureHelpers.isMongos(db)) { runTest({ findFunc: function() { + assert.commandWorked(coll.createIndex({"val": 1})); return assert .commandWorked(db.runCommand( - {find: "jstests_currentop_cursors", batchSize: 4, noCursorTimeout: true})) + {find: "jstests_currentop_cursors", filter: {"val": {$gt: 2}}, batchSize: 2})) .cursor.id; }, assertFunc: function(cursorId, result) { - const idleCursor = result[0].cursor; assert.eq(result.length, 1, result); - assert.eq(idleCursor.nDocsReturned, 4, result); - assert.eq(idleCursor.nBatchesReturned, 1, result); - assert.eq(idleCursor.noCursorTimeout, true, result); - assert.eq(idleCursor.originatingCommand.batchSize, 4, result); + assert.eq(result[0].planSummary, "IXSCAN { val: 1 }", result); } }); - - // Test batchSize and nDocs are incremented correctly. +} +// Test lsid.id value is correct if in commandReadMode. +if (commandReadMode) { + const session = db.getMongo().startSession(); runTest({ findFunc: function() { + const sessionDB = session.getDatabase("test"); return assert - .commandWorked(db.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) + .commandWorked( + sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) .cursor.id; }, assertFunc: function(cursorId, result) { - const adminDB = db.getSiblingDB("admin"); - const originalAccess = result[0].cursor.lastAccessDate; - assert.commandWorked(db.runCommand( - {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2})); - result = - adminDB - .aggregate([ - {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, - {$match: {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]}} - ]) - .toArray(); - let idleCursor = result[0].cursor; - assert.eq(idleCursor.nDocsReturned, 4, result); - assert.eq(idleCursor.nBatchesReturned, 2, result); - assert.eq(idleCursor.originatingCommand.batchSize, 2, result); - // Make sure that the getMore will not finish running in the same milli as the cursor - // creation. 
- assert.soon(() => { - assert.commandWorked(db.runCommand( - {getMore: cursorId, collection: "jstests_currentop_cursors", batchSize: 2})); - result = - adminDB - .aggregate([ - {$currentOp: {localOps: true, allUsers: false, idleCursors: true}}, - { - $match: - {$and: [{type: "idleCursor"}, {"cursor.cursorId": cursorId}]} - } - ]) - .toArray(); - idleCursor = result[0].cursor; - return idleCursor.createdDate < idleCursor.lastAccessDate && - originalAccess < idleCursor.lastAccessDate; - - }); + assert.eq(result.length, 1, result); + assert.eq(session.getSessionId().id, result[0].lsid.id); } }); - - // planSummary does not exist on Mongos, so skip this test. - if (!FixtureHelpers.isMongos(db)) { - runTest({ - findFunc: function() { - assert.commandWorked(coll.createIndex({"val": 1})); - return assert - .commandWorked(db.runCommand({ - find: "jstests_currentop_cursors", - filter: {"val": {$gt: 2}}, - batchSize: 2 - })) - .cursor.id; - - }, - assertFunc: function(cursorId, result) { - assert.eq(result.length, 1, result); - assert.eq(result[0].planSummary, "IXSCAN { val: 1 }", result); - - } - }); - } - // Test lsid.id value is correct if in commandReadMode. - if (commandReadMode) { - const session = db.getMongo().startSession(); - runTest({ - findFunc: function() { - const sessionDB = session.getDatabase("test"); - return assert - .commandWorked( - sessionDB.runCommand({find: "jstests_currentop_cursors", batchSize: 2})) - .cursor.id; - }, - assertFunc: function(cursorId, result) { - assert.eq(result.length, 1, result); - assert.eq(session.getSessionId().id, result[0].lsid.id); - } - }); - } - +} })(); diff --git a/jstests/core/currentop_predicate.js b/jstests/core/currentop_predicate.js index 049d7d3ab30..ddda0570f87 100644 --- a/jstests/core/currentop_predicate.js +++ b/jstests/core/currentop_predicate.js @@ -2,13 +2,13 @@ // Tests the use of a match predicate with the currentOp command. (function() { - // Test a predicate that matches the currentOp operation we are running. - var res = db.adminCommand("currentOp", {command: {$exists: true}}); - assert.commandWorked(res); - assert.gt(res.inprog.length, 0, tojson(res)); +// Test a predicate that matches the currentOp operation we are running. +var res = db.adminCommand("currentOp", {command: {$exists: true}}); +assert.commandWorked(res); +assert.gt(res.inprog.length, 0, tojson(res)); - // Test a predicate that matches no operations. - res = db.adminCommand("currentOp", {dummyCurOpField: {exists: true}}); - assert.commandWorked(res); - assert.eq(res.inprog.length, 0, tojson(res)); +// Test a predicate that matches no operations. 
+res = db.adminCommand("currentOp", {dummyCurOpField: {exists: true}}); +assert.commandWorked(res); +assert.eq(res.inprog.length, 0, tojson(res)); })(); diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js index 93113055497..3def8c6162f 100644 --- a/jstests/core/cursora.js +++ b/jstests/core/cursora.js @@ -8,47 +8,48 @@ // ] (function() { - "use strict"; - - const t = db.cursora; - - function run(n) { - if (!isNumber(n)) { - assert(isNumber(n), "cursora.js isNumber"); - } - t.drop(); - - let bulk = t.initializeUnorderedBulkOp(); - for (let i = 0; i < n; i++) - bulk.insert({_id: i}); - assert.writeOK(bulk.execute()); - - const join = startParallelShell("sleep(50);" + "db.cursora.remove({});"); - - let num; - try { - let start = new Date(); - num = t.find(function() { - let num = 2; - for (let x = 0; x < 1000; x++) - num += 2; - return num > 0; - }) - .sort({_id: -1}) - .itcount(); - } catch (e) { - print("cursora.js FAIL " + e); - join(); - throw e; - } +"use strict"; - join(); +const t = db.cursora; - assert.eq(0, t.count()); - if (n == num) - print("cursora.js warning: shouldn't have counted all n: " + n + " num: " + num); +function run(n) { + if (!isNumber(n)) { + assert(isNumber(n), "cursora.js isNumber"); + } + t.drop(); + + let bulk = t.initializeUnorderedBulkOp(); + for (let i = 0; i < n; i++) + bulk.insert({_id: i}); + assert.writeOK(bulk.execute()); + + const join = startParallelShell("sleep(50);" + + "db.cursora.remove({});"); + + let num; + try { + let start = new Date(); + num = t.find(function() { + let num = 2; + for (let x = 0; x < 1000; x++) + num += 2; + return num > 0; + }) + .sort({_id: -1}) + .itcount(); + } catch (e) { + print("cursora.js FAIL " + e); + join(); + throw e; } - run(1500); - run(5000); + join(); + + assert.eq(0, t.count()); + if (n == num) + print("cursora.js warning: shouldn't have counted all n: " + n + " num: " + num); +} + +run(1500); +run(5000); })(); diff --git a/jstests/core/datasize2.js b/jstests/core/datasize2.js index a64bb62c287..2468e490602 100644 --- a/jstests/core/datasize2.js +++ b/jstests/core/datasize2.js @@ -8,32 +8,32 @@ // (function() { - "use strict"; +"use strict"; - var coll = db.foo; - var adminDB = db.getSiblingDB('admin'); - coll.drop(); +var coll = db.foo; +var adminDB = db.getSiblingDB('admin'); +coll.drop(); - var N = 1000; - for (var i = 0; i < N; i++) { - coll.insert({_id: i, s: "asdasdasdasdasdasdasd"}); - } +var N = 1000; +for (var i = 0; i < N; i++) { + coll.insert({_id: i, s: "asdasdasdasdasdasdasd"}); +} - var dataSizeCommand = - {"dataSize": "test.foo", "keyPattern": {"_id": 1}, "min": {"_id": 0}, "max": {"_id": N}}; +var dataSizeCommand = + {"dataSize": "test.foo", "keyPattern": {"_id": 1}, "min": {"_id": 0}, "max": {"_id": N}}; - assert.eq(N, - db.runCommand(dataSizeCommand).numObjects, - "dataSize command on 'test.foo' failed when called on the 'test' DB."); - assert.eq(N, - adminDB.runCommand(dataSizeCommand).numObjects, - "dataSize command on 'test.foo' failed when called on the 'admin' DB."); +assert.eq(N, + db.runCommand(dataSizeCommand).numObjects, + "dataSize command on 'test.foo' failed when called on the 'test' DB."); +assert.eq(N, + adminDB.runCommand(dataSizeCommand).numObjects, + "dataSize command on 'test.foo' failed when called on the 'admin' DB."); - dataSizeCommand.maxObjects = 100; - assert.eq(101, - db.runCommand(dataSizeCommand).numObjects, - "dataSize command with max number of objects set failed on 'test' DB"); - assert.eq(101, - db.runCommand(dataSizeCommand).numObjects, - "dataSize 
command with max number of objects set failed on 'admin' DB"); +dataSizeCommand.maxObjects = 100; +assert.eq(101, + db.runCommand(dataSizeCommand).numObjects, + "dataSize command with max number of objects set failed on 'test' DB"); +assert.eq(101, + db.runCommand(dataSizeCommand).numObjects, + "dataSize command with max number of objects set failed on 'admin' DB"); })(); diff --git a/jstests/core/dbadmin.js b/jstests/core/dbadmin.js index 1dd042d863d..3d2483b0334 100644 --- a/jstests/core/dbadmin.js +++ b/jstests/core/dbadmin.js @@ -1,37 +1,35 @@ load('jstests/aggregation/extras/utils.js'); (function() { - 'use strict'; +'use strict'; - var t = db.dbadmin; - t.save({x: 1}); - t.save({x: 1}); +var t = db.dbadmin; +t.save({x: 1}); +t.save({x: 1}); - var res = db.adminCommand("listDatabases"); - assert(res.databases && res.databases.length > 0, "listDatabases: " + tojson(res)); +var res = db.adminCommand("listDatabases"); +assert(res.databases && res.databases.length > 0, "listDatabases: " + tojson(res)); - var res = db.adminCommand({listDatabases: 1, nameOnly: true}); - assert(res.databases && res.databases.length > 0 && res.totalSize === undefined, - "listDatabases nameOnly: " + tojson(res)); +var res = db.adminCommand({listDatabases: 1, nameOnly: true}); +assert(res.databases && res.databases.length > 0 && res.totalSize === undefined, + "listDatabases nameOnly: " + tojson(res)); - var now = new Date(); - var x = db._adminCommand("ismaster"); - assert(x.ismaster, "ismaster failed: " + tojson(x)); - assert(x.localTime, "ismaster didn't include time: " + tojson(x)); +var now = new Date(); +var x = db._adminCommand("ismaster"); +assert(x.ismaster, "ismaster failed: " + tojson(x)); +assert(x.localTime, "ismaster didn't include time: " + tojson(x)); - var localTimeSkew = x.localTime - now; - if (localTimeSkew >= 50) { - print("Warning: localTimeSkew " + localTimeSkew + " > 50ms."); - } - assert.lt(localTimeSkew, 500, "isMaster.localTime"); +var localTimeSkew = x.localTime - now; +if (localTimeSkew >= 50) { + print("Warning: localTimeSkew " + localTimeSkew + " > 50ms."); +} +assert.lt(localTimeSkew, 500, "isMaster.localTime"); - var before = db.runCommand("serverStatus"); - print(before.uptimeEstimate); - sleep(5000); - - var after = db.runCommand("serverStatus"); - print(after.uptimeEstimate); - assert.gte( - after.uptimeEstimate, before.uptimeEstimate, "uptime estimate should be non-decreasing"); +var before = db.runCommand("serverStatus"); +print(before.uptimeEstimate); +sleep(5000); +var after = db.runCommand("serverStatus"); +print(after.uptimeEstimate); +assert.gte(after.uptimeEstimate, before.uptimeEstimate, "uptime estimate should be non-decreasing"); })(); diff --git a/jstests/core/dbref4.js b/jstests/core/dbref4.js index d4648497218..0de94028e39 100644 --- a/jstests/core/dbref4.js +++ b/jstests/core/dbref4.js @@ -3,22 +3,22 @@ // Ensures round-trippability of int ids in DBRef's after a save/restore (function() { - "use strict"; +"use strict"; - const coll = db.dbref4; - coll.drop(); +const coll = db.dbref4; +coll.drop(); - coll.insert({ - "refInt": DBRef("DBRef", NumberInt(1), "Ref"), - }); +coll.insert({ + "refInt": DBRef("DBRef", NumberInt(1), "Ref"), +}); - // we inserted something with an int - assert(coll.findOne({'refInt.$id': {$type: 16}})); +// we inserted something with an int +assert(coll.findOne({'refInt.$id': {$type: 16}})); - var doc = coll.findOne(); - doc.x = 1; - coll.save(doc); +var doc = coll.findOne(); +doc.x = 1; +coll.save(doc); - // after pulling it back and 
saving it again, still has an int - assert(coll.findOne({'refInt.$id': {$type: 16}})); +// after pulling it back and saving it again, still has an int +assert(coll.findOne({'refInt.$id': {$type: 16}})); })(); diff --git a/jstests/core/dbstats.js b/jstests/core/dbstats.js index 18f395a4e9b..aa413f905fc 100644 --- a/jstests/core/dbstats.js +++ b/jstests/core/dbstats.js @@ -3,68 +3,70 @@ // @tags: [requires_dbstats] (function() { - "use strict"; - - function serverIsMongos() { - const res = db.runCommand("ismaster"); - assert.commandWorked(res); - return res.msg === "isdbgrid"; - } - - function serverUsingPersistentStorage() { - const res = db.runCommand("serverStatus"); - assert.commandWorked(res); - return res.storageEngine.persistent === true; - } - - const isMongoS = serverIsMongos(); - const isUsingPersistentStorage = !isMongoS && serverUsingPersistentStorage(); - - let testDB = db.getSiblingDB("dbstats_js"); - assert.commandWorked(testDB.dropDatabase()); - - let coll = testDB["testColl"]; - assert.commandWorked(coll.createIndex({x: 1})); - const doc = {_id: 1, x: 1}; - assert.writeOK(coll.insert(doc)); - - let dbStats = testDB.runCommand({dbStats: 1}); +"use strict"; + +function serverIsMongos() { + const res = db.runCommand("ismaster"); + assert.commandWorked(res); + return res.msg === "isdbgrid"; +} + +function serverUsingPersistentStorage() { + const res = db.runCommand("serverStatus"); + assert.commandWorked(res); + return res.storageEngine.persistent === true; +} + +const isMongoS = serverIsMongos(); +const isUsingPersistentStorage = !isMongoS && serverUsingPersistentStorage(); + +let testDB = db.getSiblingDB("dbstats_js"); +assert.commandWorked(testDB.dropDatabase()); + +let coll = testDB["testColl"]; +assert.commandWorked(coll.createIndex({x: 1})); +const doc = { + _id: 1, + x: 1 +}; +assert.writeOK(coll.insert(doc)); + +let dbStats = testDB.runCommand({dbStats: 1}); +assert.commandWorked(dbStats); + +assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only +const dataSize = Object.bsonsize(doc); +assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats)); +assert.eq(dataSize, dbStats.dataSize, tojson(dbStats)); + +// Index count will vary on mongoS if an additional index is needed to support sharding. +if (isMongoS) { + assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats)); +} else { + assert.eq(2, dbStats.indexes, tojson(dbStats)); +} + +assert(dbStats.hasOwnProperty("storageSize"), tojson(dbStats)); +assert(dbStats.hasOwnProperty("totalSize"), tojson(dbStats)); +assert(dbStats.hasOwnProperty("indexSize"), tojson(dbStats)); + +if (isUsingPersistentStorage) { + assert(dbStats.hasOwnProperty("fsUsedSize"), tojson(dbStats)); + assert(dbStats.hasOwnProperty("fsTotalSize"), tojson(dbStats)); +} + +// Confirm collection and view counts on mongoD +if (!isMongoS) { + assert.eq(testDB.getName(), dbStats.db, tojson(dbStats)); + + // We wait to add a view until this point as it allows more exact testing of avgObjSize for + // WiredTiger above. Having more than 1 document would require floating point comparison. 
+ assert.commandWorked(testDB.createView("testView", "testColl", [])); + + dbStats = testDB.runCommand({dbStats: 1}); assert.commandWorked(dbStats); - assert.eq(1, dbStats.objects, tojson(dbStats)); // Includes testColl only - const dataSize = Object.bsonsize(doc); - assert.eq(dataSize, dbStats.avgObjSize, tojson(dbStats)); - assert.eq(dataSize, dbStats.dataSize, tojson(dbStats)); - - // Index count will vary on mongoS if an additional index is needed to support sharding. - if (isMongoS) { - assert(dbStats.hasOwnProperty("indexes"), tojson(dbStats)); - } else { - assert.eq(2, dbStats.indexes, tojson(dbStats)); - } - - assert(dbStats.hasOwnProperty("storageSize"), tojson(dbStats)); - assert(dbStats.hasOwnProperty("totalSize"), tojson(dbStats)); - assert(dbStats.hasOwnProperty("indexSize"), tojson(dbStats)); - - if (isUsingPersistentStorage) { - assert(dbStats.hasOwnProperty("fsUsedSize"), tojson(dbStats)); - assert(dbStats.hasOwnProperty("fsTotalSize"), tojson(dbStats)); - } - - // Confirm collection and view counts on mongoD - if (!isMongoS) { - assert.eq(testDB.getName(), dbStats.db, tojson(dbStats)); - - // We wait to add a view until this point as it allows more exact testing of avgObjSize for - // WiredTiger above. Having more than 1 document would require floating point comparison. - assert.commandWorked(testDB.createView("testView", "testColl", [])); - - dbStats = testDB.runCommand({dbStats: 1}); - assert.commandWorked(dbStats); - - assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views - assert.eq(1, dbStats.views, tojson(dbStats)); - } - + assert.eq(2, dbStats.collections, tojson(dbStats)); // testColl + system.views + assert.eq(1, dbStats.views, tojson(dbStats)); +} })(); diff --git a/jstests/core/diagdata.js b/jstests/core/diagdata.js index f002004b5a5..2f2c224304e 100644 --- a/jstests/core/diagdata.js +++ b/jstests/core/diagdata.js @@ -7,10 +7,10 @@ load('jstests/libs/ftdc.js'); (function() { - "use strict"; +"use strict"; - // Verify we require admin database - assert.commandFailed(db.diagdata.runCommand("getDiagnosticData")); +// Verify we require admin database +assert.commandFailed(db.diagdata.runCommand("getDiagnosticData")); - verifyGetDiagnosticData(db.getSiblingDB('admin')); +verifyGetDiagnosticData(db.getSiblingDB('admin')); })(); diff --git a/jstests/core/distinct1.js b/jstests/core/distinct1.js index aee7b604926..1d4ccaab16c 100644 --- a/jstests/core/distinct1.js +++ b/jstests/core/distinct1.js @@ -1,69 +1,68 @@ (function() { - "use strict"; - const collName = "distinct1"; - const coll = db.getCollection(collName); - coll.drop(); +"use strict"; +const collName = "distinct1"; +const coll = db.getCollection(collName); +coll.drop(); - assert.eq(0, coll.distinct("a").length, "test empty"); +assert.eq(0, coll.distinct("a").length, "test empty"); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: 3})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 3})); - // Test that distinct returns all the distinct values. - assert.eq([1, 2, 3], coll.distinct("a").sort(), "distinct returned unexpected results"); +// Test that distinct returns all the distinct values. 
+assert.eq([1, 2, 3], coll.distinct("a").sort(), "distinct returned unexpected results"); - // Test that distinct respects the query condition. - assert.eq([1, 2], - coll.distinct("a", {a: {$lt: 3}}).sort(), - "distinct with query returned unexpected results"); +// Test that distinct respects the query condition. +assert.eq([1, 2], + coll.distinct("a", {a: {$lt: 3}}).sort(), + "distinct with query returned unexpected results"); - assert(coll.drop()); +assert(coll.drop()); - assert.writeOK(coll.insert({a: {b: "a"}, c: 12})); - assert.writeOK(coll.insert({a: {b: "b"}, c: 12})); - assert.writeOK(coll.insert({a: {b: "c"}, c: 12})); - assert.writeOK(coll.insert({a: {b: "c"}, c: 12})); +assert.writeOK(coll.insert({a: {b: "a"}, c: 12})); +assert.writeOK(coll.insert({a: {b: "b"}, c: 12})); +assert.writeOK(coll.insert({a: {b: "c"}, c: 12})); +assert.writeOK(coll.insert({a: {b: "c"}, c: 12})); - // Test that distinct works on fields in embedded documents. - assert.eq(["a", "b", "c"], - coll.distinct("a.b").sort(), - "distinct on dotted field returned unexpected results"); +// Test that distinct works on fields in embedded documents. +assert.eq(["a", "b", "c"], + coll.distinct("a.b").sort(), + "distinct on dotted field returned unexpected results"); - assert(coll.drop()); +assert(coll.drop()); - assert.writeOK(coll.insert({_id: 1, a: 1})); - assert.writeOK(coll.insert({_id: 2, a: 2})); +assert.writeOK(coll.insert({_id: 1, a: 1})); +assert.writeOK(coll.insert({_id: 2, a: 2})); - // Test that distinct works on the _id field. - assert.eq([1, 2], coll.distinct("_id").sort(), "distinct on _id returned unexpected results"); +// Test that distinct works on the _id field. +assert.eq([1, 2], coll.distinct("_id").sort(), "distinct on _id returned unexpected results"); - // Test that distinct works with a query on the _id field. - assert.eq([1], - coll.distinct("a", {_id: 1}), - "distinct with query on _id returned unexpected results"); +// Test that distinct works with a query on the _id field. +assert.eq( + [1], coll.distinct("a", {_id: 1}), "distinct with query on _id returned unexpected results"); - assert(coll.drop()); +assert(coll.drop()); - assert.writeOK(coll.insert({a: 1, b: 2})); - assert.writeOK(coll.insert({a: 2, b: 2})); - assert.writeOK(coll.insert({a: 2, b: 1})); - assert.writeOK(coll.insert({a: 2, b: 2})); - assert.writeOK(coll.insert({a: 3, b: 2})); - assert.writeOK(coll.insert({a: 4, b: 1})); - assert.writeOK(coll.insert({a: 4, b: 1})); +assert.writeOK(coll.insert({a: 1, b: 2})); +assert.writeOK(coll.insert({a: 2, b: 2})); +assert.writeOK(coll.insert({a: 2, b: 1})); +assert.writeOK(coll.insert({a: 2, b: 2})); +assert.writeOK(coll.insert({a: 3, b: 2})); +assert.writeOK(coll.insert({a: 4, b: 1})); +assert.writeOK(coll.insert({a: 4, b: 1})); - // Test running the distinct command directly, rather than via shell helper. - let res = assert.commandWorked(db.runCommand({distinct: collName, key: "a"})); - assert.eq([1, 2, 3, 4], res.values.sort()); +// Test running the distinct command directly, rather than via shell helper. 
+let res = assert.commandWorked(db.runCommand({distinct: collName, key: "a"})); +assert.eq([1, 2, 3, 4], res.values.sort()); - res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: null})); - assert.eq([1, 2, 3, 4], res.values.sort()); +res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: null})); +assert.eq([1, 2, 3, 4], res.values.sort()); - res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: {b: 2}})); - assert.eq([1, 2, 3], res.values.sort()); +res = assert.commandWorked(db.runCommand({distinct: collName, key: "a", query: {b: 2}})); +assert.eq([1, 2, 3], res.values.sort()); - assert.commandFailed(db.runCommand({distinct: collName, key: "a", query: 1})); +assert.commandFailed(db.runCommand({distinct: collName, key: "a", query: 1})); }()); diff --git a/jstests/core/distinct4.js b/jstests/core/distinct4.js index 2723e947d2d..a66022ecbaa 100644 --- a/jstests/core/distinct4.js +++ b/jstests/core/distinct4.js @@ -1,55 +1,54 @@ // Vaildate input to distinct command. SERVER-12642 (function() { - "use strict"; +"use strict"; - var t = db.distinct4; +var t = db.distinct4; - t.drop(); - t.save({a: null}); - t.save({a: 1}); - t.save({a: 1}); - t.save({a: 2}); - t.save({a: 3}); +t.drop(); +t.save({a: null}); +t.save({a: 1}); +t.save({a: 1}); +t.save({a: 2}); +t.save({a: 3}); - // first argument should be a string or error +// first argument should be a string or error - // from shell helper - assert.throws(function() { - t.distinct({a: 1}); - }); +// from shell helper +assert.throws(function() { + t.distinct({a: 1}); +}); - // from command interface - assert.commandFailedWithCode(t.runCommand("distinct", {"key": {a: 1}}), - ErrorCodes.TypeMismatch); +// from command interface +assert.commandFailedWithCode(t.runCommand("distinct", {"key": {a: 1}}), ErrorCodes.TypeMismatch); - // second argument should be a document or error +// second argument should be a document or error - // from shell helper - assert.throws(function() { - t.distinct('a', '1'); - }); +// from shell helper +assert.throws(function() { + t.distinct('a', '1'); +}); - // from command interface - assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a", "query": "a"}), - ErrorCodes.TypeMismatch); +// from command interface +assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a", "query": "a"}), + ErrorCodes.TypeMismatch); - // empty query clause should not cause error +// empty query clause should not cause error - // from shell helper - var a = assert.doesNotThrow(function() { - return t.distinct('a'); - }); - // [ null, 1, 2, 3 ] - assert.eq(4, a.length, tojson(a)); - assert.contains(null, a); - assert.contains(1, a); - assert.contains(2, a); - assert.contains(3, a); +// from shell helper +var a = assert.doesNotThrow(function() { + return t.distinct('a'); +}); +// [ null, 1, 2, 3 ] +assert.eq(4, a.length, tojson(a)); +assert.contains(null, a); +assert.contains(1, a); +assert.contains(2, a); +assert.contains(3, a); - // from command interface - assert.commandWorked(t.runCommand("distinct", {"key": "a"})); +// from command interface +assert.commandWorked(t.runCommand("distinct", {"key": "a"})); - // embedded nulls are prohibited in the key field - assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a\0b"}), 31032); +// embedded nulls are prohibited in the key field +assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a\0b"}), 31032); })(); diff --git a/jstests/core/distinct_compound_index.js 
b/jstests/core/distinct_compound_index.js index 0176e3581a0..6182267ea51 100644 --- a/jstests/core/distinct_compound_index.js +++ b/jstests/core/distinct_compound_index.js @@ -1,35 +1,34 @@ // @tags: [assumes_balancer_off] (function() { - "use strict"; +"use strict"; - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - load("jstests/libs/analyze_plan.js"); // For planHasStage. +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +load("jstests/libs/analyze_plan.js"); // For planHasStage. - var coll = db.distinct_multikey_index; +var coll = db.distinct_multikey_index; - coll.drop(); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.save({a: 1, b: 1})); - assert.writeOK(coll.save({a: 1, b: 2})); - assert.writeOK(coll.save({a: 2, b: 1})); - assert.writeOK(coll.save({a: 2, b: 3})); - } - coll.createIndex({a: 1, b: 1}); +coll.drop(); +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.save({a: 1, b: 1})); + assert.writeOK(coll.save({a: 1, b: 2})); + assert.writeOK(coll.save({a: 2, b: 1})); + assert.writeOK(coll.save({a: 2, b: 3})); +} +coll.createIndex({a: 1, b: 1}); - var explain_distinct_with_query = coll.explain("executionStats").distinct('b', {a: 1}); - assert.commandWorked(explain_distinct_with_query); - assert(planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert(planHasStage( - db, explain_distinct_with_query.queryPlanner.winningPlan, "PROJECTION_COVERED")); - // If the collection is sharded, we expect at most 2 distinct values per shard. If the - // collection is not sharded, we expect 2 returned. - assert.lte(explain_distinct_with_query.executionStats.nReturned, - 2 * FixtureHelpers.numberOfShardsForCollection(coll)); +var explain_distinct_with_query = coll.explain("executionStats").distinct('b', {a: 1}); +assert.commandWorked(explain_distinct_with_query); +assert(planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "DISTINCT_SCAN")); +assert( + planHasStage(db, explain_distinct_with_query.queryPlanner.winningPlan, "PROJECTION_COVERED")); +// If the collection is sharded, we expect at most 2 distinct values per shard. If the +// collection is not sharded, we expect 2 returned. +assert.lte(explain_distinct_with_query.executionStats.nReturned, + 2 * FixtureHelpers.numberOfShardsForCollection(coll)); - var explain_distinct_without_query = coll.explain("executionStats").distinct('b'); - assert.commandWorked(explain_distinct_without_query); - assert(planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "COLLSCAN")); - assert(!planHasStage( - db, explain_distinct_without_query.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert.eq(40, explain_distinct_without_query.executionStats.nReturned); +var explain_distinct_without_query = coll.explain("executionStats").distinct('b'); +assert.commandWorked(explain_distinct_without_query); +assert(planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "COLLSCAN")); +assert(!planHasStage(db, explain_distinct_without_query.queryPlanner.winningPlan, "DISTINCT_SCAN")); +assert.eq(40, explain_distinct_without_query.executionStats.nReturned); })(); diff --git a/jstests/core/distinct_index1.js b/jstests/core/distinct_index1.js index bd1822b7a8c..e1cd721c7f4 100644 --- a/jstests/core/distinct_index1.js +++ b/jstests/core/distinct_index1.js @@ -3,79 +3,79 @@ * @tags: [assumes_balancer_off] */ (function() { - load("jstests/libs/analyze_plan.js"); // For getPlanStage. +load("jstests/libs/analyze_plan.js"); // For getPlanStage. 
- const coll = db.distinct_index1; - coll.drop(); +const coll = db.distinct_index1; +coll.drop(); - function getHash(num) { - return Math.floor(Math.sqrt(num * 123123)) % 10; - } +function getHash(num) { + return Math.floor(Math.sqrt(num * 123123)) % 10; +} - function getDistinctExplainWithExecutionStats(field, query) { - const explain = coll.explain("executionStats").distinct(field, query || {}); - assert(explain.hasOwnProperty("executionStats"), explain); - return explain; - } +function getDistinctExplainWithExecutionStats(field, query) { + const explain = coll.explain("executionStats").distinct(field, query || {}); + assert(explain.hasOwnProperty("executionStats"), explain); + return explain; +} - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 1000; i++) { - bulk.insert({a: getHash(i * 5), b: getHash(i)}); - } - assert.commandWorked(bulk.execute()); +const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 1000; i++) { + bulk.insert({a: getHash(i * 5), b: getHash(i)}); +} +assert.commandWorked(bulk.execute()); - let explain = getDistinctExplainWithExecutionStats("a"); - // Collection scan looks at all 1000 documents and gets 1000 distinct values. Looks at 0 index - // keys. - assert.eq(1000, explain.executionStats.nReturned); - assert.eq(0, explain.executionStats.totalKeysExamined); - assert.eq(1000, explain.executionStats.totalDocsExamined); +let explain = getDistinctExplainWithExecutionStats("a"); +// Collection scan looks at all 1000 documents and gets 1000 distinct values. Looks at 0 index +// keys. +assert.eq(1000, explain.executionStats.nReturned); +assert.eq(0, explain.executionStats.totalKeysExamined); +assert.eq(1000, explain.executionStats.totalDocsExamined); - explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}}); - // Collection scan looks at all 1000 documents and gets 398 distinct values which match the - // query. Looks at 0 index keys. - assert.eq(398, explain.executionStats.nReturned); - assert.eq(0, explain.executionStats.totalKeysExamined); - assert.eq(1000, explain.executionStats.totalDocsExamined); +explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}}); +// Collection scan looks at all 1000 documents and gets 398 distinct values which match the +// query. Looks at 0 index keys. +assert.eq(398, explain.executionStats.nReturned); +assert.eq(0, explain.executionStats.totalKeysExamined); +assert.eq(1000, explain.executionStats.totalDocsExamined); - explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}}); - // Collection scan looks at all 1000 documents and gets 398 distinct values which match the - // query. Looks at 0 index keys. - assert.eq(398, explain.executionStats.nReturned); - assert.eq(0, explain.executionStats.totalKeysExamined); - assert.eq(1000, explain.executionStats.totalDocsExamined); +explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}}); +// Collection scan looks at all 1000 documents and gets 398 distinct values which match the +// query. Looks at 0 index keys. +assert.eq(398, explain.executionStats.nReturned); +assert.eq(0, explain.executionStats.totalKeysExamined); +assert.eq(1000, explain.executionStats.totalDocsExamined); - assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({a: 1})); - explain = getDistinctExplainWithExecutionStats("a"); - // There are only 10 values. We use the fast distinct hack and only examine each value once. 
- assert.eq(10, explain.executionStats.nReturned); - assert.lte(10, explain.executionStats.totalKeysExamined); +explain = getDistinctExplainWithExecutionStats("a"); +// There are only 10 values. We use the fast distinct hack and only examine each value once. +assert.eq(10, explain.executionStats.nReturned); +assert.lte(10, explain.executionStats.totalKeysExamined); - explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}}); - // Only 4 values of a are >= 5 and we use the fast distinct hack. - assert.eq(4, explain.executionStats.nReturned); - assert.eq(4, explain.executionStats.totalKeysExamined); - assert.eq(0, explain.executionStats.totalDocsExamined); +explain = getDistinctExplainWithExecutionStats("a", {a: {$gt: 5}}); +// Only 4 values of a are >= 5 and we use the fast distinct hack. +assert.eq(4, explain.executionStats.nReturned); +assert.eq(4, explain.executionStats.totalKeysExamined); +assert.eq(0, explain.executionStats.totalDocsExamined); - explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}}); - // We can't use the fast distinct hack here because we're distinct-ing over 'b'. - assert.eq(398, explain.executionStats.nReturned); - assert.eq(398, explain.executionStats.totalKeysExamined); - assert.eq(398, explain.executionStats.totalDocsExamined); +explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}}); +// We can't use the fast distinct hack here because we're distinct-ing over 'b'. +assert.eq(398, explain.executionStats.nReturned); +assert.eq(398, explain.executionStats.totalKeysExamined); +assert.eq(398, explain.executionStats.totalDocsExamined); - // Test that a distinct over a trailing field of the index can be covered. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}, b: {$gt: 5}}); - assert.lte(explain.executionStats.nReturned, 171); - assert.eq(0, explain.executionStats.totalDocsExamined); +// Test that a distinct over a trailing field of the index can be covered. +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +explain = getDistinctExplainWithExecutionStats("b", {a: {$gt: 5}, b: {$gt: 5}}); +assert.lte(explain.executionStats.nReturned, 171); +assert.eq(0, explain.executionStats.totalDocsExamined); - // Should use an index scan over the hashed index. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex({a: "hashed"})); - explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]}); - assert.eq(188, explain.executionStats.nReturned); - const indexScanStage = getPlanStage(explain.executionStats.executionStages, "IXSCAN"); - assert.eq("hashed", indexScanStage.keyPattern.a); +// Should use an index scan over the hashed index. +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.createIndex({a: "hashed"})); +explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]}); +assert.eq(188, explain.executionStats.nReturned); +const indexScanStage = getPlanStage(explain.executionStats.executionStages, "IXSCAN"); +assert.eq("hashed", indexScanStage.keyPattern.a); })(); diff --git a/jstests/core/distinct_multikey.js b/jstests/core/distinct_multikey.js index 77aba774a1c..72acd2c342c 100644 --- a/jstests/core/distinct_multikey.js +++ b/jstests/core/distinct_multikey.js @@ -2,102 +2,102 @@ * Tests for distinct planning and execution in the presence of multikey indexes. 
*/ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - let coll = db.jstest_distinct_multikey; - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.insert({a: [1, 2, 3]})); - assert.writeOK(coll.insert({a: [2, 3, 4]})); - assert.writeOK(coll.insert({a: [5, 6, 7]})); +let coll = db.jstest_distinct_multikey; +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.insert({a: [1, 2, 3]})); +assert.writeOK(coll.insert({a: [2, 3, 4]})); +assert.writeOK(coll.insert({a: [5, 6, 7]})); - // Test that distinct can correctly use a multikey index when there is no predicate. - let result = coll.distinct("a"); - assert.eq([1, 2, 3, 4, 5, 6, 7], result.sort()); - let explain = coll.explain("queryPlanner").distinct("a"); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +// Test that distinct can correctly use a multikey index when there is no predicate. +let result = coll.distinct("a"); +assert.eq([1, 2, 3, 4, 5, 6, 7], result.sort()); +let explain = coll.explain("queryPlanner").distinct("a"); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - // Test that distinct can correctly use a multikey index when there is a predicate. This query - // should not be eligible for the distinct scan and cannot be covered. - result = coll.distinct("a", {a: 3}); - assert.eq([1, 2, 3, 4], result.sort()); - explain = coll.explain("queryPlanner").distinct("a", {a: 3}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); +// Test that distinct can correctly use a multikey index when there is a predicate. This query +// should not be eligible for the distinct scan and cannot be covered. +result = coll.distinct("a", {a: 3}); +assert.eq([1, 2, 3, 4], result.sort()); +explain = coll.explain("queryPlanner").distinct("a", {a: 3}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - // Test distinct over a dotted multikey field, with a predicate. - coll.drop(); - assert.commandWorked(coll.createIndex({"a.b": 1})); - assert.writeOK(coll.insert({a: {b: [1, 2, 3]}})); - assert.writeOK(coll.insert({a: {b: [2, 3, 4]}})); +// Test distinct over a dotted multikey field, with a predicate. +coll.drop(); +assert.commandWorked(coll.createIndex({"a.b": 1})); +assert.writeOK(coll.insert({a: {b: [1, 2, 3]}})); +assert.writeOK(coll.insert({a: {b: [2, 3, 4]}})); - result = coll.distinct("a.b", {"a.b": 3}); - assert.eq([1, 2, 3, 4], result.sort()); - explain = coll.explain("queryPlanner").distinct("a.b", {"a.b": 3}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); +result = coll.distinct("a.b", {"a.b": 3}); +assert.eq([1, 2, 3, 4], result.sort()); +explain = coll.explain("queryPlanner").distinct("a.b", {"a.b": 3}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - // Test that the distinct scan can be used when there is a predicate and the index is not - // multikey. 
- coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: 3})); +// Test that the distinct scan can be used when there is a predicate and the index is not +// multikey. +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 3})); - result = coll.distinct("a", {a: {$gte: 2}}); - assert.eq([2, 3], result.sort()); - explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +result = coll.distinct("a", {a: {$gte: 2}}); +assert.eq([2, 3], result.sort()); +explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - // Test a distinct which can use a multikey index, where the field being distinct'ed is not - // multikey. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 1, b: [2, 3]})); - assert.writeOK(coll.insert({a: 8, b: [3, 4]})); - assert.writeOK(coll.insert({a: 7, b: [4, 5]})); +// Test a distinct which can use a multikey index, where the field being distinct'ed is not +// multikey. +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 1, b: [2, 3]})); +assert.writeOK(coll.insert({a: 8, b: [3, 4]})); +assert.writeOK(coll.insert({a: 7, b: [4, 5]})); - result = coll.distinct("a", {a: {$gte: 2}}); - assert.eq([7, 8], result.sort()); - explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +result = coll.distinct("a", {a: {$gte: 2}}); +assert.eq([7, 8], result.sort()); +explain = coll.explain("queryPlanner").distinct("a", {a: {$gte: 2}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - // Test distinct over a trailing multikey field. - result = coll.distinct("b", {a: {$gte: 2}}); - assert.eq([3, 4, 5], result.sort()); - explain = coll.explain("queryPlanner").distinct("b", {a: {$gte: 2}}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); +// Test distinct over a trailing multikey field. +result = coll.distinct("b", {a: {$gte: 2}}); +assert.eq([3, 4, 5], result.sort()); +explain = coll.explain("queryPlanner").distinct("b", {a: {$gte: 2}}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "IXSCAN")); - // Test distinct over a trailing non-multikey field, where the leading field is multikey. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.writeOK(coll.insert({a: [2, 3], b: 1})); - assert.writeOK(coll.insert({a: [3, 4], b: 8})); - assert.writeOK(coll.insert({a: [3, 5], b: 7})); +// Test distinct over a trailing non-multikey field, where the leading field is multikey. 
+coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.writeOK(coll.insert({a: [2, 3], b: 1})); +assert.writeOK(coll.insert({a: [3, 4], b: 8})); +assert.writeOK(coll.insert({a: [3, 5], b: 7})); - result = coll.distinct("b", {a: 3}); - assert.eq([1, 7, 8], result.sort()); - explain = coll.explain("queryPlanner").distinct("b", {a: 3}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +result = coll.distinct("b", {a: 3}); +assert.eq([1, 7, 8], result.sort()); +explain = coll.explain("queryPlanner").distinct("b", {a: 3}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - // Test distinct over a trailing non-multikey dotted path where the leading field is multikey. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); - assert.writeOK(coll.insert({a: [2, 3], b: {c: 1}})); - assert.writeOK(coll.insert({a: [3, 4], b: {c: 8}})); - assert.writeOK(coll.insert({a: [3, 5], b: {c: 7}})); +// Test distinct over a trailing non-multikey dotted path where the leading field is multikey. +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); +assert.writeOK(coll.insert({a: [2, 3], b: {c: 1}})); +assert.writeOK(coll.insert({a: [3, 4], b: {c: 8}})); +assert.writeOK(coll.insert({a: [3, 5], b: {c: 7}})); - result = coll.distinct("b.c", {a: 3}); - assert.eq([1, 7, 8], result.sort()); - explain = coll.explain("queryPlanner").distinct("b.c", {a: 3}); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_DEFAULT")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); +result = coll.distinct("b.c", {a: 3}); +assert.eq([1, 7, 8], result.sort()); +explain = coll.explain("queryPlanner").distinct("b.c", {a: 3}); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_DEFAULT")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); }()); diff --git a/jstests/core/distinct_multikey_dotted_path.js b/jstests/core/distinct_multikey_dotted_path.js index dc770ec3a24..b06ca2d95ea 100644 --- a/jstests/core/distinct_multikey_dotted_path.js +++ b/jstests/core/distinct_multikey_dotted_path.js @@ -9,190 +9,192 @@ * @tags: [assumes_unsharded_collection, does_not_support_stepdowns] */ (function() { - "use strict"; - load("jstests/libs/analyze_plan.js"); // For planHasStage(). - - const coll = db.distinct_multikey; - coll.drop(); - assert.commandWorked(coll.createIndex({"a.b.c": 1})); - - assert.commandWorked(coll.insert({a: {b: {c: 1}}})); - assert.commandWorked(coll.insert({a: {b: {c: 2}}})); - assert.commandWorked(coll.insert({a: {b: {c: 3}}})); - assert.commandWorked(coll.insert({a: {b: {notRelevant: 3}}})); - assert.commandWorked(coll.insert({a: {notRelevant: 3}})); - - const numPredicate = {"a.b.c": {$gt: 0}}; - - function getAggPipelineForDistinct(path) { - return [{$group: {_id: "$" + path}}]; - } - - // Run an agg pipeline with a $group, and convert the results so they're equivalent - // to what a distinct() would return. - // Note that $group will treat an array as its own key rather than unwinding it. This means - // that a $group on a field that's multikey will have different behavior than a distinct(), so - // we only use this function for non-multikey fields. 
- function distinctResultsFromPipeline(pipeline) { - const res = coll.aggregate(pipeline).toArray(); - return res.map((x) => x._id); - } - - // Be sure a distinct scan is used when the index is not multi key. - (function testDistinctWithNonMultikeyIndex() { - const results = coll.distinct("a.b.c"); - // TODO SERVER-14832: Returning 'null' here is inconsistent with the behavior when no index - // is present. - assert.sameMembers([1, 2, 3, null], results); - - const expl = coll.explain().distinct("a.b.c"); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); - - // Do an equivalent query using $group. - const pipeline = getAggPipelineForDistinct("a.b.c"); - const aggResults = distinctResultsFromPipeline(pipeline); - assert.sameMembers(aggResults, results); - const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline)); - assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0); - })(); - - // Distinct with a predicate. - (function testDistinctWithPredWithNonMultikeyIndex() { - const results = coll.distinct("a.b.c", numPredicate); - assert.sameMembers([1, 2, 3], results); - - const expl = coll.explain().distinct("a.b.c", numPredicate); - - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); - - const pipeline = [{$match: numPredicate}].concat(getAggPipelineForDistinct("a.b.c")); - const aggResults = distinctResultsFromPipeline(pipeline); - assert.sameMembers(aggResults, results); - const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline)); - assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0); - })(); - - // Make the index multi key. - assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 5}]}})); - assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 6}]}})); - // Empty array is indexed as 'undefined'. - assert.commandWorked(coll.insert({a: {b: {c: []}}})); - - // We should still use the index as long as the path we distinct() on is never an array - // index. - (function testDistinctWithMultikeyIndex() { - const multiKeyResults = coll.distinct("a.b.c"); - // TODO SERVER-14832: Returning 'null' and 'undefined' here is inconsistent with the - // behavior when no index is present. - assert.sameMembers([1, 2, 3, 4, 5, 6, null, undefined], multiKeyResults); - const expl = coll.explain().distinct("a.b.c"); - - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); - - // Not running same query with $group now that the field is multikey. See comment above. - })(); - - // We cannot use the DISTINCT_SCAN optimization when there is a multikey path in the key and - // there is a predicate. The reason is that we may have a predicate like {a: 4}, and two - // documents: {a: [4, 5]}, {a: [4, 6]}. With a DISTINCT_SCAN, we would "skip over" one of the - // documents, and leave out either '5' or '6', rather than providing the correct result of - // [4, 5, 6]. The test below is for a similar case. - (function testDistinctWithPredWithMultikeyIndex() { - const pred = {"a.b.c": 4}; - const results = coll.distinct("a.b.c", pred); - assert.sameMembers([4, 5, 6], results); - - const expl = coll.explain().distinct("a.b.c", pred); - assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl); - - // Not running same query with $group now that the field is multikey. See comment above. 
- })(); - - // Perform a distinct on a path where the last component is multikey. - (function testDistinctOnPathWhereLastComponentIsMultiKey() { - assert.commandWorked(coll.createIndex({"a.b": 1})); - const multiKeyResults = coll.distinct("a.b"); - assert.sameMembers( - [ - null, // From the document with no 'b' field. TODO SERVER-14832: this is - // inconsistent with behavior when no index is present. - {c: 1}, - {c: 2}, - {c: 3}, - {c: 4}, - {c: 5}, - {c: 6}, - {c: []}, - {notRelevant: 3} - ], - multiKeyResults); - - const expl = coll.explain().distinct("a.b"); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); - - // Not running same query with $group now that the field is multikey. See comment above. - })(); - - (function testDistinctOnPathWhereLastComponentIsMultiKeyWithPredicate() { - assert.commandWorked(coll.createIndex({"a.b": 1})); - const pred = {"a.b": {$type: "array"}}; - const multiKeyResults = coll.distinct("a.b", pred); - assert.sameMembers( - [ - {c: 4}, - {c: 5}, - {c: 6}, - ], - multiKeyResults); - - const expl = coll.explain().distinct("a.b", pred); - assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN")); - - // Not running same query with $group now that the field is multikey. See comment above. - })(); - - // If the path we distinct() on includes an array index, a COLLSCAN should be used, - // even if an index is available on the prefix to the array component ("a.b" in this case). - (function testDistinctOnNumericMultikeyPathNoIndex() { - const res = coll.distinct("a.b.0"); - assert.eq(res, [{c: 4}]); - - const expl = coll.explain().distinct("a.b.0"); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "COLLSCAN"), expl); - - // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will - // only treat '0' as a field name (not array index). - })(); - - // Creating an index on "a.b.0" and doing a distinct on it should be able to use DISTINCT_SCAN. - (function testDistinctOnNumericMultikeyPathWithIndex() { - assert.commandWorked(coll.createIndex({"a.b.0": 1})); - assert.commandWorked(coll.insert({a: {b: {0: "hello world"}}})); - const res = coll.distinct("a.b.0"); - assert.sameMembers(res, [{c: 4}, "hello world"]); - - const expl = coll.explain().distinct("a.b.0"); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); - - // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will - // only treat '0' as a field name (not array index). - })(); - - // Creating an index on "a.b.0" and doing a distinct on it should use an IXSCAN, as "a.b" is - // multikey. See explanation above about why a DISTINCT_SCAN cannot be used when the path - // given is multikey. - (function testDistinctWithPredOnNumericMultikeyPathWithIndex() { - const pred = {"a.b.0": {$type: "object"}}; - const res = coll.distinct("a.b.0", pred); - assert.sameMembers(res, [{c: 4}]); - - const expl = coll.explain().distinct("a.b.0", pred); - assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); - assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl); - - // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will - // only treat '0' as a field name (not array index). - })(); +"use strict"; +load("jstests/libs/analyze_plan.js"); // For planHasStage(). 
+ +const coll = db.distinct_multikey; +coll.drop(); +assert.commandWorked(coll.createIndex({"a.b.c": 1})); + +assert.commandWorked(coll.insert({a: {b: {c: 1}}})); +assert.commandWorked(coll.insert({a: {b: {c: 2}}})); +assert.commandWorked(coll.insert({a: {b: {c: 3}}})); +assert.commandWorked(coll.insert({a: {b: {notRelevant: 3}}})); +assert.commandWorked(coll.insert({a: {notRelevant: 3}})); + +const numPredicate = { + "a.b.c": {$gt: 0} +}; + +function getAggPipelineForDistinct(path) { + return [{$group: {_id: "$" + path}}]; +} + +// Run an agg pipeline with a $group, and convert the results so they're equivalent +// to what a distinct() would return. +// Note that $group will treat an array as its own key rather than unwinding it. This means +// that a $group on a field that's multikey will have different behavior than a distinct(), so +// we only use this function for non-multikey fields. +function distinctResultsFromPipeline(pipeline) { + const res = coll.aggregate(pipeline).toArray(); + return res.map((x) => x._id); +} + +// Be sure a distinct scan is used when the index is not multi key. +(function testDistinctWithNonMultikeyIndex() { + const results = coll.distinct("a.b.c"); + // TODO SERVER-14832: Returning 'null' here is inconsistent with the behavior when no index + // is present. + assert.sameMembers([1, 2, 3, null], results); + + const expl = coll.explain().distinct("a.b.c"); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); + + // Do an equivalent query using $group. + const pipeline = getAggPipelineForDistinct("a.b.c"); + const aggResults = distinctResultsFromPipeline(pipeline); + assert.sameMembers(aggResults, results); + const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline)); + assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0); +})(); + +// Distinct with a predicate. +(function testDistinctWithPredWithNonMultikeyIndex() { + const results = coll.distinct("a.b.c", numPredicate); + assert.sameMembers([1, 2, 3], results); + + const expl = coll.explain().distinct("a.b.c", numPredicate); + + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); + + const pipeline = [{$match: numPredicate}].concat(getAggPipelineForDistinct("a.b.c")); + const aggResults = distinctResultsFromPipeline(pipeline); + assert.sameMembers(aggResults, results); + const aggExpl = assert.commandWorked(coll.explain().aggregate(pipeline)); + assert.gt(getAggPlanStages(aggExpl, "DISTINCT_SCAN").length, 0); +})(); + +// Make the index multi key. +assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 5}]}})); +assert.commandWorked(coll.insert({a: {b: [{c: 4}, {c: 6}]}})); +// Empty array is indexed as 'undefined'. +assert.commandWorked(coll.insert({a: {b: {c: []}}})); + +// We should still use the index as long as the path we distinct() on is never an array +// index. +(function testDistinctWithMultikeyIndex() { + const multiKeyResults = coll.distinct("a.b.c"); + // TODO SERVER-14832: Returning 'null' and 'undefined' here is inconsistent with the + // behavior when no index is present. + assert.sameMembers([1, 2, 3, 4, 5, 6, null, undefined], multiKeyResults); + const expl = coll.explain().distinct("a.b.c"); + + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); + + // Not running same query with $group now that the field is multikey. See comment above. 
+})(); + +// We cannot use the DISTINCT_SCAN optimization when there is a multikey path in the key and +// there is a predicate. The reason is that we may have a predicate like {a: 4}, and two +// documents: {a: [4, 5]}, {a: [4, 6]}. With a DISTINCT_SCAN, we would "skip over" one of the +// documents, and leave out either '5' or '6', rather than providing the correct result of +// [4, 5, 6]. The test below is for a similar case. +(function testDistinctWithPredWithMultikeyIndex() { + const pred = {"a.b.c": 4}; + const results = coll.distinct("a.b.c", pred); + assert.sameMembers([4, 5, 6], results); + + const expl = coll.explain().distinct("a.b.c", pred); + assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl); + + // Not running same query with $group now that the field is multikey. See comment above. +})(); + +// Perform a distinct on a path where the last component is multikey. +(function testDistinctOnPathWhereLastComponentIsMultiKey() { + assert.commandWorked(coll.createIndex({"a.b": 1})); + const multiKeyResults = coll.distinct("a.b"); + assert.sameMembers( + [ + null, // From the document with no 'b' field. TODO SERVER-14832: this is + // inconsistent with behavior when no index is present. + {c: 1}, + {c: 2}, + {c: 3}, + {c: 4}, + {c: 5}, + {c: 6}, + {c: []}, + {notRelevant: 3} + ], + multiKeyResults); + + const expl = coll.explain().distinct("a.b"); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); + + // Not running same query with $group now that the field is multikey. See comment above. +})(); + +(function testDistinctOnPathWhereLastComponentIsMultiKeyWithPredicate() { + assert.commandWorked(coll.createIndex({"a.b": 1})); + const pred = {"a.b": {$type: "array"}}; + const multiKeyResults = coll.distinct("a.b", pred); + assert.sameMembers( + [ + {c: 4}, + {c: 5}, + {c: 6}, + ], + multiKeyResults); + + const expl = coll.explain().distinct("a.b", pred); + assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN")); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN")); + + // Not running same query with $group now that the field is multikey. See comment above. +})(); + +// If the path we distinct() on includes an array index, a COLLSCAN should be used, +// even if an index is available on the prefix to the array component ("a.b" in this case). +(function testDistinctOnNumericMultikeyPathNoIndex() { + const res = coll.distinct("a.b.0"); + assert.eq(res, [{c: 4}]); + + const expl = coll.explain().distinct("a.b.0"); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "COLLSCAN"), expl); + + // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will + // only treat '0' as a field name (not array index). +})(); + +// Creating an index on "a.b.0" and doing a distinct on it should be able to use DISTINCT_SCAN. 
+(function testDistinctOnNumericMultikeyPathWithIndex() { + assert.commandWorked(coll.createIndex({"a.b.0": 1})); + assert.commandWorked(coll.insert({a: {b: {0: "hello world"}}})); + const res = coll.distinct("a.b.0"); + assert.sameMembers(res, [{c: 4}, "hello world"]); + + const expl = coll.explain().distinct("a.b.0"); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); + + // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will + // only treat '0' as a field name (not array index). +})(); + +// Creating an index on "a.b.0" and doing a distinct on it should use an IXSCAN, as "a.b" is +// multikey. See explanation above about why a DISTINCT_SCAN cannot be used when the path +// given is multikey. +(function testDistinctWithPredOnNumericMultikeyPathWithIndex() { + const pred = {"a.b.0": {$type: "object"}}; + const res = coll.distinct("a.b.0", pred); + assert.sameMembers(res, [{c: 4}]); + + const expl = coll.explain().distinct("a.b.0", pred); + assert.eq(false, planHasStage(db, expl.queryPlanner.winningPlan, "DISTINCT_SCAN"), expl); + assert.eq(true, planHasStage(db, expl.queryPlanner.winningPlan, "IXSCAN"), expl); + + // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will + // only treat '0' as a field name (not array index). +})(); })(); diff --git a/jstests/core/doc_validation.js b/jstests/core/doc_validation.js index 9acfffae4e3..57f99adf48c 100644 --- a/jstests/core/doc_validation.js +++ b/jstests/core/doc_validation.js @@ -9,278 +9,275 @@ // Test basic inserts and updates with document validation. (function() { - "use strict"; +"use strict"; - function assertFailsValidation(res) { - if (res instanceof WriteResult) { - assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); - } else { - assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); - } +function assertFailsValidation(res) { + if (res instanceof WriteResult) { + assert.writeErrorWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); + } else { + assert.commandFailedWithCode(res, ErrorCodes.DocumentValidationFailure, tojson(res)); } +} - const array = []; - for (let i = 0; i < 2048; i++) { - array.push({arbitrary: i}); - } +const array = []; +for (let i = 0; i < 2048; i++) { + array.push({arbitrary: i}); +} - const collName = "doc_validation"; - const coll = db[collName]; - - /** - * Runs a series of document validation tests using the validator 'validator', which should - * enforce the existence of a field "a". - */ - function runInsertUpdateValidationTest(validator) { - coll.drop(); - - // Create a collection with document validator 'validator'. - assert.commandWorked(db.createCollection(collName, {validator: validator})); - - // Insert and upsert documents that will pass validation. - assert.writeOK(coll.insert({_id: "valid1", a: 1})); - assert.writeOK(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true})); - assert.writeOK(coll.runCommand( - "findAndModify", {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true})); - - // Insert and upsert documents that will not pass validation. - assertFailsValidation(coll.insert({_id: "invalid3", b: 1})); - assertFailsValidation( - coll.update({_id: "invalid4"}, {_id: "invalid4", b: 2}, {upsert: true})); - assertFailsValidation(coll.runCommand( - "findAndModify", {query: {_id: "invalid4"}, update: {$set: {b: 3}}, upsert: true})); - - // Assert that we can remove the document that passed validation. 
- assert.writeOK(coll.remove({_id: "valid1"})); - - // Check that we can only update documents that pass validation. We insert a valid and an - // invalid document, then set the validator. - coll.drop(); - assert.writeOK(coll.insert({_id: "valid1", a: 1})); - assert.writeOK(coll.insert({_id: "invalid2", b: 1})); - assert.commandWorked(coll.runCommand("collMod", {validator: validator})); - - // Assert that updates on a conforming document succeed when they affect fields not involved - // in validator. - // Add a new field. - assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: 2}}})); - // In-place update. - assert.writeOK(coll.update({_id: "valid1"}, {$inc: {z: 1}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$inc: {y: 1}}})); - // Out-of-place update. - assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: array}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: array}}})); - // No-op update. - assert.writeOK(coll.update({_id: "valid1"}, {a: 1})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {a: 1}}})); - - // Verify those same updates will fail on non-conforming document. - assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: 1}})); - assertFailsValidation(coll.update({_id: "invalid2"}, {$inc: {z: 1}})); - assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: array}})); - assertFailsValidation( - coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: 2}}})); - assertFailsValidation( - coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$inc: {y: 1}}})); - assertFailsValidation(coll.runCommand( - "findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: array}}})); - - // A no-op update of an invalid doc will succeed. - assert.writeOK(coll.update({_id: "invalid2"}, {$set: {b: 1}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {b: 1}}})); - - // Verify that we can't make a conforming document fail validation, but can update a - // non-conforming document to pass validation. - coll.drop(); - assert.writeOK(coll.insert({_id: "valid1", a: 1})); - assert.writeOK(coll.insert({_id: "invalid2", b: 1})); - assert.writeOK(coll.insert({_id: "invalid3", b: 1})); - assert.commandWorked(coll.runCommand("collMod", {validator: validator})); - - assertFailsValidation(coll.update({_id: "valid1"}, {$unset: {a: 1}})); - assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}})); - assertFailsValidation( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "invalid3"}, update: {$set: {a: 1}}})); - - // Modify the collection to remove the document validator. - assert.commandWorked(coll.runCommand("collMod", {validator: {}})); - - // Verify that no validation is applied to updates. 
- assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}})); - assert.writeOK(coll.update({_id: "invalid2"}, {$set: {z: 1}})); - assert.writeOK(coll.update({_id: "valid1"}, {$unset: {a: 1}})); - assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {z: 2}}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {z: 2}}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}})); - assert.writeOK( - coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {a: 1}}})); - } +const collName = "doc_validation"; +const coll = db[collName]; - // Run the test with a normal validator. - runInsertUpdateValidationTest({a: {$exists: true}}); - - // Run the test again with an equivalent JSON Schema. - runInsertUpdateValidationTest({$jsonSchema: {required: ["a"]}}); - - /** - * Run a series of document validation tests involving collation using the validator - * 'validator', which should enforce that the field "a" has the value "xyz". - */ - function runCollationValidationTest(validator) { - coll.drop(); - assert.commandWorked(db.createCollection( - collName, {validator: validator, collation: {locale: "en_US", strength: 2}})); - - // An insert that matches the validator should succeed. - assert.writeOK(coll.insert({_id: 0, a: "xyz", b: "foo"})); - - const isJSONSchema = validator.hasOwnProperty("$jsonSchema"); - - // A normal validator should respect the collation and the inserts should succeed. A JSON - // Schema validator ignores the collation and the inserts should fail. - const assertCorrectResult = - isJSONSchema ? res => assertFailsValidation(res) : res => assert.writeOK(res); - assertCorrectResult(coll.insert({a: "XYZ"})); - assertCorrectResult(coll.insert({a: "XyZ", b: "foo"})); - assertCorrectResult(coll.update({_id: 0}, {a: "xyZ", b: "foo"})); - assertCorrectResult(coll.update({_id: 0}, {$set: {a: "Xyz"}})); - assertCorrectResult( - coll.runCommand("findAndModify", {query: {_id: 0}, update: {a: "xyZ", b: "foo"}})); - assertCorrectResult( - coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "Xyz"}}})); - - // Test an insert and an update that should always fail. - assertFailsValidation(coll.insert({a: "not xyz"})); - assertFailsValidation(coll.update({_id: 0}, {$set: {a: "xyzz"}})); - assertFailsValidation( - coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "xyzz"}}})); - - // A normal validator expands leaf arrays, such that if "a" is an array containing "xyz", it - // matches {a: "xyz"}. A JSON Schema validator does not expand leaf arrays and treats arrays - // as a single array value. - assertCorrectResult(coll.insert({a: ["xyz"]})); - assertCorrectResult(coll.insert({a: ["XYZ"]})); - assertCorrectResult(coll.insert({a: ["XyZ"], b: "foo"})); - } +/** + * Runs a series of document validation tests using the validator 'validator', which should + * enforce the existence of a field "a". + */ +function runInsertUpdateValidationTest(validator) { + coll.drop(); - runCollationValidationTest({a: "xyz"}); - runCollationValidationTest({$jsonSchema: {properties: {a: {enum: ["xyz"]}}}}); + // Create a collection with document validator 'validator'. + assert.commandWorked(db.createCollection(collName, {validator: validator})); - // The validator is allowed to contain $expr. 
- coll.drop(); - assert.commandWorked(db.createCollection(collName, {validator: {$expr: {$eq: ["$a", 5]}}})); - assert.writeOK(coll.insert({a: 5})); - assertFailsValidation(coll.insert({a: 4})); - assert.commandWorked( - db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", 4]}}})); - assert.writeOK(coll.insert({a: 4})); - assertFailsValidation(coll.insert({a: 5})); - - // The validator supports $expr with the date extraction expressions (with a timezone - // specified). - coll.drop(); - assert.commandWorked(db.createCollection(collName, { - validator: - {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}} - })); - assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); - assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")})); - - // The validator supports $expr with a $dateToParts expression. - coll.drop(); - assert.commandWorked(db.createCollection(collName, { - validator: { - $expr: { - $eq: [ - { - "year": 2017, - "month": 10, - "day": 1, - "hour": 18, - "minute": 0, - "second": 0, - "millisecond": 0 - }, - {$dateToParts: {date: "$a", timezone: "America/New_York"}} - ] - } - } - })); - assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); - assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")})); + // Insert and upsert documents that will pass validation. + assert.writeOK(coll.insert({_id: "valid1", a: 1})); + assert.writeOK(coll.update({_id: "valid2"}, {_id: "valid2", a: 2}, {upsert: true})); + assert.writeOK(coll.runCommand("findAndModify", + {query: {_id: "valid3"}, update: {$set: {a: 3}}, upsert: true})); - // The validator supports $expr with $dateToString expression. - coll.drop(); - assert.commandWorked(db.createCollection(collName, { - validator: { - $expr: { - $eq: [ - "2017-07-04 14:56:42 +0000 (0 minutes)", - { - $dateToString: { - format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)", - date: "$date", - timezone: "$tz" - } - } - ] - } - } - })); - assert.writeOK(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"})); - assertFailsValidation( - coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"})); + // Insert and upsert documents that will not pass validation. + assertFailsValidation(coll.insert({_id: "invalid3", b: 1})); + assertFailsValidation(coll.update({_id: "invalid4"}, {_id: "invalid4", b: 2}, {upsert: true})); + assertFailsValidation(coll.runCommand( + "findAndModify", {query: {_id: "invalid4"}, update: {$set: {b: 3}}, upsert: true})); + + // Assert that we can remove the document that passed validation. + assert.writeOK(coll.remove({_id: "valid1"})); - // The validator supports $expr with $dateFromParts expression. + // Check that we can only update documents that pass validation. We insert a valid and an + // invalid document, then set the validator. coll.drop(); - assert.commandWorked(db.createCollection(collName, { - validator: { - $expr: { - $eq: [ - ISODate("2016-12-31T15:00:00Z"), - {'$dateFromParts': {year: "$year", "timezone": "$timezone"}} - ] - } - } - })); - assert.writeOK(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"})); + assert.writeOK(coll.insert({_id: "valid1", a: 1})); + assert.writeOK(coll.insert({_id: "invalid2", b: 1})); + assert.commandWorked(coll.runCommand("collMod", {validator: validator})); + + // Assert that updates on a conforming document succeed when they affect fields not involved + // in validator. + // Add a new field. 
+ assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: 2}}})); + // In-place update. + assert.writeOK(coll.update({_id: "valid1"}, {$inc: {z: 1}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$inc: {y: 1}}})); + // Out-of-place update. + assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: array}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {y: array}}})); + // No-op update. + assert.writeOK(coll.update({_id: "valid1"}, {a: 1})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {a: 1}}})); + + // Verify those same updates will fail on non-conforming document. + assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: 1}})); + assertFailsValidation(coll.update({_id: "invalid2"}, {$inc: {z: 1}})); + assertFailsValidation(coll.update({_id: "invalid2"}, {$set: {z: array}})); assertFailsValidation( - coll.insert({_id: 1, year: 2022, month: 1, day: 1, timezone: "America/New_York"})); + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: 2}}})); + assertFailsValidation( + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$inc: {y: 1}}})); + assertFailsValidation( + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {y: array}}})); - // The validator supports $expr with $dateFromString expression. + // A no-op update of an invalid doc will succeed. + assert.writeOK(coll.update({_id: "invalid2"}, {$set: {b: 1}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {b: 1}}})); + + // Verify that we can't make a conforming document fail validation, but can update a + // non-conforming document to pass validation. coll.drop(); - assert.commandWorked(db.createCollection(collName, { - validator: { - $expr: { - $eq: [ - ISODate("2017-07-04T15:56:02Z"), - {'$dateFromString': {dateString: "$date", timezone: 'America/New_York'}} - ] - } - } - })); - assert.writeOK(coll.insert({_id: 0, date: "2017-07-04T11:56:02"})); - assertFailsValidation(coll.insert({_id: 1, date: "2015-02-02T11:00:00"})); + assert.writeOK(coll.insert({_id: "valid1", a: 1})); + assert.writeOK(coll.insert({_id: "invalid2", b: 1})); + assert.writeOK(coll.insert({_id: "invalid3", b: 1})); + assert.commandWorked(coll.runCommand("collMod", {validator: validator})); - // The validator can contain an $expr that may throw at runtime. + assertFailsValidation(coll.update({_id: "valid1"}, {$unset: {a: 1}})); + assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}})); + assertFailsValidation( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "invalid3"}, update: {$set: {a: 1}}})); + + // Modify the collection to remove the document validator. + assert.commandWorked(coll.runCommand("collMod", {validator: {}})); + + // Verify that no validation is applied to updates. 
+ assert.writeOK(coll.update({_id: "valid1"}, {$set: {z: 1}})); + assert.writeOK(coll.update({_id: "invalid2"}, {$set: {z: 1}})); + assert.writeOK(coll.update({_id: "valid1"}, {$unset: {a: 1}})); + assert.writeOK(coll.update({_id: "invalid2"}, {$set: {a: 1}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$set: {z: 2}}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {z: 2}}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "valid1"}, update: {$unset: {a: 1}}})); + assert.writeOK( + coll.runCommand("findAndModify", {query: {_id: "invalid2"}, update: {$set: {a: 1}}})); +} + +// Run the test with a normal validator. +runInsertUpdateValidationTest({a: {$exists: true}}); + +// Run the test again with an equivalent JSON Schema. +runInsertUpdateValidationTest({$jsonSchema: {required: ["a"]}}); + +/** + * Run a series of document validation tests involving collation using the validator + * 'validator', which should enforce that the field "a" has the value "xyz". + */ +function runCollationValidationTest(validator) { coll.drop(); - assert.commandWorked( - db.createCollection(collName, {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}})); - assert.writeOK(coll.insert({a: 1, b: 1})); - let res = coll.insert({a: 1, b: 0}); - assert.writeError(res); - assert.eq(res.getWriteError().code, 16608); - assert.writeOK(coll.insert({a: -1, b: -1})); + assert.commandWorked(db.createCollection( + collName, {validator: validator, collation: {locale: "en_US", strength: 2}})); + + // An insert that matches the validator should succeed. + assert.writeOK(coll.insert({_id: 0, a: "xyz", b: "foo"})); + + const isJSONSchema = validator.hasOwnProperty("$jsonSchema"); + + // A normal validator should respect the collation and the inserts should succeed. A JSON + // Schema validator ignores the collation and the inserts should fail. + const assertCorrectResult = + isJSONSchema ? res => assertFailsValidation(res) : res => assert.writeOK(res); + assertCorrectResult(coll.insert({a: "XYZ"})); + assertCorrectResult(coll.insert({a: "XyZ", b: "foo"})); + assertCorrectResult(coll.update({_id: 0}, {a: "xyZ", b: "foo"})); + assertCorrectResult(coll.update({_id: 0}, {$set: {a: "Xyz"}})); + assertCorrectResult( + coll.runCommand("findAndModify", {query: {_id: 0}, update: {a: "xyZ", b: "foo"}})); + assertCorrectResult( + coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "Xyz"}}})); + + // Test an insert and an update that should always fail. + assertFailsValidation(coll.insert({a: "not xyz"})); + assertFailsValidation(coll.update({_id: 0}, {$set: {a: "xyzz"}})); + assertFailsValidation( + coll.runCommand("findAndModify", {query: {_id: 0}, update: {$set: {a: "xyzz"}}})); + + // A normal validator expands leaf arrays, such that if "a" is an array containing "xyz", it + // matches {a: "xyz"}. A JSON Schema validator does not expand leaf arrays and treats arrays + // as a single array value. + assertCorrectResult(coll.insert({a: ["xyz"]})); + assertCorrectResult(coll.insert({a: ["XYZ"]})); + assertCorrectResult(coll.insert({a: ["XyZ"], b: "foo"})); +} + +runCollationValidationTest({a: "xyz"}); +runCollationValidationTest({$jsonSchema: {properties: {a: {enum: ["xyz"]}}}}); + +// The validator is allowed to contain $expr. 
+coll.drop(); +assert.commandWorked(db.createCollection(collName, {validator: {$expr: {$eq: ["$a", 5]}}})); +assert.writeOK(coll.insert({a: 5})); +assertFailsValidation(coll.insert({a: 4})); +assert.commandWorked(db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", 4]}}})); +assert.writeOK(coll.insert({a: 4})); +assertFailsValidation(coll.insert({a: 5})); + +// The validator supports $expr with the date extraction expressions (with a timezone +// specified). +coll.drop(); +assert.commandWorked(db.createCollection( + collName, + {validator: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}})); +assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); +assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")})); + +// The validator supports $expr with a $dateToParts expression. +coll.drop(); +assert.commandWorked(db.createCollection(collName, { + validator: { + $expr: { + $eq: [ + { + "year": 2017, + "month": 10, + "day": 1, + "hour": 18, + "minute": 0, + "second": 0, + "millisecond": 0 + }, + {$dateToParts: {date: "$a", timezone: "America/New_York"}} + ] + } + } +})); +assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); +assertFailsValidation(coll.insert({a: ISODate("2017-10-01T00:00:00")})); + +// The validator supports $expr with $dateToString expression. +coll.drop(); +assert.commandWorked(db.createCollection(collName, { + validator: { + $expr: { + $eq: [ + "2017-07-04 14:56:42 +0000 (0 minutes)", + { + $dateToString: { + format: "%Y-%m-%d %H:%M:%S %z (%Z minutes)", + date: "$date", + timezone: "$tz" + } + } + ] + } + } +})); +assert.writeOK(coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "UTC"})); +assertFailsValidation( + coll.insert({date: new ISODate("2017-07-04T14:56:42.911Z"), tz: "America/New_York"})); + +// The validator supports $expr with $dateFromParts expression. +coll.drop(); +assert.commandWorked(db.createCollection(collName, { + validator: { + $expr: { + $eq: [ + ISODate("2016-12-31T15:00:00Z"), + {'$dateFromParts': {year: "$year", "timezone": "$timezone"}} + ] + } + } +})); +assert.writeOK(coll.insert({_id: 0, year: 2017, month: 6, day: 19, timezone: "Asia/Tokyo"})); +assertFailsValidation( + coll.insert({_id: 1, year: 2022, month: 1, day: 1, timezone: "America/New_York"})); + +// The validator supports $expr with $dateFromString expression. +coll.drop(); +assert.commandWorked(db.createCollection(collName, { + validator: { + $expr: { + $eq: [ + ISODate("2017-07-04T15:56:02Z"), + {'$dateFromString': {dateString: "$date", timezone: 'America/New_York'}} + ] + } + } +})); +assert.writeOK(coll.insert({_id: 0, date: "2017-07-04T11:56:02"})); +assertFailsValidation(coll.insert({_id: 1, date: "2015-02-02T11:00:00"})); + +// The validator can contain an $expr that may throw at runtime. 
+coll.drop(); +assert.commandWorked( + db.createCollection(collName, {validator: {$expr: {$eq: ["$a", {$divide: [1, "$b"]}]}}})); +assert.writeOK(coll.insert({a: 1, b: 1})); +let res = coll.insert({a: 1, b: 0}); +assert.writeError(res); +assert.eq(res.getWriteError().code, 16608); +assert.writeOK(coll.insert({a: -1, b: -1})); })(); diff --git a/jstests/core/doc_validation_invalid_validators.js b/jstests/core/doc_validation_invalid_validators.js index 81d16ec5371..b09e2cb0a14 100644 --- a/jstests/core/doc_validation_invalid_validators.js +++ b/jstests/core/doc_validation_invalid_validators.js @@ -5,65 +5,62 @@ // Verify invalid validator statements won't work and that we // can't create validated collections on restricted databases. (function() { - "use strict"; +"use strict"; - var collName = "doc_validation_invalid_validators"; - var coll = db[collName]; - coll.drop(); +var collName = "doc_validation_invalid_validators"; +var coll = db[collName]; +coll.drop(); - // Check a few invalid match statements for validator. - assert.commandFailed(db.createCollection(collName, {validator: 7})); - assert.commandFailed(db.createCollection(collName, {validator: "assert"})); - assert.commandFailed(db.createCollection(collName, {validator: {$jsonSchema: {invalid: 1}}})); +// Check a few invalid match statements for validator. +assert.commandFailed(db.createCollection(collName, {validator: 7})); +assert.commandFailed(db.createCollection(collName, {validator: "assert"})); +assert.commandFailed(db.createCollection(collName, {validator: {$jsonSchema: {invalid: 1}}})); - // Check some disallowed match statements. - assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}})); - assert.commandFailed(db.createCollection(collName, {validator: {$where: "this.a == this.b"}})); - assert.commandFailed(db.createCollection(collName, {validator: {$near: {place: "holder"}}})); - assert.commandFailed(db.createCollection(collName, {validator: {$geoNear: {place: "holder"}}})); - assert.commandFailed( - db.createCollection(collName, {validator: {$nearSphere: {place: "holder"}}})); - assert.commandFailed( - db.createCollection(collName, {validator: {$expr: {$eq: ["$a", "$$unbound"]}}})); +// Check some disallowed match statements. +assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}})); +assert.commandFailed(db.createCollection(collName, {validator: {$where: "this.a == this.b"}})); +assert.commandFailed(db.createCollection(collName, {validator: {$near: {place: "holder"}}})); +assert.commandFailed(db.createCollection(collName, {validator: {$geoNear: {place: "holder"}}})); +assert.commandFailed(db.createCollection(collName, {validator: {$nearSphere: {place: "holder"}}})); +assert.commandFailed( + db.createCollection(collName, {validator: {$expr: {$eq: ["$a", "$$unbound"]}}})); - // Verify we fail on admin, local and config databases. - assert.commandFailed( - db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}})); - if (!db.runCommand("isdbgrid").isdbgrid) { - assert.commandFailed( - db.getSiblingDB("local").createCollection(collName, {validator: {a: {$exists: true}}})); - } +// Verify we fail on admin, local and config databases. 
+assert.commandFailed( + db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}})); +if (!db.runCommand("isdbgrid").isdbgrid) { assert.commandFailed( - db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}})); + db.getSiblingDB("local").createCollection(collName, {validator: {a: {$exists: true}}})); +} +assert.commandFailed( + db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}})); - // Create collection with document validator. - assert.commandWorked(db.createCollection(collName, {validator: {a: {$exists: true}}})); +// Create collection with document validator. +assert.commandWorked(db.createCollection(collName, {validator: {a: {$exists: true}}})); - // Verify some invalid match statements can't be passed to collMod. - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$text: {$search: "bob"}}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$near: {place: "holder"}}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$geoNear: {place: "holder"}}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$nearSphere: {place: "holder"}}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", "$$unbound"]}}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$jsonSchema: {invalid: 7}}})); +// Verify some invalid match statements can't be passed to collMod. +assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: {$search: "bob"}}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}})); +assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$near: {place: "holder"}}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$geoNear: {place: "holder"}}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$nearSphere: {place: "holder"}}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", "$$unbound"]}}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$jsonSchema: {invalid: 7}}})); - coll.drop(); +coll.drop(); - // Create collection without document validator. - assert.commandWorked(db.createCollection(collName)); +// Create collection without document validator. +assert.commandWorked(db.createCollection(collName)); - // Verify we can't add an invalid validator to a collection without a validator. - assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}})); - assert.commandFailed( - db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}})); - assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}})); - coll.drop(); +// Verify we can't add an invalid validator to a collection without a validator. 
+assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}})); +assert.commandFailed( + db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}})); +assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}})); +coll.drop(); })(); diff --git a/jstests/core/doc_validation_options.js b/jstests/core/doc_validation_options.js index e9ba64d8029..50d8edfa671 100644 --- a/jstests/core/doc_validation_options.js +++ b/jstests/core/doc_validation_options.js @@ -3,56 +3,55 @@ // @tags: [assumes_no_implicit_collection_creation_after_drop, requires_non_retryable_commands] (function() { - "use strict"; +"use strict"; - function assertFailsValidation(res) { - var DocumentValidationFailure = 121; - assert.writeError(res); - assert.eq(res.getWriteError().code, DocumentValidationFailure); - } +function assertFailsValidation(res) { + var DocumentValidationFailure = 121; + assert.writeError(res); + assert.eq(res.getWriteError().code, DocumentValidationFailure); +} - var t = db.doc_validation_options; - t.drop(); +var t = db.doc_validation_options; +t.drop(); - assert.commandWorked(db.createCollection(t.getName(), {validator: {a: 1}})); +assert.commandWorked(db.createCollection(t.getName(), {validator: {a: 1}})); - assertFailsValidation(t.insert({a: 2})); - t.insert({a: 1}); - assert.eq(1, t.count()); +assertFailsValidation(t.insert({a: 2})); +t.insert({a: 1}); +assert.eq(1, t.count()); - // test default to strict - assertFailsValidation(t.update({}, {$set: {a: 2}})); - assert.eq(1, t.find({a: 1}).itcount()); +// test default to strict +assertFailsValidation(t.update({}, {$set: {a: 2}})); +assert.eq(1, t.find({a: 1}).itcount()); - // check we can do a bad update in warn mode - assert.commandWorked(t.runCommand("collMod", {validationAction: "warn"})); - t.update({}, {$set: {a: 2}}); - assert.eq(1, t.find({a: 2}).itcount()); +// check we can do a bad update in warn mode +assert.commandWorked(t.runCommand("collMod", {validationAction: "warn"})); +t.update({}, {$set: {a: 2}}); +assert.eq(1, t.find({a: 2}).itcount()); - // TODO: check log for message? +// TODO: check log for message? 
- // make sure persisted - var info = db.getCollectionInfos({name: t.getName()})[0]; - assert.eq("warn", info.options.validationAction, tojson(info)); +// make sure persisted +var info = db.getCollectionInfos({name: t.getName()})[0]; +assert.eq("warn", info.options.validationAction, tojson(info)); - // check we can go back to enforce strict - assert.commandWorked( - t.runCommand("collMod", {validationAction: "error", validationLevel: "strict"})); - assertFailsValidation(t.update({}, {$set: {a: 3}})); - assert.eq(1, t.find({a: 2}).itcount()); +// check we can go back to enforce strict +assert.commandWorked( + t.runCommand("collMod", {validationAction: "error", validationLevel: "strict"})); +assertFailsValidation(t.update({}, {$set: {a: 3}})); +assert.eq(1, t.find({a: 2}).itcount()); - // check bad -> bad is ok - assert.commandWorked(t.runCommand("collMod", {validationLevel: "moderate"})); - t.update({}, {$set: {a: 3}}); - assert.eq(1, t.find({a: 3}).itcount()); +// check bad -> bad is ok +assert.commandWorked(t.runCommand("collMod", {validationLevel: "moderate"})); +t.update({}, {$set: {a: 3}}); +assert.eq(1, t.find({a: 3}).itcount()); - // test create - t.drop(); - assert.commandWorked( - db.createCollection(t.getName(), {validator: {a: 1}, validationAction: "warn"})); - - t.insert({a: 2}); - t.insert({a: 1}); - assert.eq(2, t.count()); +// test create +t.drop(); +assert.commandWorked( + db.createCollection(t.getName(), {validator: {a: 1}, validationAction: "warn"})); +t.insert({a: 2}); +t.insert({a: 1}); +assert.eq(2, t.count()); })(); diff --git a/jstests/core/dotted_path_in_null.js b/jstests/core/dotted_path_in_null.js index 31ffc11a562..f32f60a9ff2 100644 --- a/jstests/core/dotted_path_in_null.js +++ b/jstests/core/dotted_path_in_null.js @@ -1,23 +1,23 @@ (function() { - "use strict"; +"use strict"; - const coll = db.dotted_path_in_null; - coll.drop(); +const coll = db.dotted_path_in_null; +coll.drop(); - assert.writeOK(coll.insert({_id: 1, a: [{b: 5}]})); - assert.writeOK(coll.insert({_id: 2, a: [{}]})); - assert.writeOK(coll.insert({_id: 3, a: []})); - assert.writeOK(coll.insert({_id: 4, a: [{}, {b: 5}]})); - assert.writeOK(coll.insert({_id: 5, a: [5, {b: 5}]})); +assert.writeOK(coll.insert({_id: 1, a: [{b: 5}]})); +assert.writeOK(coll.insert({_id: 2, a: [{}]})); +assert.writeOK(coll.insert({_id: 3, a: []})); +assert.writeOK(coll.insert({_id: 4, a: [{}, {b: 5}]})); +assert.writeOK(coll.insert({_id: 5, a: [5, {b: 5}]})); - function getIds(query) { - let ids = []; - coll.find(query).sort({_id: 1}).forEach(doc => ids.push(doc._id)); - return ids; - } +function getIds(query) { + let ids = []; + coll.find(query).sort({_id: 1}).forEach(doc => ids.push(doc._id)); + return ids; +} - assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents"); +assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents"); - assert.commandWorked(coll.createIndex({"a.b": 1})); - assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents"); +assert.commandWorked(coll.createIndex({"a.b": 1})); +assert.eq([2, 4], getIds({"a.b": {$in: [null]}}), "Did not match the expected documents"); }()); diff --git a/jstests/core/drop_index.js b/jstests/core/drop_index.js index 8bce5608773..83e03c5a8fd 100644 --- a/jstests/core/drop_index.js +++ b/jstests/core/drop_index.js @@ -2,85 +2,85 @@ // collection. 
// @tags: [assumes_no_implicit_index_creation] (function() { - 'use strict'; +'use strict'; - const t = db.drop_index; - t.drop(); +const t = db.drop_index; +t.drop(); - /** - * Extracts index names from listIndexes result. - */ - function getIndexNames(cmdRes) { - return t.getIndexes().map(spec => spec.name); - } +/** + * Extracts index names from listIndexes result. + */ +function getIndexNames(cmdRes) { + return t.getIndexes().map(spec => spec.name); +} - /** - * Checks that collection contains the given list of non-id indexes and nothing else. - */ - function assertIndexes(expectedIndexNames, msg) { - const actualIndexNames = getIndexNames(); - const testMsgSuffix = () => msg + ': expected ' + tojson(expectedIndexNames) + ' but got ' + - tojson(actualIndexNames) + ' instead.'; - assert.eq(expectedIndexNames.length + 1, - actualIndexNames.length, - 'unexpected number of indexes after ' + testMsgSuffix()); - assert(actualIndexNames.includes('_id_'), - '_id index missing after ' + msg + ': ' + tojson(actualIndexNames)); - for (let expectedIndexName of expectedIndexNames) { - assert(actualIndexNames.includes(expectedIndexName), - expectedIndexName + ' index missing after ' + testMsgSuffix()); - } +/** + * Checks that collection contains the given list of non-id indexes and nothing else. + */ +function assertIndexes(expectedIndexNames, msg) { + const actualIndexNames = getIndexNames(); + const testMsgSuffix = () => msg + ': expected ' + tojson(expectedIndexNames) + ' but got ' + + tojson(actualIndexNames) + ' instead.'; + assert.eq(expectedIndexNames.length + 1, + actualIndexNames.length, + 'unexpected number of indexes after ' + testMsgSuffix()); + assert(actualIndexNames.includes('_id_'), + '_id index missing after ' + msg + ': ' + tojson(actualIndexNames)); + for (let expectedIndexName of expectedIndexNames) { + assert(actualIndexNames.includes(expectedIndexName), + expectedIndexName + ' index missing after ' + testMsgSuffix()); } +} - assert.writeOK(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1})); - assertIndexes([], 'inserting test document'); +assert.writeOK(t.insert({_id: 1, a: 2, b: 3, c: 1, d: 1, e: 1})); +assertIndexes([], 'inserting test document'); - assert.commandWorked(t.createIndex({a: 1})); - assert.commandWorked(t.createIndex({b: 1})); - assert.commandWorked(t.createIndex({c: 1})); - assert.commandWorked(t.createIndex({d: 1})); - assert.commandWorked(t.createIndex({e: 1})); - assertIndexes(['a_1', 'b_1', 'c_1', 'd_1', 'e_1'], 'creating indexes'); +assert.commandWorked(t.createIndex({a: 1})); +assert.commandWorked(t.createIndex({b: 1})); +assert.commandWorked(t.createIndex({c: 1})); +assert.commandWorked(t.createIndex({d: 1})); +assert.commandWorked(t.createIndex({e: 1})); +assertIndexes(['a_1', 'b_1', 'c_1', 'd_1', 'e_1'], 'creating indexes'); - // Drop single index by name. - // Collection.dropIndex() throws if the dropIndexes command fails. - t.dropIndex(t._genIndexName({a: 1})); - assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name'); +// Drop single index by name. +// Collection.dropIndex() throws if the dropIndexes command fails. +t.dropIndex(t._genIndexName({a: 1})); +assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name'); - // Drop single index by key pattern. - t.dropIndex({b: 1}); - assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern'); +// Drop single index by key pattern. +t.dropIndex({b: 1}); +assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern'); - // Not allowed to drop _id index. 
- assert.commandFailedWithCode(t.dropIndex('_id_'), ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(t.dropIndex({_id: 1}), ErrorCodes.InvalidOptions); +// Not allowed to drop _id index. +assert.commandFailedWithCode(t.dropIndex('_id_'), ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(t.dropIndex({_id: 1}), ErrorCodes.InvalidOptions); - // Ensure you can recreate indexes, even if you don't use dropIndex method. - // Prior to SERVER-7168, the shell used to cache names of indexes created using - // Collection.ensureIndex(). - assert.commandWorked(t.createIndex({a: 1})); - assertIndexes(['a_1', 'c_1', 'd_1', 'e_1'], 'recreating {a: 1}'); +// Ensure you can recreate indexes, even if you don't use dropIndex method. +// Prior to SERVER-7168, the shell used to cache names of indexes created using +// Collection.ensureIndex(). +assert.commandWorked(t.createIndex({a: 1})); +assertIndexes(['a_1', 'c_1', 'd_1', 'e_1'], 'recreating {a: 1}'); - // Drop multiple indexes. - assert.commandWorked(t.dropIndexes(['c_1', 'd_1'])); - assertIndexes(['a_1', 'e_1'], 'dropping {c: 1} and {d: 1}'); +// Drop multiple indexes. +assert.commandWorked(t.dropIndexes(['c_1', 'd_1'])); +assertIndexes(['a_1', 'e_1'], 'dropping {c: 1} and {d: 1}'); - // Must drop all the indexes provided or none at all - for example, if one of the index names - // provided is invalid. - let ex = assert.throws(() => { - t.dropIndexes(['a_1', '_id_']); - }); - assert.commandFailedWithCode(ex, ErrorCodes.InvalidOptions); - assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with _id index'); +// Must drop all the indexes provided or none at all - for example, if one of the index names +// provided is invalid. +let ex = assert.throws(() => { + t.dropIndexes(['a_1', '_id_']); +}); +assert.commandFailedWithCode(ex, ErrorCodes.InvalidOptions); +assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with _id index'); - // List of index names must contain only strings. - ex = assert.throws(() => { - t.dropIndexes(['a_1', 123]); - }); - assert.commandFailedWithCode(ex, ErrorCodes.TypeMismatch); - assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with non-string index name'); +// List of index names must contain only strings. +ex = assert.throws(() => { + t.dropIndexes(['a_1', 123]); +}); +assert.commandFailedWithCode(ex, ErrorCodes.TypeMismatch); +assertIndexes(['a_1', 'e_1'], 'failed dropIndexes command with non-string index name'); - // Drop all indexes. - assert.commandWorked(t.dropIndexes()); - assertIndexes([], 'dropping all indexes'); +// Drop all indexes. +assert.commandWorked(t.dropIndexes()); +assertIndexes([], 'dropping all indexes'); }()); diff --git a/jstests/core/dropdb.js b/jstests/core/dropdb.js index 1af56da34bf..1fd3fd10582 100644 --- a/jstests/core/dropdb.js +++ b/jstests/core/dropdb.js @@ -12,8 +12,8 @@ function check(shouldExist) { var dbs = m.getDBNames(); assert.eq(Array.contains(dbs, baseName), shouldExist, - "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist." + " dbs: " + - tojson(dbs) + "\n" + tojson(m.getDBs())); + "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist." + + " dbs: " + tojson(dbs) + "\n" + tojson(m.getDBs())); } ddb.c.save({}); diff --git a/jstests/core/elemMatchProjection.js b/jstests/core/elemMatchProjection.js index f01b566d3b6..390b7aa5d17 100644 --- a/jstests/core/elemMatchProjection.js +++ b/jstests/core/elemMatchProjection.js @@ -2,247 +2,239 @@ // Tests for $elemMatch projections and $ positional operator projection. 
(function() { - "use strict"; - - const coll = db.SERVER828Test; - coll.drop(); - - const date1 = new Date(); - - // Generate monotonically increasing _id values. ObjectIds generated by the shell are not - // guaranteed to be monotically increasing, and we will depend on the _id sort order later in - // the test. - let currentId = 0; - function nextId() { - return ++currentId; - } - - // Insert various styles of arrays. - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 100; i++) { - bulk.insert({_id: nextId(), group: 1, x: [1, 2, 3, 4, 5]}); - bulk.insert({_id: nextId(), group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]}); - bulk.insert({ - _id: nextId(), - group: 3, - x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}], - y: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}] - }); - bulk.insert({_id: nextId(), group: 3, x: [{a: 1, b: 3}, {a: -6, c: 3}]}); - bulk.insert({_id: nextId(), group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]}); - bulk.insert( - {_id: nextId(), group: 5, x: [new Date(), 5, 10, 'string', new ObjectId(), 123.456]}); - bulk.insert({ - _id: nextId(), - group: 6, - x: [ - {a: 'string', b: date1}, - {a: new ObjectId(), b: 1.2345}, - {a: 'string2', b: date1} - ] - }); - bulk.insert({_id: nextId(), group: 7, x: [{y: [1, 2, 3, 4]}]}); - bulk.insert({_id: nextId(), group: 8, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}]}); - bulk.insert({ - _id: nextId(), - group: 9, - x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}, {z: [{a: 1, b: 2}, {a: 3, b: 4}]}] - }); - bulk.insert({ - _id: nextId(), - group: 10, - x: [{a: 1, b: 2}, {a: 3, b: 4}], - y: [{c: 1, d: 2}, {c: 3, d: 4}] - }); - bulk.insert({ - _id: nextId(), - group: 10, - x: [{a: 1, b: 2}, {a: 3, b: 4}], - y: [{c: 1, d: 2}, {c: 3, d: 4}] - }); - bulk.insert({ - _id: nextId(), - group: 11, - x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}], - covered: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}] - }); - bulk.insert({_id: nextId(), group: 12, x: {y: [{a: 1, b: 1}, {a: 1, b: 2}]}}); - bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]}); - bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]}); - } - assert.writeOK(bulk.execute()); - - assert.writeOK(coll.createIndex({group: 1, 'y.d': 1})); - assert.writeOK(coll.createIndex({group: 1, covered: 1})); // for covered index test - - // Tests for the $-positional operator. 
- assert.eq(1, - coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length, - "single object match (array length match)"); - - assert.eq(2, - coll.find({group: 3, 'x.a': 1}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x[0].b, - "single object match first"); - - assert.eq(undefined, - coll.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).sort({_id: 1}).toArray()[0]._id, - "single object match with filtered _id"); - - assert.eq(1, - coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length, - "sorted single object match with filtered _id (array length match)"); - - assert.eq(1, - coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': 1}) - .toArray()[0] - .x.length, - "single object match with elemMatch"); - - assert.eq(1, - coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': 1}}) - .toArray()[0] - .x.length, - "single object match with elemMatch and positive slice"); - - assert.eq( - 1, - coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': -1}}) - .toArray()[0] - .x.length, - "single object match with elemMatch and negative slice"); - - assert.eq(1, - coll.find({'group': 12, 'x.y.a': 1}, {'x.y.$': 1}).toArray()[0].x.y.length, - "single object match with two level dot notation"); - - assert.eq(1, - coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({x: 1}).toArray()[0].x.length, - "sorted object match (array length match)"); - - assert.eq({aa: 1, dd: 5}, - coll.find({group: 3, 'y.dd': 5}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y[0], - "single object match (value match)"); - - assert.throws(function() { - coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).toArray(); - }, [], "throw on invalid projection (field mismatch)"); - - assert.throws(function() { - coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).sort({x: 1}).toArray(); - }, [], "throw on invalid sorted projection (field mismatch)"); - - assert.throws(function() { - coll.find({group: 3, 'x.a': 2}, {'x.$': 1, group: 0}).sort({x: 1}).toArray(); - }, [], "throw on invalid projection combination (include and exclude)"); - - assert.throws(function() { - coll.find({group: 3, 'x.a': 1, 'y.aa': 1}, {'x.$': 1, 'y.$': 1}).toArray(); - }, [], "throw on multiple projections"); - - assert.throws(function() { - coll.find({group: 3}, {'g.$': 1}).toArray(); - }, [], "throw on invalid projection (non-array field)"); - - assert.eq({aa: 1, dd: 5}, - coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}).toArray()[0].covered[0], - "single object match (covered index)"); - - assert.eq({aa: 1, dd: 5}, - coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}) - .sort({covered: 1}) - .toArray()[0] - .covered[0], - "single object match (sorted covered index)"); - - assert.eq(1, - coll.find({group: 10, 'y.d': 4}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y.length, - "single object match (regular index"); - - // Tests for $elemMatch projection. 
- assert.eq(-6, - coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a, - "single object match"); - - assert.eq(1, - coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length, - "filters non-matching array elements"); - - assert.eq(1, - coll.find({group: 4}, {x: {$elemMatch: {a: -6, c: 3}}}).toArray()[0].x.length, - "filters non-matching array elements with multiple elemMatch criteria"); - - assert.eq( - 1, - coll.find({group: 13}, {'x': {'$elemMatch': {a: {$gt: 0, $lt: 2}}}}) - .sort({_id: 1}) - .toArray()[0] - .x.length, - "filters non-matching array elements with multiple criteria for a single element in the array"); - - assert.eq( - 3, - coll.find({group: 4}, {x: {$elemMatch: {a: {$lt: 1}}}}).sort({_id: 1}).toArray()[0].x[0].c, - "object operator match"); - - assert.eq([4], - coll.find({group: 1}, {x: {$elemMatch: {$in: [100, 4, -123]}}}).toArray()[0].x, - "$in number match"); - - assert.eq([{a: 1, b: 2}], - coll.find({group: 2}, {x: {$elemMatch: {a: {$in: [1]}}}}).toArray()[0].x, - "$in number match"); - - assert.eq([1], - coll.find({group: 1}, {x: {$elemMatch: {$nin: [4, 5, 6]}}}).toArray()[0].x, - "$nin number match"); - - assert.eq([1], - coll.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x, - "$in number match"); - - assert.eq([{a: 'string', b: date1}], - coll.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x, - "mixed object match on string eq"); - - assert.eq([{a: 'string2', b: date1}], - coll.find({group: 6}, {x: {$elemMatch: {a: /ring2/}}}).toArray()[0].x, - "mixed object match on regexp"); - - assert.eq([{a: 'string', b: date1}], - coll.find({group: 6}, {x: {$elemMatch: {a: {$type: 2}}}}).toArray()[0].x, - "mixed object match on type"); - - assert.eq([{a: 2, c: 3}], - coll.find({group: 2}, {x: {$elemMatch: {a: {$ne: 1}}}}).toArray()[0].x, - "mixed object match on ne"); - - assert.eq([{a: 1, d: 5}], - coll.find({group: 3}, {x: {$elemMatch: {d: {$exists: true}}}}) - .sort({_id: 1}) - .toArray()[0] - .x, - "mixed object match on exists"); - - assert.eq( - [{a: 2, c: 3}], - coll.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).sort({_id: 1}).toArray()[0].x, - "mixed object match on mod"); - - assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]}, - coll.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}}) - .sort({_id: 1}) - .toArray()[0], - "multiple $elemMatch on unique fields 1"); - - // Tests involving getMore. Test the $-positional operator across multiple batches. - let a = coll.find({group: 3, 'x.b': 2}, {'x.$': 1}).sort({_id: 1}).batchSize(1); - while (a.hasNext()) { - assert.eq(2, a.next().x[0].b, "positional getMore test"); - } - - // Test the $elemMatch operator across multiple batches. - a = coll.find({group: 3}, {x: {$elemMatch: {a: 1}}}).sort({_id: 1}).batchSize(1); - while (a.hasNext()) { - assert.eq(1, a.next().x[0].a, "positional getMore test"); - } +"use strict"; + +const coll = db.SERVER828Test; +coll.drop(); + +const date1 = new Date(); + +// Generate monotonically increasing _id values. ObjectIds generated by the shell are not +// guaranteed to be monotically increasing, and we will depend on the _id sort order later in +// the test. +let currentId = 0; +function nextId() { + return ++currentId; +} + +// Insert various styles of arrays. 
+const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 100; i++) { + bulk.insert({_id: nextId(), group: 1, x: [1, 2, 3, 4, 5]}); + bulk.insert({_id: nextId(), group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]}); + bulk.insert({ + _id: nextId(), + group: 3, + x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}], + y: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}] + }); + bulk.insert({_id: nextId(), group: 3, x: [{a: 1, b: 3}, {a: -6, c: 3}]}); + bulk.insert({_id: nextId(), group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]}); + bulk.insert( + {_id: nextId(), group: 5, x: [new Date(), 5, 10, 'string', new ObjectId(), 123.456]}); + bulk.insert({ + _id: nextId(), + group: 6, + x: [{a: 'string', b: date1}, {a: new ObjectId(), b: 1.2345}, {a: 'string2', b: date1}] + }); + bulk.insert({_id: nextId(), group: 7, x: [{y: [1, 2, 3, 4]}]}); + bulk.insert({_id: nextId(), group: 8, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}]}); + bulk.insert({ + _id: nextId(), + group: 9, + x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}, {z: [{a: 1, b: 2}, {a: 3, b: 4}]}] + }); + bulk.insert({ + _id: nextId(), + group: 10, + x: [{a: 1, b: 2}, {a: 3, b: 4}], + y: [{c: 1, d: 2}, {c: 3, d: 4}] + }); + bulk.insert({ + _id: nextId(), + group: 10, + x: [{a: 1, b: 2}, {a: 3, b: 4}], + y: [{c: 1, d: 2}, {c: 3, d: 4}] + }); + bulk.insert({ + _id: nextId(), + group: 11, + x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}], + covered: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}] + }); + bulk.insert({_id: nextId(), group: 12, x: {y: [{a: 1, b: 1}, {a: 1, b: 2}]}}); + bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]}); + bulk.insert({_id: nextId(), group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]}); +} +assert.writeOK(bulk.execute()); + +assert.writeOK(coll.createIndex({group: 1, 'y.d': 1})); +assert.writeOK(coll.createIndex({group: 1, covered: 1})); // for covered index test + +// Tests for the $-positional operator. 
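An illustrative aside (not part of the patch): the positional projection exercised below keeps only the first array element matched by the query predicate, and the projected array field must itself appear in that predicate. A minimal sketch against a throwaway collection (the name 'positional_demo' is hypothetical):

// Hypothetical demo collection; mirrors the shape of the 'group: 2' documents above.
const demo = db.positional_demo;
demo.drop();
assert.writeOK(demo.insert({group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]}));
// 'x.$' keeps only the first element matched by the query predicate on 'x.a'.
assert.eq([{a: 2, c: 3}], demo.find({group: 2, 'x.a': 2}, {_id: 0, 'x.$': 1}).toArray()[0].x);
// Projecting 'y.$' while the query only constrains 'x.a' is invalid; the assertions below verify it throws.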
+assert.eq(1, + coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length, + "single object match (array length match)"); + +assert.eq(2, + coll.find({group: 3, 'x.a': 1}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x[0].b, + "single object match first"); + +assert.eq(undefined, + coll.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).sort({_id: 1}).toArray()[0]._id, + "single object match with filtered _id"); + +assert.eq(1, + coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length, + "sorted single object match with filtered _id (array length match)"); + +assert.eq(1, + coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': 1}) + .toArray()[0] + .x.length, + "single object match with elemMatch"); + +assert.eq(1, + coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': 1}}) + .toArray()[0] + .x.length, + "single object match with elemMatch and positive slice"); + +assert.eq(1, + coll.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': -1}}) + .toArray()[0] + .x.length, + "single object match with elemMatch and negative slice"); + +assert.eq(1, + coll.find({'group': 12, 'x.y.a': 1}, {'x.y.$': 1}).toArray()[0].x.y.length, + "single object match with two level dot notation"); + +assert.eq(1, + coll.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({x: 1}).toArray()[0].x.length, + "sorted object match (array length match)"); + +assert.eq({aa: 1, dd: 5}, + coll.find({group: 3, 'y.dd': 5}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y[0], + "single object match (value match)"); + +assert.throws(function() { + coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).toArray(); +}, [], "throw on invalid projection (field mismatch)"); + +assert.throws(function() { + coll.find({group: 3, 'x.a': 2}, {'y.$': 1}).sort({x: 1}).toArray(); +}, [], "throw on invalid sorted projection (field mismatch)"); + +assert.throws(function() { + coll.find({group: 3, 'x.a': 2}, {'x.$': 1, group: 0}).sort({x: 1}).toArray(); +}, [], "throw on invalid projection combination (include and exclude)"); + +assert.throws(function() { + coll.find({group: 3, 'x.a': 1, 'y.aa': 1}, {'x.$': 1, 'y.$': 1}).toArray(); +}, [], "throw on multiple projections"); + +assert.throws(function() { + coll.find({group: 3}, {'g.$': 1}).toArray(); +}, [], "throw on invalid projection (non-array field)"); + +assert.eq({aa: 1, dd: 5}, + coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}).toArray()[0].covered[0], + "single object match (covered index)"); + +assert.eq({aa: 1, dd: 5}, + coll.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}) + .sort({covered: 1}) + .toArray()[0] + .covered[0], + "single object match (sorted covered index)"); + +assert.eq(1, + coll.find({group: 10, 'y.d': 4}, {'y.$': 1}).sort({_id: 1}).toArray()[0].y.length, + "single object match (regular index"); + +// Tests for $elemMatch projection. 
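A further aside for contrast (again a sketch; the collection name is hypothetical): a $elemMatch projection carries its own predicate, so the query need not mention the array field; only the first matching element is returned, and the field is omitted entirely when nothing matches.

// Hypothetical demo collection; mirrors the 'group: 4' documents above.
const demo = db.elemmatch_projection_demo;
demo.drop();
assert.writeOK(demo.insert({group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]}));
// Only the first element satisfying the $elemMatch predicate is kept.
assert.eq([{a: -6, c: 3}], demo.find({group: 4}, {_id: 0, x: {$elemMatch: {a: -6}}}).toArray()[0].x);
// No element matches, so 'x' is omitted from the projected document.
assert.eq(undefined, demo.find({group: 4}, {_id: 0, x: {$elemMatch: {a: 99}}}).toArray()[0].x);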
+assert.eq(-6, + coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a, + "single object match"); + +assert.eq(1, + coll.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length, + "filters non-matching array elements"); + +assert.eq(1, + coll.find({group: 4}, {x: {$elemMatch: {a: -6, c: 3}}}).toArray()[0].x.length, + "filters non-matching array elements with multiple elemMatch criteria"); + +assert.eq( + 1, + coll.find({group: 13}, {'x': {'$elemMatch': {a: {$gt: 0, $lt: 2}}}}) + .sort({_id: 1}) + .toArray()[0] + .x.length, + "filters non-matching array elements with multiple criteria for a single element in the array"); + +assert.eq( + 3, + coll.find({group: 4}, {x: {$elemMatch: {a: {$lt: 1}}}}).sort({_id: 1}).toArray()[0].x[0].c, + "object operator match"); + +assert.eq([4], + coll.find({group: 1}, {x: {$elemMatch: {$in: [100, 4, -123]}}}).toArray()[0].x, + "$in number match"); + +assert.eq([{a: 1, b: 2}], + coll.find({group: 2}, {x: {$elemMatch: {a: {$in: [1]}}}}).toArray()[0].x, + "$in number match"); + +assert.eq([1], + coll.find({group: 1}, {x: {$elemMatch: {$nin: [4, 5, 6]}}}).toArray()[0].x, + "$nin number match"); + +assert.eq( + [1], coll.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x, "$in number match"); + +assert.eq([{a: 'string', b: date1}], + coll.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x, + "mixed object match on string eq"); + +assert.eq([{a: 'string2', b: date1}], + coll.find({group: 6}, {x: {$elemMatch: {a: /ring2/}}}).toArray()[0].x, + "mixed object match on regexp"); + +assert.eq([{a: 'string', b: date1}], + coll.find({group: 6}, {x: {$elemMatch: {a: {$type: 2}}}}).toArray()[0].x, + "mixed object match on type"); + +assert.eq([{a: 2, c: 3}], + coll.find({group: 2}, {x: {$elemMatch: {a: {$ne: 1}}}}).toArray()[0].x, + "mixed object match on ne"); + +assert.eq( + [{a: 1, d: 5}], + coll.find({group: 3}, {x: {$elemMatch: {d: {$exists: true}}}}).sort({_id: 1}).toArray()[0].x, + "mixed object match on exists"); + +assert.eq( + [{a: 2, c: 3}], + coll.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).sort({_id: 1}).toArray()[0].x, + "mixed object match on mod"); + +assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]}, + coll.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}}) + .sort({_id: 1}) + .toArray()[0], + "multiple $elemMatch on unique fields 1"); + +// Tests involving getMore. Test the $-positional operator across multiple batches. +let a = coll.find({group: 3, 'x.b': 2}, {'x.$': 1}).sort({_id: 1}).batchSize(1); +while (a.hasNext()) { + assert.eq(2, a.next().x[0].b, "positional getMore test"); +} + +// Test the $elemMatch operator across multiple batches. +a = coll.find({group: 3}, {x: {$elemMatch: {a: 1}}}).sort({_id: 1}).batchSize(1); +while (a.hasNext()) { + assert.eq(1, a.next().x[0].a, "positional getMore test"); +} }()); diff --git a/jstests/core/elemmatch_or_pushdown.js b/jstests/core/elemmatch_or_pushdown.js index b9a6d5bcc41..86888996b19 100644 --- a/jstests/core/elemmatch_or_pushdown.js +++ b/jstests/core/elemmatch_or_pushdown.js @@ -3,82 +3,84 @@ * SERVER-38164. 
*/ (function() { - "use strict"; +"use strict"; - const coll = db.elemmatch_or_pushdown; - coll.drop(); +const coll = db.elemmatch_or_pushdown; +coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 1, b: [{c: 4}]})); - assert.writeOK(coll.insert({_id: 1, a: 2, b: [{c: 4}]})); - assert.writeOK(coll.insert({_id: 2, a: 2, b: [{c: 5}]})); - assert.writeOK(coll.insert({_id: 3, a: 1, b: [{c: 5}]})); - assert.writeOK(coll.insert({_id: 4, a: 1, b: [{c: 6}]})); - assert.writeOK(coll.insert({_id: 5, a: 1, b: [{c: 7}]})); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); +assert.writeOK(coll.insert({_id: 0, a: 1, b: [{c: 4}]})); +assert.writeOK(coll.insert({_id: 1, a: 2, b: [{c: 4}]})); +assert.writeOK(coll.insert({_id: 2, a: 2, b: [{c: 5}]})); +assert.writeOK(coll.insert({_id: 3, a: 1, b: [{c: 5}]})); +assert.writeOK(coll.insert({_id: 4, a: 1, b: [{c: 6}]})); +assert.writeOK(coll.insert({_id: 5, a: 1, b: [{c: 7}]})); +assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); - assert.eq(coll.find({a: 1, b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}).sort({_id: 1}).toArray(), - [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]); - assert.eq(coll.find({a: 1, $or: [{a: 2}, {b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}]}) - .sort({_id: 1}) - .toArray(), - [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]); +assert.eq(coll.find({a: 1, b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}).sort({_id: 1}).toArray(), + [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]); +assert.eq(coll.find({a: 1, $or: [{a: 2}, {b: {$elemMatch: {$or: [{c: 4}, {c: 5}]}}}]}) + .sort({_id: 1}) + .toArray(), + [{_id: 0, a: 1, b: [{c: 4}]}, {_id: 3, a: 1, b: [{c: 5}]}]); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]})); - assert.writeOK(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]})); - assert.writeOK(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]})); - assert.writeOK(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]})); - assert.writeOK(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]})); - assert.writeOK(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]})); - assert.writeOK(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]})); - assert.writeOK(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]})); - assert.commandWorked(coll.createIndex({"b.d": 1, "b.c.f": 1})); - assert.commandWorked(coll.createIndex({"b.e": 1, "b.c.f": 1})); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]})); +assert.writeOK(coll.insert({_id: 1, a: 4, b: [{c: [{f: 8}], d: 6}]})); +assert.writeOK(coll.insert({_id: 2, a: 5, b: [{c: [{f: 8}], d: 7}]})); +assert.writeOK(coll.insert({_id: 3, a: 4, b: [{c: [{f: 9}], d: 6}]})); +assert.writeOK(coll.insert({_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]})); +assert.writeOK(coll.insert({_id: 5, a: 4, b: [{c: [{f: 8}], e: 7}]})); +assert.writeOK(coll.insert({_id: 6, a: 5, b: [{c: [{f: 8}], e: 8}]})); +assert.writeOK(coll.insert({_id: 7, a: 5, b: [{c: [{f: 9}], e: 7}]})); +assert.commandWorked(coll.createIndex({"b.d": 1, "b.c.f": 1})); +assert.commandWorked(coll.createIndex({"b.e": 1, "b.c.f": 1})); - assert.eq(coll.find({a: 5, b: {$elemMatch: {c: {$elemMatch: {f: 8}}, $or: [{d: 6}, {e: 7}]}}}) - .sort({_id: 1}) - .toArray(), - [{_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}, {_id: 4, a: 5, b: [{c: [{f: 8}], e: 7}]}]); +assert.eq(coll.find({a: 5, b: {$elemMatch: {c: {$elemMatch: {f: 8}}, $or: [{d: 6}, {e: 7}]}}}) + .sort({_id: 1}) + .toArray(), + [{_id: 0, a: 5, b: [{c: [{f: 8}], d: 6}]}, {_id: 4, a: 5, b: [{c: [{f: 8}], 
e: 7}]}]); - // Test that $not predicates in $elemMatch can be pushed into an $or sibling of the $elemMatch. - coll.drop(); - assert.commandWorked(coll.insert({_id: 0, arr: [{a: 0, b: 2}], c: 4, d: 5})); - assert.commandWorked(coll.insert({_id: 1, arr: [{a: 1, b: 2}], c: 4, d: 5})); - assert.commandWorked(coll.insert({_id: 2, arr: [{a: 0, b: 3}], c: 4, d: 5})); - assert.commandWorked(coll.insert({_id: 3, arr: [{a: 1, b: 3}], c: 4, d: 5})); - assert.commandWorked(coll.insert({_id: 4, arr: [{a: 0, b: 2}], c: 6, d: 7})); - assert.commandWorked(coll.insert({_id: 5, arr: [{a: 1, b: 2}], c: 6, d: 7})); - assert.commandWorked(coll.insert({_id: 6, arr: [{a: 0, b: 3}], c: 6, d: 7})); - assert.commandWorked(coll.insert({_id: 7, arr: [{a: 1, b: 3}], c: 6, d: 7})); +// Test that $not predicates in $elemMatch can be pushed into an $or sibling of the $elemMatch. +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, arr: [{a: 0, b: 2}], c: 4, d: 5})); +assert.commandWorked(coll.insert({_id: 1, arr: [{a: 1, b: 2}], c: 4, d: 5})); +assert.commandWorked(coll.insert({_id: 2, arr: [{a: 0, b: 3}], c: 4, d: 5})); +assert.commandWorked(coll.insert({_id: 3, arr: [{a: 1, b: 3}], c: 4, d: 5})); +assert.commandWorked(coll.insert({_id: 4, arr: [{a: 0, b: 2}], c: 6, d: 7})); +assert.commandWorked(coll.insert({_id: 5, arr: [{a: 1, b: 2}], c: 6, d: 7})); +assert.commandWorked(coll.insert({_id: 6, arr: [{a: 0, b: 3}], c: 6, d: 7})); +assert.commandWorked(coll.insert({_id: 7, arr: [{a: 1, b: 3}], c: 6, d: 7})); - const keyPattern = {"arr.a": 1, "arr.b": 1, c: 1, d: 1}; - assert.commandWorked(coll.createIndex(keyPattern)); +const keyPattern = { + "arr.a": 1, + "arr.b": 1, + c: 1, + d: 1 +}; +assert.commandWorked(coll.createIndex(keyPattern)); - const elemMatchOr = { - arr: {$elemMatch: {a: {$ne: 1}, $or: [{b: 2}, {b: 3}]}}, - $or: [ - {c: 4, d: 5}, - {c: 6, d: 7}, - ], - }; +const elemMatchOr = { + arr: {$elemMatch: {a: {$ne: 1}, $or: [{b: 2}, {b: 3}]}}, + $or: [ + {c: 4, d: 5}, + {c: 6, d: 7}, + ], +}; - // Confirm that we get the same results using the index and a COLLSCAN. - for (let hint of[keyPattern, {$natural: 1}]) { - assert.eq(coll.find(elemMatchOr, {_id: 1}).sort({_id: 1}).hint(hint).toArray(), - [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]); +// Confirm that we get the same results using the index and a COLLSCAN. +for (let hint of [keyPattern, {$natural: 1}]) { + assert.eq(coll.find(elemMatchOr, {_id: 1}).sort({_id: 1}).hint(hint).toArray(), + [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]); - assert.eq( - coll.aggregate( - [ - { - $match: - {arr: {$elemMatch: {a: {$ne: 1}}}, $or: [{c: 4, d: 5}, {c: 6, d: 7}]} - }, - {$project: {_id: 1}}, - {$sort: {_id: 1}} - ], - {hint: hint}) - .toArray(), - [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]); - } + assert.eq( + coll.aggregate( + [ + {$match: {arr: {$elemMatch: {a: {$ne: 1}}}, $or: [{c: 4, d: 5}, {c: 6, d: 7}]}}, + {$project: {_id: 1}}, + {$sort: {_id: 1}} + ], + {hint: hint}) + .toArray(), + [{_id: 0}, {_id: 2}, {_id: 4}, {_id: 6}]); +} }()); diff --git a/jstests/core/ensure_sorted.js b/jstests/core/ensure_sorted.js index c2d29728c59..037eda45c19 100644 --- a/jstests/core/ensure_sorted.js +++ b/jstests/core/ensure_sorted.js @@ -6,25 +6,25 @@ // SERVER-17011 Tests whether queries which specify sort and batch size can generate results out of // order due to the ntoreturn hack. The EnsureSortedStage should solve this problem. 
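For context (an aside; the helper name below is hypothetical and not part of the test): "out of order" here means a {b: -1} sort must never return a 'b' value larger than one already handed to the client, even when a document's sort key is updated between getMore batches. The invariant the test relies on can be sketched as:

// Hypothetical checker: results of a descending sort on 'b' must be non-increasing.
function assertNonIncreasingByB(docs) {
    for (let i = 1; i < docs.length; i++) {
        assert.lte(docs[i].b, docs[i - 1].b, "out-of-order results: " + tojson(docs));
    }
}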
(function() { - 'use strict'; - var coll = db.ensure_sorted; +'use strict'; +var coll = db.ensure_sorted; - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 1, b: 4})); - assert.writeOK(coll.insert({a: 2, b: 3})); - assert.writeOK(coll.insert({a: 3, b: 2})); - assert.writeOK(coll.insert({a: 4, b: 1})); +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 1, b: 4})); +assert.writeOK(coll.insert({a: 2, b: 3})); +assert.writeOK(coll.insert({a: 3, b: 2})); +assert.writeOK(coll.insert({a: 4, b: 1})); - var cursor = coll.find({a: {$lt: 5}}).sort({b: -1}).batchSize(2); - cursor.next(); // {a: 1, b: 4}. - cursor.next(); // {a: 2, b: 3}. +var cursor = coll.find({a: {$lt: 5}}).sort({b: -1}).batchSize(2); +cursor.next(); // {a: 1, b: 4}. +cursor.next(); // {a: 2, b: 3}. - assert.writeOK(coll.update({b: 2}, {$set: {b: 5}})); - var result = cursor.next(); +assert.writeOK(coll.update({b: 2}, {$set: {b: 5}})); +var result = cursor.next(); - // We might either drop the document where "b" is 2 from the result set, or we might include the - // old version of this document (before the update is applied). Either is acceptable, but - // out-of-order results are unacceptable. - assert(result.b === 2 || result.b === 1, "cursor returned: " + printjson(result)); +// We might either drop the document where "b" is 2 from the result set, or we might include the +// old version of this document (before the update is applied). Either is acceptable, but +// out-of-order results are unacceptable. +assert(result.b === 2 || result.b === 1, "cursor returned: " + printjson(result)); })(); diff --git a/jstests/core/exhaust.js b/jstests/core/exhaust.js index fe76916ee95..125c70cefe8 100644 --- a/jstests/core/exhaust.js +++ b/jstests/core/exhaust.js @@ -1,26 +1,25 @@ // @tags: [requires_getmore] (function() { - 'use strict'; +'use strict'; - var c = db.exhaustColl; - c.drop(); +var c = db.exhaustColl; +c.drop(); - const docCount = 4; - for (var i = 0; i < docCount; i++) { - assert.writeOK(c.insert({a: i})); - } +const docCount = 4; +for (var i = 0; i < docCount; i++) { + assert.writeOK(c.insert({a: i})); +} - // Check that the query works without exhaust set - assert.eq(c.find().batchSize(1).itcount(), docCount); - - // Now try to run the same query with exhaust - try { - assert.eq(c.find().batchSize(1).addOption(DBQuery.Option.exhaust).itcount(), docCount); - } catch (e) { - // The exhaust option is not valid against mongos, ensure that this query throws the right - // code - assert.eq(e.code, 18526, () => tojson(e)); - } +// Check that the query works without exhaust set +assert.eq(c.find().batchSize(1).itcount(), docCount); +// Now try to run the same query with exhaust +try { + assert.eq(c.find().batchSize(1).addOption(DBQuery.Option.exhaust).itcount(), docCount); +} catch (e) { + // The exhaust option is not valid against mongos, ensure that this query throws the right + // code + assert.eq(e.code, 18526, () => tojson(e)); +} }()); diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js index d98fd3f2d68..66d0ded50d4 100644 --- a/jstests/core/existsa.js +++ b/jstests/core/existsa.js @@ -2,110 +2,110 @@ * Tests that sparse indexes are disallowed for $exists:false queries. 
*/ (function() { - "use strict"; - - const coll = db.jstests_existsa; - coll.drop(); - - assert.writeOK(coll.insert({})); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: {x: 1}, b: 1})); - - let indexKeySpec = {}; - let indexKeyField = ''; - - /** Configure testing of an index { <indexKeyField>:1 }. */ - function setIndex(_indexKeyField) { - indexKeyField = _indexKeyField; - indexKeySpec = {}; - indexKeySpec[indexKeyField] = 1; - coll.ensureIndex(indexKeySpec, {sparse: true}); - } - setIndex('a'); - - /** @return count when hinting the index to use. */ - function hintedCount(query) { - return coll.find(query).hint(indexKeySpec).itcount(); - } - - /** The query field does not exist and the sparse index is not used without a hint. */ - function assertMissing(query, expectedMissing = 1, expectedIndexedMissing = 0) { - assert.eq(expectedMissing, coll.count(query)); - // We also shouldn't get a different count depending on whether - // an index is used or not. - assert.eq(expectedIndexedMissing, hintedCount(query)); - } - - /** The query field exists and the sparse index is used without a hint. */ - function assertExists(query, expectedExists = 2) { - assert.eq(expectedExists, coll.count(query)); - // An $exists:true predicate generates no index filters. Add another predicate on the index - // key to trigger use of the index. - let andClause = {}; - andClause[indexKeyField] = {$ne: null}; - Object.extend(query, {$and: [andClause]}); - assert.eq(expectedExists, coll.count(query)); - assert.eq(expectedExists, hintedCount(query)); - } - - /** The query field exists and the sparse index is not used without a hint. */ - function assertExistsUnindexed(query, expectedExists = 2) { - assert.eq(expectedExists, coll.count(query)); - // Even with another predicate on the index key, the sparse index is disallowed. - let andClause = {}; - andClause[indexKeyField] = {$ne: null}; - Object.extend(query, {$and: [andClause]}); - assert.eq(expectedExists, coll.count(query)); - assert.eq(expectedExists, hintedCount(query)); - } - - // $exists:false queries match the proper number of documents and disallow the sparse index. - assertMissing({a: {$exists: false}}); - assertMissing({a: {$not: {$exists: true}}}); - assertMissing({$and: [{a: {$exists: false}}]}); - assertMissing({$or: [{a: {$exists: false}}]}); - assertMissing({$nor: [{a: {$exists: true}}]}); - assertMissing({'a.x': {$exists: false}}, 2, 1); - - // Currently a sparse index is disallowed even if the $exists:false query is on a different - // field. - assertMissing({b: {$exists: false}}, 2, 1); - assertMissing({b: {$exists: false}, a: {$ne: 6}}, 2, 1); - assertMissing({b: {$not: {$exists: true}}}, 2, 1); - - // Top level $exists:true queries match the proper number of documents - // and use the sparse index on { a : 1 }. - assertExists({a: {$exists: true}}); - - // Nested $exists queries match the proper number of documents and disallow the sparse index. - assertExistsUnindexed({$nor: [{a: {$exists: false}}]}); - assertExistsUnindexed({$nor: [{'a.x': {$exists: false}}]}, 1); - assertExistsUnindexed({a: {$not: {$exists: false}}}); - - // Nested $exists queries disallow the sparse index in some cases where it is not strictly - // necessary to do so. (Descriptive tests.) - assertExistsUnindexed({$nor: [{b: {$exists: false}}]}, 1); // Unindexed field. - assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not $exists:false. - - // Behavior is similar with $elemMatch. 
- coll.drop(); - assert.writeOK(coll.insert({a: [{}]})); - assert.writeOK(coll.insert({a: [{b: 1}]})); - assert.writeOK(coll.insert({a: [{b: [1]}]})); - setIndex('a.b'); - - assertMissing({a: {$elemMatch: {b: {$exists: false}}}}); - - // A $elemMatch predicate is treated as nested, and the index should be used for $exists:true. - assertExists({a: {$elemMatch: {b: {$exists: true}}}}); - - // A $not within $elemMatch should not attempt to use a sparse index for $exists:false. - assertExistsUnindexed({'a.b': {$elemMatch: {$not: {$exists: false}}}}, 1); - assertExistsUnindexed({'a.b': {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, 1); - - // A non sparse index will not be disallowed. - coll.drop(); - assert.writeOK(coll.insert({})); - coll.ensureIndex({a: 1}); - assert.eq(1, coll.find({a: {$exists: false}}).itcount()); +"use strict"; + +const coll = db.jstests_existsa; +coll.drop(); + +assert.writeOK(coll.insert({})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: {x: 1}, b: 1})); + +let indexKeySpec = {}; +let indexKeyField = ''; + +/** Configure testing of an index { <indexKeyField>:1 }. */ +function setIndex(_indexKeyField) { + indexKeyField = _indexKeyField; + indexKeySpec = {}; + indexKeySpec[indexKeyField] = 1; + coll.ensureIndex(indexKeySpec, {sparse: true}); +} +setIndex('a'); + +/** @return count when hinting the index to use. */ +function hintedCount(query) { + return coll.find(query).hint(indexKeySpec).itcount(); +} + +/** The query field does not exist and the sparse index is not used without a hint. */ +function assertMissing(query, expectedMissing = 1, expectedIndexedMissing = 0) { + assert.eq(expectedMissing, coll.count(query)); + // We also shouldn't get a different count depending on whether + // an index is used or not. + assert.eq(expectedIndexedMissing, hintedCount(query)); +} + +/** The query field exists and the sparse index is used without a hint. */ +function assertExists(query, expectedExists = 2) { + assert.eq(expectedExists, coll.count(query)); + // An $exists:true predicate generates no index filters. Add another predicate on the index + // key to trigger use of the index. + let andClause = {}; + andClause[indexKeyField] = {$ne: null}; + Object.extend(query, {$and: [andClause]}); + assert.eq(expectedExists, coll.count(query)); + assert.eq(expectedExists, hintedCount(query)); +} + +/** The query field exists and the sparse index is not used without a hint. */ +function assertExistsUnindexed(query, expectedExists = 2) { + assert.eq(expectedExists, coll.count(query)); + // Even with another predicate on the index key, the sparse index is disallowed. + let andClause = {}; + andClause[indexKeyField] = {$ne: null}; + Object.extend(query, {$and: [andClause]}); + assert.eq(expectedExists, coll.count(query)); + assert.eq(expectedExists, hintedCount(query)); +} + +// $exists:false queries match the proper number of documents and disallow the sparse index. +assertMissing({a: {$exists: false}}); +assertMissing({a: {$not: {$exists: true}}}); +assertMissing({$and: [{a: {$exists: false}}]}); +assertMissing({$or: [{a: {$exists: false}}]}); +assertMissing({$nor: [{a: {$exists: true}}]}); +assertMissing({'a.x': {$exists: false}}, 2, 1); + +// Currently a sparse index is disallowed even if the $exists:false query is on a different +// field. 
+assertMissing({b: {$exists: false}}, 2, 1); +assertMissing({b: {$exists: false}, a: {$ne: 6}}, 2, 1); +assertMissing({b: {$not: {$exists: true}}}, 2, 1); + +// Top level $exists:true queries match the proper number of documents +// and use the sparse index on { a : 1 }. +assertExists({a: {$exists: true}}); + +// Nested $exists queries match the proper number of documents and disallow the sparse index. +assertExistsUnindexed({$nor: [{a: {$exists: false}}]}); +assertExistsUnindexed({$nor: [{'a.x': {$exists: false}}]}, 1); +assertExistsUnindexed({a: {$not: {$exists: false}}}); + +// Nested $exists queries disallow the sparse index in some cases where it is not strictly +// necessary to do so. (Descriptive tests.) +assertExistsUnindexed({$nor: [{b: {$exists: false}}]}, 1); // Unindexed field. +assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not $exists:false. + +// Behavior is similar with $elemMatch. +coll.drop(); +assert.writeOK(coll.insert({a: [{}]})); +assert.writeOK(coll.insert({a: [{b: 1}]})); +assert.writeOK(coll.insert({a: [{b: [1]}]})); +setIndex('a.b'); + +assertMissing({a: {$elemMatch: {b: {$exists: false}}}}); + +// A $elemMatch predicate is treated as nested, and the index should be used for $exists:true. +assertExists({a: {$elemMatch: {b: {$exists: true}}}}); + +// A $not within $elemMatch should not attempt to use a sparse index for $exists:false. +assertExistsUnindexed({'a.b': {$elemMatch: {$not: {$exists: false}}}}, 1); +assertExistsUnindexed({'a.b': {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, 1); + +// A non sparse index will not be disallowed. +coll.drop(); +assert.writeOK(coll.insert({})); +coll.ensureIndex({a: 1}); +assert.eq(1, coll.find({a: {$exists: false}}).itcount()); })(); diff --git a/jstests/core/explain_agg_write_concern.js b/jstests/core/explain_agg_write_concern.js index 5377d0011c3..cf28b097632 100644 --- a/jstests/core/explain_agg_write_concern.js +++ b/jstests/core/explain_agg_write_concern.js @@ -12,64 +12,62 @@ * Tests related to the aggregate commands behavior with writeConcern and writeConcern + explain. */ (function() { - "use strict"; +"use strict"; - const collName = "explain_agg_write_concern"; - let coll = db[collName]; - let outColl = db[collName + "_out"]; - coll.drop(); - outColl.drop(); +const collName = "explain_agg_write_concern"; +let coll = db[collName]; +let outColl = db[collName + "_out"]; +coll.drop(); +outColl.drop(); - assert.writeOK(coll.insert({_id: 1})); +assert.writeOK(coll.insert({_id: 1})); - // Agg should accept write concern if the last stage is a $out. - assert.commandWorked(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$out: outColl.getName()}], - cursor: {}, - writeConcern: {w: 1} - })); - assert.eq(1, outColl.find().itcount()); - outColl.drop(); +// Agg should accept write concern if the last stage is a $out. +assert.commandWorked(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$out: outColl.getName()}], + cursor: {}, + writeConcern: {w: 1} +})); +assert.eq(1, outColl.find().itcount()); +outColl.drop(); - // Agg should accept writeConcern even if read-only. - assert.commandWorked( - db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {}, writeConcern: {w: 1}})); +// Agg should accept writeConcern even if read-only. +assert.commandWorked( + db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {}, writeConcern: {w: 1}})); - // Agg should succeed if the last stage is an $out and the explain flag is set. 
- assert.commandWorked(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$out: outColl.getName()}], - explain: true, - })); - assert.eq(0, outColl.find().itcount()); - outColl.drop(); +// Agg should succeed if the last stage is an $out and the explain flag is set. +assert.commandWorked(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$out: outColl.getName()}], + explain: true, +})); +assert.eq(0, outColl.find().itcount()); +outColl.drop(); - // Agg should fail if the last stage is an $out and both the explain flag and writeConcern are - // set. - assert.commandFailed(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$out: outColl.getName()}], - explain: true, - writeConcern: {w: 1} - })); +// Agg should fail if the last stage is an $out and both the explain flag and writeConcern are +// set. +assert.commandFailed(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$out: outColl.getName()}], + explain: true, + writeConcern: {w: 1} +})); - // Agg explain helpers with all verbosities (or verbosity omitted) should fail if the last stage - // is an $out and writeConcern is set. - assert.throws(function() { - coll.explain().aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}}); - }); - assert.throws(function() { - coll.explain("queryPlanner").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}}); - }); - assert.throws(function() { - coll.explain("executionStats").aggregate([{$out: outColl.getName()}], { - writeConcern: {w: 1} - }); - }); - assert.throws(function() { - coll.explain("allPlansExecution").aggregate([{$out: outColl.getName()}], { - writeConcern: {w: 1} - }); +// Agg explain helpers with all verbosities (or verbosity omitted) should fail if the last stage +// is an $out and writeConcern is set. +assert.throws(function() { + coll.explain().aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}}); +}); +assert.throws(function() { + coll.explain("queryPlanner").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}}); +}); +assert.throws(function() { + coll.explain("executionStats").aggregate([{$out: outColl.getName()}], {writeConcern: {w: 1}}); +}); +assert.throws(function() { + coll.explain("allPlansExecution").aggregate([{$out: outColl.getName()}], { + writeConcern: {w: 1} }); +}); }()); diff --git a/jstests/core/explain_db_mismatch.js b/jstests/core/explain_db_mismatch.js index 13d54cae77f..09950f489d0 100644 --- a/jstests/core/explain_db_mismatch.js +++ b/jstests/core/explain_db_mismatch.js @@ -1,7 +1,6 @@ // Ensure that explain command errors if the inner command has a $db field that doesn't match the // outer command. (function() { - assert.commandFailedWithCode( - db.runCommand({explain: {find: 'some_collection', $db: 'not_my_db'}}), - ErrorCodes.InvalidNamespace); +assert.commandFailedWithCode(db.runCommand({explain: {find: 'some_collection', $db: 'not_my_db'}}), + ErrorCodes.InvalidNamespace); }()); diff --git a/jstests/core/explain_delete.js b/jstests/core/explain_delete.js index 9599c7df9b8..1863979faa5 100644 --- a/jstests/core/explain_delete.js +++ b/jstests/core/explain_delete.js @@ -2,66 +2,66 @@ // Tests for explaining the delete command. (function() { - "use strict"; +"use strict"; - var collName = "jstests_explain_delete"; - var t = db[collName]; - t.drop(); +var collName = "jstests_explain_delete"; +var t = db[collName]; +t.drop(); - var explain; +var explain; - /** - * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete - * value equal to 'nWouldDelete'. 
- */ - function checkNWouldDelete(explain, nWouldDelete) { - assert.commandWorked(explain); - assert("executionStats" in explain); - var executionStats = explain.executionStats; - assert("executionStages" in executionStats); +/** + * Verify that the explain command output 'explain' shows a DELETE stage with an nWouldDelete + * value equal to 'nWouldDelete'. + */ +function checkNWouldDelete(explain, nWouldDelete) { + assert.commandWorked(explain); + assert("executionStats" in explain); + var executionStats = explain.executionStats; + assert("executionStages" in executionStats); - // If passed through mongos, then DELETE stage(s) should be below the SHARD_WRITE mongos - // stage. Otherwise the DELETE stage is the root stage. - var execStages = executionStats.executionStages; - if ("SHARD_WRITE" === execStages.stage) { - let totalToBeDeletedAcrossAllShards = 0; - execStages.shards.forEach(function(shardExplain) { - const rootStageName = shardExplain.executionStages.stage; - assert.eq(rootStageName, "DELETE", tojson(execStages)); - totalToBeDeletedAcrossAllShards += shardExplain.executionStages.nWouldDelete; - }); - assert.eq(totalToBeDeletedAcrossAllShards, nWouldDelete, explain); - } else { - assert.eq(execStages.stage, "DELETE", explain); - assert.eq(execStages.nWouldDelete, nWouldDelete, explain); - } + // If passed through mongos, then DELETE stage(s) should be below the SHARD_WRITE mongos + // stage. Otherwise the DELETE stage is the root stage. + var execStages = executionStats.executionStages; + if ("SHARD_WRITE" === execStages.stage) { + let totalToBeDeletedAcrossAllShards = 0; + execStages.shards.forEach(function(shardExplain) { + const rootStageName = shardExplain.executionStages.stage; + assert.eq(rootStageName, "DELETE", tojson(execStages)); + totalToBeDeletedAcrossAllShards += shardExplain.executionStages.nWouldDelete; + }); + assert.eq(totalToBeDeletedAcrossAllShards, nWouldDelete, explain); + } else { + assert.eq(execStages.stage, "DELETE", explain); + assert.eq(execStages.nWouldDelete, nWouldDelete, explain); } +} - // Explain delete against an empty collection. - assert.commandWorked(db.createCollection(t.getName())); - explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); - checkNWouldDelete(explain, 0); +// Explain delete against an empty collection. +assert.commandWorked(db.createCollection(t.getName())); +explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); +checkNWouldDelete(explain, 0); - // Add an index but no data, and check that the explain still works. - t.ensureIndex({a: 1}); - explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); - checkNWouldDelete(explain, 0); +// Add an index but no data, and check that the explain still works. +t.ensureIndex({a: 1}); +explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); +checkNWouldDelete(explain, 0); - // Add some copies of the same document. - for (var i = 0; i < 10; i++) { - t.insert({a: 1}); - } - assert.eq(10, t.count()); +// Add some copies of the same document. +for (var i = 0; i < 10; i++) { + t.insert({a: 1}); +} +assert.eq(10, t.count()); - // Run an explain which shows that all 10 documents *would* be deleted. - explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); - checkNWouldDelete(explain, 10); +// Run an explain which shows that all 10 documents *would* be deleted. 
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}}); +checkNWouldDelete(explain, 10); - // Make sure all 10 documents are still there. - assert.eq(10, t.count()); +// Make sure all 10 documents are still there. +assert.eq(10, t.count()); - // If we run the same thing without the explain, then all 10 docs should be deleted. - var deleteResult = db.runCommand({delete: collName, deletes: [{q: {a: 1}, limit: 0}]}); - assert.commandWorked(deleteResult); - assert.eq(0, t.count()); +// If we run the same thing without the explain, then all 10 docs should be deleted. +var deleteResult = db.runCommand({delete: collName, deletes: [{q: {a: 1}, limit: 0}]}); +assert.commandWorked(deleteResult); +assert.eq(0, t.count()); }()); diff --git a/jstests/core/explain_distinct.js b/jstests/core/explain_distinct.js index a3cb6606d30..1c4d6612acb 100644 --- a/jstests/core/explain_distinct.js +++ b/jstests/core/explain_distinct.js @@ -6,86 +6,86 @@ * This test ensures that explain on the distinct command works. */ (function() { - 'use strict'; +'use strict'; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - var collName = "jstests_explain_distinct"; - var coll = db[collName]; +var collName = "jstests_explain_distinct"; +var coll = db[collName]; - function runDistinctExplain(collection, keyString, query) { - var distinctCmd = {distinct: collection.getName(), key: keyString}; +function runDistinctExplain(collection, keyString, query) { + var distinctCmd = {distinct: collection.getName(), key: keyString}; - if (typeof query !== 'undefined') { - distinctCmd.query = query; - } - - return coll.runCommand({explain: distinctCmd, verbosity: 'executionStats'}); - } - - coll.drop(); - - // Collection doesn't exist. - var explain = runDistinctExplain(coll, 'a', {}); - assert.commandWorked(explain); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF")); - - // Insert the data to perform distinct() on. - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 2, c: 1})); + if (typeof query !== 'undefined') { + distinctCmd.query = query; } - assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString. - assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query. - assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query. - assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query. - assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query. - - // Ensure that server accepts a distinct command with no 'query' field. - assert.commandWorked(runDistinctExplain(coll, '', null)); - assert.commandWorked(runDistinctExplain(coll, '')); - - assert.eq([1], coll.distinct('b')); - var explain = runDistinctExplain(coll, 'b', {}); - assert.commandWorked(explain); - assert.eq(20, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - assert.commandWorked(coll.createIndex({a: 1})); - - assert.eq([1, 2], coll.distinct('a')); - var explain = runDistinctExplain(coll, 'a', {}); - assert.commandWorked(explain); - assert.eq(2, explain.executionStats.nReturned); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - - // Check that the DISTINCT_SCAN stage has the correct stats. 
- var stage = getPlanStage(explain.queryPlanner.winningPlan, "DISTINCT_SCAN"); - assert.eq({a: 1}, stage.keyPattern); - assert.eq("a_1", stage.indexName); - assert.eq(false, stage.isMultiKey); - assert.eq(false, stage.isUnique); - assert.eq(false, stage.isSparse); - assert.eq(false, stage.isPartial); - assert.lte(1, stage.indexVersion); - assert("indexBounds" in stage); - - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - - assert.eq([1], coll.distinct('a', {a: 1})); - var explain = runDistinctExplain(coll, 'a', {a: 1}); - assert.commandWorked(explain); - assert.eq(1, explain.executionStats.nReturned); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); - - assert.eq([1], coll.distinct('b', {a: 1})); - var explain = runDistinctExplain(coll, 'b', {a: 1}); - assert.commandWorked(explain); - assert.eq(1, explain.executionStats.nReturned); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); + return coll.runCommand({explain: distinctCmd, verbosity: 'executionStats'}); +} + +coll.drop(); + +// Collection doesn't exist. +var explain = runDistinctExplain(coll, 'a', {}); +assert.commandWorked(explain); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF")); + +// Insert the data to perform distinct() on. +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({a: 1, b: 1})); + assert.writeOK(coll.insert({a: 2, c: 1})); +} + +assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString. +assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query. +assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query. +assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query. +assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query. + +// Ensure that server accepts a distinct command with no 'query' field. +assert.commandWorked(runDistinctExplain(coll, '', null)); +assert.commandWorked(runDistinctExplain(coll, '')); + +assert.eq([1], coll.distinct('b')); +var explain = runDistinctExplain(coll, 'b', {}); +assert.commandWorked(explain); +assert.eq(20, explain.executionStats.nReturned); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +assert.commandWorked(coll.createIndex({a: 1})); + +assert.eq([1, 2], coll.distinct('a')); +var explain = runDistinctExplain(coll, 'a', {}); +assert.commandWorked(explain); +assert.eq(2, explain.executionStats.nReturned); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); + +// Check that the DISTINCT_SCAN stage has the correct stats. 
+var stage = getPlanStage(explain.queryPlanner.winningPlan, "DISTINCT_SCAN"); +assert.eq({a: 1}, stage.keyPattern); +assert.eq("a_1", stage.indexName); +assert.eq(false, stage.isMultiKey); +assert.eq(false, stage.isUnique); +assert.eq(false, stage.isSparse); +assert.eq(false, stage.isPartial); +assert.lte(1, stage.indexVersion); +assert("indexBounds" in stage); + +assert.commandWorked(coll.createIndex({a: 1, b: 1})); + +assert.eq([1], coll.distinct('a', {a: 1})); +var explain = runDistinctExplain(coll, 'a', {a: 1}); +assert.commandWorked(explain); +assert.eq(1, explain.executionStats.nReturned); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); + +assert.eq([1], coll.distinct('b', {a: 1})); +var explain = runDistinctExplain(coll, 'b', {a: 1}); +assert.commandWorked(explain); +assert.eq(1, explain.executionStats.nReturned); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "FETCH")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "PROJECTION_COVERED")); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "DISTINCT_SCAN")); })(); diff --git a/jstests/core/explain_find_and_modify.js b/jstests/core/explain_find_and_modify.js index a0ba989dd0e..8b7c65d519e 100644 --- a/jstests/core/explain_find_and_modify.js +++ b/jstests/core/explain_find_and_modify.js @@ -12,300 +12,299 @@ * 5. The reported stats should reflect how the command would be executed. */ (function() { - "use strict"; - var cName = "explain_find_and_modify"; - var t = db.getCollection(cName); +"use strict"; +var cName = "explain_find_and_modify"; +var t = db.getCollection(cName); - // Different types of findAndModify explain requests. - var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}}; - var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}}; - var explainUpsert = { - explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true} - }; +// Different types of findAndModify explain requests. +var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}}; +var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}}; +var explainUpsert = { + explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true} +}; - // 1. Explaining findAndModify should never create a database. +// 1. Explaining findAndModify should never create a database. - // Make sure this one doesn't exist before we start. - assert.commandWorked(db.getSiblingDB(cName).runCommand({dropDatabase: 1})); - var newDB = db.getSiblingDB(cName); +// Make sure this one doesn't exist before we start. +assert.commandWorked(db.getSiblingDB(cName).runCommand({dropDatabase: 1})); +var newDB = db.getSiblingDB(cName); - // Explain the command, ensuring the database is not created. - var err_msg = "Explaining findAndModify on a non-existent database should return an error."; - assert.commandFailed(newDB.runCommand(explainRemove), err_msg); - assertDBDoesNotExist(newDB, "Explaining a remove should not create a database."); +// Explain the command, ensuring the database is not created. 
+var err_msg = "Explaining findAndModify on a non-existent database should return an error."; +assert.commandFailed(newDB.runCommand(explainRemove), err_msg); +assertDBDoesNotExist(newDB, "Explaining a remove should not create a database."); - assert.commandFailed(newDB.runCommand(explainUpsert), err_msg); - assertDBDoesNotExist(newDB, "Explaining an upsert should not create a database."); +assert.commandFailed(newDB.runCommand(explainUpsert), err_msg); +assertDBDoesNotExist(newDB, "Explaining an upsert should not create a database."); - // 2. Explaining findAndModify should never create a collection. +// 2. Explaining findAndModify should never create a collection. - // Insert a document to make sure the database exists. - t.insert({'will': 'be dropped'}); - // Make sure the collection doesn't exist. - t.drop(); +// Insert a document to make sure the database exists. +t.insert({'will': 'be dropped'}); +// Make sure the collection doesn't exist. +t.drop(); - // Explain the command, ensuring the collection is not created. - assert.commandWorked(db.runCommand(explainRemove)); - assertCollDoesNotExist(cName, "explaining a remove should not create a new collection."); +// Explain the command, ensuring the collection is not created. +assert.commandWorked(db.runCommand(explainRemove)); +assertCollDoesNotExist(cName, "explaining a remove should not create a new collection."); - assert.commandWorked(db.runCommand(explainUpsert)); - assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection."); +assert.commandWorked(db.runCommand(explainUpsert)); +assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection."); - assert.commandWorked(db.runCommand(Object.merge(explainUpsert, {fields: {x: 1}}))); - assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection."); +assert.commandWorked(db.runCommand(Object.merge(explainUpsert, {fields: {x: 1}}))); +assertCollDoesNotExist(cName, "explaining an upsert should not create a new collection."); - // 3. Explaining findAndModify should not work with an invalid findAndModify command object. +// 3. Explaining findAndModify should not work with an invalid findAndModify command object. - // Specifying both remove and new is illegal. - assert.commandFailed(db.runCommand({remove: true, new: true})); +// Specifying both remove and new is illegal. +assert.commandFailed(db.runCommand({remove: true, new: true})); - // 4. Explaining findAndModify should not modify any contents of the collection. - var onlyDoc = {_id: 0, i: 1}; - assert.writeOK(t.insert(onlyDoc)); +// 4. Explaining findAndModify should not modify any contents of the collection. +var onlyDoc = {_id: 0, i: 1}; +assert.writeOK(t.insert(onlyDoc)); - // Explaining a delete should not delete anything. - var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}}; - var res = db.runCommand({explain: matchingRemoveCmd}); - assert.commandWorked(res); - assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents."); +// Explaining a delete should not delete anything. +var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}}; +var res = db.runCommand({explain: matchingRemoveCmd}); +assert.commandWorked(res); +assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents."); - // Explaining an update should not update anything. 
- var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}}; - var res = db.runCommand({explain: matchingUpdateCmd}); - assert.commandWorked(res); - assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents."); +// Explaining an update should not update anything. +var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}}; +var res = db.runCommand({explain: matchingUpdateCmd}); +assert.commandWorked(res); +assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents."); - // Explaining an upsert should not insert anything. - var matchingUpsertCmd = - {findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true}; - var res = db.runCommand({explain: matchingUpsertCmd}); - assert.commandWorked(res); - assert.eq(t.find().itcount(), 1, "Explaining an upsert should not insert any documents."); +// Explaining an upsert should not insert anything. +var matchingUpsertCmd = + {findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true}; +var res = db.runCommand({explain: matchingUpsertCmd}); +assert.commandWorked(res); +assert.eq(t.find().itcount(), 1, "Explaining an upsert should not insert any documents."); - // 5. The reported stats should reflect how it would execute and what it would modify. - var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; +// 5. The reported stats should reflect how it would execute and what it would modify. +var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; - // List out the command to be explained, and the expected results of that explain. - var testCases = [ - // -------------------------------------- Removes ---------------------------------------- - { - // Non-matching remove command. - cmd: {remove: true, query: {_id: "no-match"}}, - expectedResult: { - executionStats: { - nReturned: 0, - executionSuccess: true, - executionStages: {stage: "DELETE", nWouldDelete: 0} - } - } - }, - { - // Matching remove command. - cmd: {remove: true, query: {_id: onlyDoc._id}}, - expectedResult: { - executionStats: { - nReturned: 1, - executionSuccess: true, - executionStages: {stage: "DELETE", nWouldDelete: 1} - } - } - }, - // -------------------------------------- Updates ---------------------------------------- - { - // Non-matching update query. - cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}}, - expectedResult: { - executionStats: { - nReturned: 0, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false} - } - } - }, - { - // Non-matching update query, returning new doc. - cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true}, - expectedResult: { - executionStats: { - nReturned: 0, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false} - } - } - }, - { - // Matching update query. - cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}}, - expectedResult: { - executionStats: { - nReturned: 1, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} - } - } - }, - { - // Matching update query, returning new doc. 
- cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true}, - expectedResult: { - executionStats: { - nReturned: 1, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} - } - } - }, - // -------------------------------------- Upserts ---------------------------------------- - { - // Non-matching upsert query. - cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}}, - expectedResult: { - executionStats: { - nReturned: 0, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true} - } - } - }, - { - // Non-matching upsert query, returning new doc. - cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true}, - expectedResult: { - executionStats: { - nReturned: 1, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true} - } - } - }, - { - // Matching upsert query, returning new doc. - cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true}, - expectedResult: { - executionStats: { - nReturned: 1, - executionSuccess: true, - executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} - } - } +// List out the command to be explained, and the expected results of that explain. +var testCases = [ + // -------------------------------------- Removes ---------------------------------------- + { + // Non-matching remove command. + cmd: {remove: true, query: {_id: "no-match"}}, + expectedResult: { + executionStats: { + nReturned: 0, + executionSuccess: true, + executionStages: {stage: "DELETE", nWouldDelete: 0} + } } - ]; - - // Apply all the same test cases, this time adding a projection stage. - testCases = testCases.concat(testCases.map(function makeProjection(testCase) { - return { - cmd: Object.merge(testCase.cmd, {fields: {i: 0}}), - expectedResult: { - executionStats: { - // nReturned Shouldn't change. - nReturned: testCase.expectedResult.executionStats.nReturned, - executionStages: { - stage: "PROJECTION_DEFAULT", - transformBy: {i: 0}, - // put previous root stage under projection stage. - inputStage: testCase.expectedResult.executionStats.executionStages - } - } + }, + { + // Matching remove command. + cmd: {remove: true, query: {_id: onlyDoc._id}}, + expectedResult: { + executionStats: { + nReturned: 1, + executionSuccess: true, + executionStages: {stage: "DELETE", nWouldDelete: 1} } - }; - })); - // Actually assert on the test cases. - testCases.forEach(function(testCase) { - assertExplainMatchedAllVerbosities(testCase.cmd, testCase.expectedResult); - }); - - // ----------------------------------------- Helpers ----------------------------------------- - - /** - * Helper to make this test work in the sharding passthrough suite. - * - * Transforms the explain output so that if it came from a mongos, it will be modified - * to have the same format as though it had come from a mongod. - */ - function transformIfSharded(explainOut) { - if (!isMongos) { - return explainOut; } + }, + // -------------------------------------- Updates ---------------------------------------- + { + // Non-matching update query. + cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}}, + expectedResult: { + executionStats: { + nReturned: 0, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false} + } + } + }, + { + // Non-matching update query, returning new doc. 
+ cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true}, + expectedResult: { + executionStats: { + nReturned: 0, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false} + } + } + }, + { + // Matching update query. + cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}}, + expectedResult: { + executionStats: { + nReturned: 1, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} + } + } + }, + { + // Matching update query, returning new doc. + cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true}, + expectedResult: { + executionStats: { + nReturned: 1, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} + } + } + }, + // -------------------------------------- Upserts ---------------------------------------- + { + // Non-matching upsert query. + cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}}, + expectedResult: { + executionStats: { + nReturned: 0, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true} + } + } + }, + { + // Non-matching upsert query, returning new doc. + cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true}, + expectedResult: { + executionStats: { + nReturned: 1, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true} + } + } + }, + { + // Matching upsert query, returning new doc. + cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true}, + expectedResult: { + executionStats: { + nReturned: 1, + executionSuccess: true, + executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false} + } + } + } +]; - // Asserts that the explain command ran on a single shard and modifies the given - // explain output to have a top-level UPDATE or DELETE stage by removing the - // top-level SINGLE_SHARD stage. - function replace(outerKey, innerKey) { - assert(explainOut.hasOwnProperty(outerKey)); - assert(explainOut[outerKey].hasOwnProperty(innerKey)); - - var shardStage = explainOut[outerKey][innerKey]; - assert.eq("SINGLE_SHARD", shardStage.stage); - assert.eq(1, shardStage.shards.length); - Object.extend(explainOut[outerKey], shardStage.shards[0], false); +// Apply all the same test cases, this time adding a projection stage. +testCases = testCases.concat(testCases.map(function makeProjection(testCase) { + return { + cmd: Object.merge(testCase.cmd, {fields: {i: 0}}), + expectedResult: { + executionStats: { + // nReturned Shouldn't change. + nReturned: testCase.expectedResult.executionStats.nReturned, + executionStages: { + stage: "PROJECTION_DEFAULT", + transformBy: {i: 0}, + // put previous root stage under projection stage. + inputStage: testCase.expectedResult.executionStats.executionStages + } + } } + }; +})); +// Actually assert on the test cases. +testCases.forEach(function(testCase) { + assertExplainMatchedAllVerbosities(testCase.cmd, testCase.expectedResult); +}); - replace("queryPlanner", "winningPlan"); - replace("executionStats", "executionStages"); +// ----------------------------------------- Helpers ----------------------------------------- +/** + * Helper to make this test work in the sharding passthrough suite. + * + * Transforms the explain output so that if it came from a mongos, it will be modified + * to have the same format as though it had come from a mongod. 
+ */ +function transformIfSharded(explainOut) { + if (!isMongos) { return explainOut; } - /** - * Assert the results from running the explain match the expected results. - * - * Since we aren't expecting a perfect match (we only specify a subset of the fields we expect - * to match), recursively go through the expected results, and make sure each one has a - * corresponding field on the actual results, and that their values match. - * Example doc for expectedMatches: - * {executionStats: {nReturned: 0, executionStages: {isEOF: 1}}} - */ - function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) { - // This is only used recursively, to keep track of where we are in the document. - var isRootLevel = typeof currentPath === "undefined"; - Object.keys(expectedMatches).forEach(function(key) { - var totalFieldName = isRootLevel ? key : currentPath + "." + key; - assert(explainOut.hasOwnProperty(key), - preMsg + "Explain's output does not have a value for " + key); - if (typeof expectedMatches[key] === "object") { - // Sub-doc, recurse to match on it's fields - assertExplainResultsMatch( - explainOut[key], expectedMatches[key], preMsg, totalFieldName); - } else { - assert.eq(explainOut[key], - expectedMatches[key], - preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" + - " does not match expected value (" + expectedMatches[key] + ")."); - } - }); - } + // Asserts that the explain command ran on a single shard and modifies the given + // explain output to have a top-level UPDATE or DELETE stage by removing the + // top-level SINGLE_SHARD stage. + function replace(outerKey, innerKey) { + assert(explainOut.hasOwnProperty(outerKey)); + assert(explainOut[outerKey].hasOwnProperty(innerKey)); - /** - * Assert that running explain on the given findAndModify command matches the expected results, - * on all the different verbosities (but just assert the command worked on the lowest verbosity, - * since it doesn't have any useful stats). - */ - function assertExplainMatchedAllVerbosities(findAndModifyArgs, expectedResult) { - ["queryPlanner", "executionStats", "allPlansExecution"].forEach(function(verbosityMode) { - var cmd = { - explain: Object.merge({findAndModify: cName}, findAndModifyArgs), - verbosity: verbosityMode - }; - var msg = "Error after running command: " + tojson(cmd) + ": "; - var explainOut = db.runCommand(cmd); - assert.commandWorked(explainOut, "command: " + tojson(cmd)); - // Don't check explain results for queryPlanner mode, as that doesn't have any of the - // interesting stats. - if (verbosityMode !== "queryPlanner") { - explainOut = transformIfSharded(explainOut); - assertExplainResultsMatch(explainOut, expectedResult, msg); - } - }); + var shardStage = explainOut[outerKey][innerKey]; + assert.eq("SINGLE_SHARD", shardStage.stage); + assert.eq(1, shardStage.shards.length); + Object.extend(explainOut[outerKey], shardStage.shards[0], false); } - function assertDBDoesNotExist(db, msg) { - assert.eq(db.getMongo().getDBNames().indexOf(db.getName()), - -1, - msg + "db " + db.getName() + " exists."); - } + replace("queryPlanner", "winningPlan"); + replace("executionStats", "executionStages"); - function assertCollDoesNotExist(cName, msg) { - assert.eq( - db.getCollectionNames().indexOf(cName), -1, msg + "collection " + cName + " exists."); - } + return explainOut; +} + +/** + * Assert the results from running the explain match the expected results. 
+ *
+ * Since we aren't expecting a perfect match (we only specify a subset of the fields we expect
+ * to match), recursively go through the expected results, and make sure each one has a
+ * corresponding field on the actual results, and that their values match.
+ * Example doc for expectedMatches:
+ *     {executionStats: {nReturned: 0, executionStages: {isEOF: 1}}}
+ */
+function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) {
+    // This is only used recursively, to keep track of where we are in the document.
+    var isRootLevel = typeof currentPath === "undefined";
+    Object.keys(expectedMatches).forEach(function(key) {
+        var totalFieldName = isRootLevel ? key : currentPath + "." + key;
+        assert(explainOut.hasOwnProperty(key),
+               preMsg + "Explain's output does not have a value for " + key);
+        if (typeof expectedMatches[key] === "object") {
+            // Sub-doc, recurse to match on its fields
+            assertExplainResultsMatch(
+                explainOut[key], expectedMatches[key], preMsg, totalFieldName);
+        } else {
+            assert.eq(explainOut[key],
+                      expectedMatches[key],
+                      preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
+                          " does not match expected value (" + expectedMatches[key] + ").");
+        }
+    });
+}
+
+/**
+ * Assert that running explain on the given findAndModify command matches the expected results,
+ * on all the different verbosities (but just assert the command worked on the lowest verbosity,
+ * since it doesn't have any useful stats).
+ */
+function assertExplainMatchedAllVerbosities(findAndModifyArgs, expectedResult) {
+    ["queryPlanner", "executionStats", "allPlansExecution"].forEach(function(verbosityMode) {
+        var cmd = {
+            explain: Object.merge({findAndModify: cName}, findAndModifyArgs),
+            verbosity: verbosityMode
+        };
+        var msg = "Error after running command: " + tojson(cmd) + ": ";
+        var explainOut = db.runCommand(cmd);
+        assert.commandWorked(explainOut, "command: " + tojson(cmd));
+        // Don't check explain results for queryPlanner mode, as that doesn't have any of the
+        // interesting stats.
+ if (verbosityMode !== "queryPlanner") { + explainOut = transformIfSharded(explainOut); + assertExplainResultsMatch(explainOut, expectedResult, msg); + } + }); +} + +function assertDBDoesNotExist(db, msg) { + assert.eq(db.getMongo().getDBNames().indexOf(db.getName()), + -1, + msg + "db " + db.getName() + " exists."); +} + +function assertCollDoesNotExist(cName, msg) { + assert.eq(db.getCollectionNames().indexOf(cName), -1, msg + "collection " + cName + " exists."); +} })(); diff --git a/jstests/core/explain_missing_collection.js b/jstests/core/explain_missing_collection.js index e129fb7f16a..c186d3015a4 100644 --- a/jstests/core/explain_missing_collection.js +++ b/jstests/core/explain_missing_collection.js @@ -3,45 +3,45 @@ * @tags: [assumes_no_implicit_collection_creation_after_drop] */ (function() { - var missingColl = db.explain_null_collection; +var missingColl = db.explain_null_collection; - var explain; - var explainColl; +var explain; +var explainColl; - // .find() - missingColl.drop(); - explain = missingColl.explain("executionStats").find().finish(); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .find() +missingColl.drop(); +explain = missingColl.explain("executionStats").find().finish(); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .count() - missingColl.drop(); - explain = missingColl.explain("executionStats").count(); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .count() +missingColl.drop(); +explain = missingColl.explain("executionStats").count(); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .remove() - missingColl.drop(); - explain = missingColl.explain("executionStats").remove({a: 1}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .remove() +missingColl.drop(); +explain = missingColl.explain("executionStats").remove({a: 1}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .update() with upsert: false - missingColl.drop(); - explainColl = missingColl.explain("executionStats"); - explain = explainColl.update({a: 1}, {b: 1}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .update() with upsert: false +missingColl.drop(); +explainColl = missingColl.explain("executionStats"); +explain = explainColl.update({a: 1}, {b: 1}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .update() with upsert: true - missingColl.drop(); - explainColl = missingColl.explain("executionStats"); - explain = explainColl.update({a: 1}, {b: 1}, {upsert: true}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .update() with upsert: true +missingColl.drop(); +explainColl = missingColl.explain("executionStats"); +explain = explainColl.update({a: 1}, {b: 1}, {upsert: true}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .aggregate() - missingColl.drop(); - explain = missingColl.explain("executionStats").aggregate([{$match: {a: 1}}]); - assert.commandWorked(explain); +// .aggregate() +missingColl.drop(); +explain = missingColl.explain("executionStats").aggregate([{$match: {a: 1}}]); +assert.commandWorked(explain); }()); diff --git a/jstests/core/explain_missing_database.js b/jstests/core/explain_missing_database.js index 93123086bde..a1eb89e10e4 100644 --- a/jstests/core/explain_missing_database.js +++ b/jstests/core/explain_missing_database.js @@ -1,44 +1,44 @@ // Test explain of various operations against a non-existent 
database (function() { - var explainMissingDb = db.getSiblingDB("explainMissingDb"); +var explainMissingDb = db.getSiblingDB("explainMissingDb"); - var explain; - var explainColl; +var explain; +var explainColl; - // .find() - explainMissingDb.dropDatabase(); - explain = explainMissingDb.collection.explain("executionStats").find().finish(); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .find() +explainMissingDb.dropDatabase(); +explain = explainMissingDb.collection.explain("executionStats").find().finish(); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .count() - explainMissingDb.dropDatabase(); - explain = explainMissingDb.collection.explain("executionStats").count(); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .count() +explainMissingDb.dropDatabase(); +explain = explainMissingDb.collection.explain("executionStats").count(); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .remove() - explainMissingDb.dropDatabase(); - explain = explainMissingDb.collection.explain("executionStats").remove({a: 1}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .remove() +explainMissingDb.dropDatabase(); +explain = explainMissingDb.collection.explain("executionStats").remove({a: 1}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .update() with upsert: false - explainMissingDb.dropDatabase(); - explainColl = explainMissingDb.collection.explain("executionStats"); - explain = explainColl.update({a: 1}, {b: 1}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .update() with upsert: false +explainMissingDb.dropDatabase(); +explainColl = explainMissingDb.collection.explain("executionStats"); +explain = explainColl.update({a: 1}, {b: 1}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .update() with upsert: true - explainMissingDb.dropDatabase(); - explainColl = explainMissingDb.collection.explain("executionStats"); - explain = explainColl.update({a: 1}, {b: 1}, {upsert: true}); - assert.commandWorked(explain); - assert("executionStats" in explain); +// .update() with upsert: true +explainMissingDb.dropDatabase(); +explainColl = explainMissingDb.collection.explain("executionStats"); +explain = explainColl.update({a: 1}, {b: 1}, {upsert: true}); +assert.commandWorked(explain); +assert("executionStats" in explain); - // .aggregate() - explainMissingDb.dropDatabase(); - explain = explainMissingDb.collection.explain("executionStats").aggregate([{$match: {a: 1}}]); - assert.commandWorked(explain); +// .aggregate() +explainMissingDb.dropDatabase(); +explain = explainMissingDb.collection.explain("executionStats").aggregate([{$match: {a: 1}}]); +assert.commandWorked(explain); }()); diff --git a/jstests/core/explain_multi_plan.js b/jstests/core/explain_multi_plan.js index 956cc41e0df..1b2b0d6cdb6 100644 --- a/jstests/core/explain_multi_plan.js +++ b/jstests/core/explain_multi_plan.js @@ -10,71 +10,71 @@ * there are multiple plans available. This is a regression test for SERVER-20849 and SERVER-21376. */ (function() { - "use strict"; - var coll = db.explainMultiPlan; - coll.drop(); +"use strict"; +var coll = db.explainMultiPlan; +coll.drop(); - // Create indices to ensure there are multiple plans available. - assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); - assert.commandWorked(coll.ensureIndex({a: 1, b: -1})); +// Create indices to ensure there are multiple plans available. 
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); +assert.commandWorked(coll.ensureIndex({a: 1, b: -1})); - // Insert some data to work with. - var bulk = coll.initializeOrderedBulkOp(); - var nDocs = 100; - for (var i = 0; i < nDocs; ++i) { - bulk.insert({a: i, b: nDocs - i}); - } - bulk.execute(); +// Insert some data to work with. +var bulk = coll.initializeOrderedBulkOp(); +var nDocs = 100; +for (var i = 0; i < nDocs; ++i) { + bulk.insert({a: i, b: nDocs - i}); +} +bulk.execute(); - // SERVER-20849: The following commands should not crash the server. - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").update({a: {$gte: 1}}, {$set: {x: 0}}); - }); +// SERVER-20849: The following commands should not crash the server. +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").update({a: {$gte: 1}}, {$set: {x: 0}}); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").remove({a: {$gte: 1}}); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").remove({a: {$gte: 1}}); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, remove: true}); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, remove: true}); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, update: {y: 1}}); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").findAndModify({query: {a: {$gte: 1}}, update: {y: 1}}); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").find({a: {$gte: 1}}).finish(); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").find({a: {$gte: 1}}).finish(); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").count({a: {$gte: 1}}); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").count({a: {$gte: 1}}); +}); - assert.doesNotThrow(function() { - coll.explain("allPlansExecution").distinct("a", {a: {$gte: 1}}); - }); +assert.doesNotThrow(function() { + coll.explain("allPlansExecution").distinct("a", {a: {$gte: 1}}); +}); - // SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately. - function assertHasRejectedPlans(explainOutput) { - var queryPlannerOutput = explainOutput.queryPlanner; +// SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately. +function assertHasRejectedPlans(explainOutput) { + var queryPlannerOutput = explainOutput.queryPlanner; - // The 'rejectedPlans' section will be in a different place if passed through a mongos. - if ("SINGLE_SHARD" == queryPlannerOutput.winningPlan.stage) { - var shards = queryPlannerOutput.winningPlan.shards; - shards.forEach(function assertShardHasRejectedPlans(shard) { - assert.gt(shard.rejectedPlans.length, 0); - }); - } else { - assert.gt(queryPlannerOutput.rejectedPlans.length, 0); - } + // The 'rejectedPlans' section will be in a different place if passed through a mongos. 
+ if ("SINGLE_SHARD" == queryPlannerOutput.winningPlan.stage) { + var shards = queryPlannerOutput.winningPlan.shards; + shards.forEach(function assertShardHasRejectedPlans(shard) { + assert.gt(shard.rejectedPlans.length, 0); + }); + } else { + assert.gt(queryPlannerOutput.rejectedPlans.length, 0); } +} - var res = coll.explain("queryPlanner").find({a: {$gte: 1}}).finish(); - assert.commandWorked(res); - assertHasRejectedPlans(res); +var res = coll.explain("queryPlanner").find({a: {$gte: 1}}).finish(); +assert.commandWorked(res); +assertHasRejectedPlans(res); - res = coll.explain("executionStats").find({a: {$gte: 1}}).finish(); - assert.commandWorked(res); - assertHasRejectedPlans(res); +res = coll.explain("executionStats").find({a: {$gte: 1}}).finish(); +assert.commandWorked(res); +assertHasRejectedPlans(res); }()); diff --git a/jstests/core/explain_multikey.js b/jstests/core/explain_multikey.js index 91763555ffb..1ec20bb4552 100644 --- a/jstests/core/explain_multikey.js +++ b/jstests/core/explain_multikey.js @@ -4,79 +4,79 @@ // be the case on all shards. // @tags: [assumes_unsharded_collection] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - var coll = db.explain_multikey; - var keyPattern = { - a: 1, - "b.c": 1, - "b.d": 1, - }; +var coll = db.explain_multikey; +var keyPattern = { + a: 1, + "b.c": 1, + "b.d": 1, +}; - /** - * Creates an index with a key pattern of 'keyPattern' on a collection containing a single - * document and runs the specified command under explain. - * - * @param {Object} testOptions - * @param {Object} testOptions.docToInsert - The document to insert into the collection. - * @param {Object} testOptions.commandObj - The operation to run "explain" on. - * @param {string} testOptions.stage - The plan summary name of the winning plan. - * - * @returns {Object} The "queryPlanner" information of the stage with the specified plan summary - * name. - */ - function createIndexAndRunExplain(testOptions) { - coll.drop(); +/** + * Creates an index with a key pattern of 'keyPattern' on a collection containing a single + * document and runs the specified command under explain. + * + * @param {Object} testOptions + * @param {Object} testOptions.docToInsert - The document to insert into the collection. + * @param {Object} testOptions.commandObj - The operation to run "explain" on. + * @param {string} testOptions.stage - The plan summary name of the winning plan. + * + * @returns {Object} The "queryPlanner" information of the stage with the specified plan summary + * name. 
+ */ +function createIndexAndRunExplain(testOptions) { + coll.drop(); - assert.commandWorked(coll.createIndex(keyPattern)); - assert.writeOK(coll.insert(testOptions.docToInsert)); + assert.commandWorked(coll.createIndex(keyPattern)); + assert.writeOK(coll.insert(testOptions.docToInsert)); - var explain = db.runCommand({explain: testOptions.commandObj}); - assert.commandWorked(explain); + var explain = db.runCommand({explain: testOptions.commandObj}); + assert.commandWorked(explain); - assert(planHasStage(db, explain.queryPlanner.winningPlan, testOptions.stage), - "expected stage to be present: " + tojson(explain)); - return getPlanStage(explain.queryPlanner.winningPlan, testOptions.stage); - } + assert(planHasStage(db, explain.queryPlanner.winningPlan, testOptions.stage), + "expected stage to be present: " + tojson(explain)); + return getPlanStage(explain.queryPlanner.winningPlan, testOptions.stage); +} - // Calls createIndexAndRunExplain() twice: once with a document that causes the created index to - // be multikey, and again with a document that doesn't cause the created index to be multikey. - function verifyMultikeyInfoInExplainOutput(testOptions) { - // Insert a document that should cause the index to be multikey. - testOptions.docToInsert = { - a: 1, - b: [{c: ["w", "x"], d: 3}, {c: ["y", "z"], d: 4}], - }; - var stage = createIndexAndRunExplain(testOptions); +// Calls createIndexAndRunExplain() twice: once with a document that causes the created index to +// be multikey, and again with a document that doesn't cause the created index to be multikey. +function verifyMultikeyInfoInExplainOutput(testOptions) { + // Insert a document that should cause the index to be multikey. + testOptions.docToInsert = { + a: 1, + b: [{c: ["w", "x"], d: 3}, {c: ["y", "z"], d: 4}], + }; + var stage = createIndexAndRunExplain(testOptions); - assert.eq(true, stage.isMultiKey, "expected index to be multikey: " + tojson(stage)); - assert.eq({a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage)); + assert.eq(true, stage.isMultiKey, "expected index to be multikey: " + tojson(stage)); + assert.eq({a: [], "b.c": ["b", "b.c"], "b.d": ["b"]}, stage.multiKeyPaths, tojson(stage)); - // Drop the collection and insert a document that shouldn't cause the index to be multikey. - testOptions.docToInsert = { - a: 1, - b: {c: "w", d: 4}, - }; - stage = createIndexAndRunExplain(testOptions); + // Drop the collection and insert a document that shouldn't cause the index to be multikey. 
+ testOptions.docToInsert = { + a: 1, + b: {c: "w", d: 4}, + }; + stage = createIndexAndRunExplain(testOptions); - assert.eq(false, stage.isMultiKey, "expected index not to be multikey: " + tojson(stage)); - assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage)); - } + assert.eq(false, stage.isMultiKey, "expected index not to be multikey: " + tojson(stage)); + assert.eq({a: [], "b.c": [], "b.d": []}, stage.multiKeyPaths, tojson(stage)); +} - verifyMultikeyInfoInExplainOutput({ - commandObj: {find: coll.getName(), hint: keyPattern}, - stage: "IXSCAN", - }); +verifyMultikeyInfoInExplainOutput({ + commandObj: {find: coll.getName(), hint: keyPattern}, + stage: "IXSCAN", +}); - verifyMultikeyInfoInExplainOutput({ - commandObj: {count: coll.getName(), hint: keyPattern}, - stage: "COUNT_SCAN", - }); +verifyMultikeyInfoInExplainOutput({ + commandObj: {count: coll.getName(), hint: keyPattern}, + stage: "COUNT_SCAN", +}); - verifyMultikeyInfoInExplainOutput({ - commandObj: {distinct: coll.getName(), key: "a"}, - stage: "DISTINCT_SCAN", - }); +verifyMultikeyInfoInExplainOutput({ + commandObj: {distinct: coll.getName(), key: "a"}, + stage: "DISTINCT_SCAN", +}); })(); diff --git a/jstests/core/explain_sample.js b/jstests/core/explain_sample.js index 8bc7b53906f..bb8ea6d54ef 100644 --- a/jstests/core/explain_sample.js +++ b/jstests/core/explain_sample.js @@ -4,39 +4,39 @@ * @tags: [requires_wiredtiger] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - // Although this test is tagged with 'requires_wiredtiger', this is not sufficient for ensuring - // that the parallel suite runs this test only on WT configurations. - if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") { - jsTest.log("Skipping test on non-WT storage engine: " + jsTest.options().storageEngine); - return; - } +// Although this test is tagged with 'requires_wiredtiger', this is not sufficient for ensuring +// that the parallel suite runs this test only on WT configurations. +if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") { + jsTest.log("Skipping test on non-WT storage engine: " + jsTest.options().storageEngine); + return; +} - const coll = db.explain_sample; - coll.drop(); +const coll = db.explain_sample; +coll.drop(); - let docsToInsert = []; - for (let i = 0; i < 1000; ++i) { - docsToInsert.push({_id: i}); - } - assert.commandWorked(coll.insert(docsToInsert)); +let docsToInsert = []; +for (let i = 0; i < 1000; ++i) { + docsToInsert.push({_id: i}); +} +assert.commandWorked(coll.insert(docsToInsert)); - // Verify that explain reports execution stats for the MULTI_ITERATOR stage. This is designed to - // reproduce SERVER-35973. - const explain = - assert.commandWorked(coll.explain("allPlansExecution").aggregate([{$sample: {size: 10}}])); - const multiIteratorStages = getAggPlanStages(explain, "MULTI_ITERATOR"); - assert.gt(multiIteratorStages.length, 0, tojson(explain)); - assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.nReturned, 0), - 0, - tojson(multiIteratorStages)); - assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0), - 0, - tojson(multiIteratorStages)); - assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0), - 0, - tojson(multiIteratorStages)); +// Verify that explain reports execution stats for the MULTI_ITERATOR stage. This is designed to +// reproduce SERVER-35973. 
+const explain = + assert.commandWorked(coll.explain("allPlansExecution").aggregate([{$sample: {size: 10}}])); +const multiIteratorStages = getAggPlanStages(explain, "MULTI_ITERATOR"); +assert.gt(multiIteratorStages.length, 0, tojson(explain)); +assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.nReturned, 0), + 0, + tojson(multiIteratorStages)); +assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0), + 0, + tojson(multiIteratorStages)); +assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0), + 0, + tojson(multiIteratorStages)); }()); diff --git a/jstests/core/explain_uuid.js b/jstests/core/explain_uuid.js index be5e9e01adb..bee56d4b2a9 100644 --- a/jstests/core/explain_uuid.js +++ b/jstests/core/explain_uuid.js @@ -3,54 +3,53 @@ * cleanly. */ (function() { - "use strict"; - - // Use our own database so that we're guaranteed the only collection is this one. - const explainDB = db.getSiblingDB("explain_uuid_db"); - - assert.commandWorked(explainDB.dropDatabase()); - - const coll = explainDB.explain_uuid; - assert.commandWorked(coll.insert({a: 1})); - - const collInfos = explainDB.getCollectionInfos({name: coll.getName()}); - assert.eq(collInfos.length, 1, collInfos); - const uuid = collInfos[0].info.uuid; - - // Run a find explain looking up by UUID. - assert.commandFailedWithCode(explainDB.runCommand({explain: {find: uuid}}), - ErrorCodes.InvalidNamespace); - - // Do similar for other commands. - assert.commandFailedWithCode(explainDB.runCommand({explain: {aggregate: uuid, cursor: {}}}), - ErrorCodes.TypeMismatch); - - assert.commandFailedWithCode(explainDB.runCommand({explain: {count: uuid}}), - ErrorCodes.InvalidNamespace); - - assert.commandFailedWithCode(explainDB.runCommand({explain: {distinct: uuid, key: "x"}}), - ErrorCodes.InvalidNamespace); - - // When auth is enabled, running findAndModify with an invalid namespace will produce a special - // error during the auth check, rather than the generic 'InvalidNamespace' error. - const expectedCode = TestData.auth ? 17137 : ErrorCodes.InvalidNamespace; - assert.commandFailedWithCode( - explainDB.runCommand({explain: {findAndModify: uuid, query: {a: 1}, remove: true}}), - expectedCode); - - assert.commandFailedWithCode( - explainDB.runCommand({explain: {delete: uuid, deletes: [{q: {}, limit: 1}]}}), - ErrorCodes.BadValue); - - assert.commandFailedWithCode(explainDB.runCommand({ - explain: { - update: uuid, - updates: [{ - q: {a: 1}, - u: {$set: {b: 1}}, - }] - } - }), - ErrorCodes.BadValue); - +"use strict"; + +// Use our own database so that we're guaranteed the only collection is this one. +const explainDB = db.getSiblingDB("explain_uuid_db"); + +assert.commandWorked(explainDB.dropDatabase()); + +const coll = explainDB.explain_uuid; +assert.commandWorked(coll.insert({a: 1})); + +const collInfos = explainDB.getCollectionInfos({name: coll.getName()}); +assert.eq(collInfos.length, 1, collInfos); +const uuid = collInfos[0].info.uuid; + +// Run a find explain looking up by UUID. +assert.commandFailedWithCode(explainDB.runCommand({explain: {find: uuid}}), + ErrorCodes.InvalidNamespace); + +// Do similar for other commands. 
+assert.commandFailedWithCode(explainDB.runCommand({explain: {aggregate: uuid, cursor: {}}}), + ErrorCodes.TypeMismatch); + +assert.commandFailedWithCode(explainDB.runCommand({explain: {count: uuid}}), + ErrorCodes.InvalidNamespace); + +assert.commandFailedWithCode(explainDB.runCommand({explain: {distinct: uuid, key: "x"}}), + ErrorCodes.InvalidNamespace); + +// When auth is enabled, running findAndModify with an invalid namespace will produce a special +// error during the auth check, rather than the generic 'InvalidNamespace' error. +const expectedCode = TestData.auth ? 17137 : ErrorCodes.InvalidNamespace; +assert.commandFailedWithCode( + explainDB.runCommand({explain: {findAndModify: uuid, query: {a: 1}, remove: true}}), + expectedCode); + +assert.commandFailedWithCode( + explainDB.runCommand({explain: {delete: uuid, deletes: [{q: {}, limit: 1}]}}), + ErrorCodes.BadValue); + +assert.commandFailedWithCode(explainDB.runCommand({ + explain: { + update: uuid, + updates: [{ + q: {a: 1}, + u: {$set: {b: 1}}, + }] + } +}), + ErrorCodes.BadValue); })(); diff --git a/jstests/core/explain_writecmd_nonexistent_collection.js b/jstests/core/explain_writecmd_nonexistent_collection.js index 2d3080357b5..2496f4b63a0 100644 --- a/jstests/core/explain_writecmd_nonexistent_collection.js +++ b/jstests/core/explain_writecmd_nonexistent_collection.js @@ -3,38 +3,38 @@ // @tags: [requires_non_retryable_writes, requires_fastcount, // assumes_no_implicit_collection_creation_after_drop] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - function assertCollectionDoesNotExist(collName) { - const collectionList = db.getCollectionInfos({name: collName}); - assert.eq(0, collectionList.length, collectionList); - } +function assertCollectionDoesNotExist(collName) { + const collectionList = db.getCollectionInfos({name: collName}); + assert.eq(0, collectionList.length, collectionList); +} - const collName = "explain_delete_nonexistent_collection"; - const coll = db[collName]; - coll.drop(); +const collName = "explain_delete_nonexistent_collection"; +const coll = db[collName]; +coll.drop(); - // Explain of delete against a non-existent collection returns an EOF plan. - let explain = assert.commandWorked( - db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}})); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DELETE"), explain); +// Explain of delete against a non-existent collection returns an EOF plan. +let explain = assert.commandWorked( + db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}})); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "DELETE"), explain); - assertCollectionDoesNotExist(collName); +assertCollectionDoesNotExist(collName); - // Explain of an update with upsert:false returns an EOF plan. - explain = assert.commandWorked(db.runCommand( - {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: false}]}})); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain); - assertCollectionDoesNotExist(collName); +// Explain of an update with upsert:false returns an EOF plan. 
+explain = assert.commandWorked(db.runCommand( + {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: false}]}})); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain); +assertCollectionDoesNotExist(collName); - // Explain of an update with upsert:true returns an EOF plan, and does not create a collection. - explain = assert.commandWorked(db.runCommand( - {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}})); - assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain); - assertCollectionDoesNotExist(collName); +// Explain of an update with upsert:true returns an EOF plan, and does not create a collection. +explain = assert.commandWorked(db.runCommand( + {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}})); +assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain); +assertCollectionDoesNotExist(collName); }()); diff --git a/jstests/core/expr.js b/jstests/core/expr.js index f0a463a22fb..78ef8b87f0c 100644 --- a/jstests/core/expr.js +++ b/jstests/core/expr.js @@ -6,317 +6,316 @@ // Tests for $expr in the CRUD commands. (function() { - "use strict"; - - const coll = db.expr; - - const isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - const isMongos = (isMaster.msg === "isdbgrid"); - - // - // $expr in aggregate. - // - - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$a", 0]}}}]).itcount()); - assert.throws(function() { - coll.aggregate([{$match: {$expr: {$eq: ["$a", "$$unbound"]}}}]); - }); - assert.throws(function() { - coll.aggregate([{$match: {$expr: {$divide: [1, "$a"]}}}]); - }); - - // - // $expr in count. - // - - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).count()); - assert.throws(function() { - coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).count(); - }); - assert.throws(function() { - coll.find({$expr: {$divide: [1, "$a"]}}).count(); - }); - - // - // $expr in distinct. - // - - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - assert.eq(1, coll.distinct("a", {$expr: {$eq: ["$a", 0]}}).length); - assert.throws(function() { - coll.distinct("a", {$expr: {$eq: ["$a", "$$unbound"]}}); - }); - assert.throws(function() { - coll.distinct("a", {$expr: {$divide: [1, "$a"]}}); - }); - - // - // $expr in find. - // - - // $expr is allowed in query. - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).itcount()); - - // $expr with time zone expression across getMore (SERVER-31664). - coll.drop(); - assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); - - let res = assert.commandWorked(db.runCommand({ - find: coll.getName(), - filter: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}, - batchSize: 0 - })); - assert.eq(0, res.cursor.firstBatch.length); - - let cursorId = res.cursor.id; - res = assert.commandWorked(db.runCommand({getMore: cursorId, collection: coll.getName()})); - assert.eq(1, res.cursor.nextBatch.length); - - // $expr with unbound variable throws. 
- assert.throws(function() { - coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).itcount(); - }); - - // $and with $expr child containing an invalid expression throws. - assert.throws(function() { - coll.find({$and: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); - }); - - // $or with $expr child containing an invalid expression throws. - assert.throws(function() { - coll.find({$or: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); - }); - - // $nor with $expr child containing an invalid expression throws. - assert.throws(function() { - coll.find({$nor: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); - }); - - // $expr with division by zero throws. - assert.throws(function() { - coll.find({$expr: {$divide: [1, "$a"]}}).itcount(); - }); - - // $expr is allowed in find with explain. - assert.commandWorked(coll.find({$expr: {$eq: ["$a", 0]}}).explain()); - - // $expr with unbound variable in find with explain throws. - assert.throws(function() { - coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).explain(); - }); - - // $expr with division by zero in find with explain with executionStats throws. - assert.throws(function() { - coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats"); - }); - - // $expr is not allowed in $elemMatch projection. - coll.drop(); - assert.writeOK(coll.insert({a: [{b: 5}]})); - assert.throws(function() { - coll.find({}, {a: {$elemMatch: {$expr: {$eq: ["$b", 5]}}}}).itcount(); - }); - - // - // $expr in findAndModify. - // - - // $expr is allowed in the query when upsert=false. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 0})); - assert.eq({_id: 0, a: 0, b: 6}, - coll.findAndModify( - {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, new: true})); - - // $expr with unbound variable throws. - assert.throws(function() { - coll.findAndModify( - {query: {_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, update: {$set: {b: 6}}}); - }); - - // $expr with division by zero throws. - assert.throws(function() { - coll.findAndModify({query: {_id: 0, $expr: {$divide: [1, "$a"]}}, update: {$set: {b: 6}}}); - }); - - // $expr is not allowed in the query when upsert=true. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 0})); - assert.throws(function() { - coll.findAndModify( - {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, upsert: true}); - }); - - // $expr is not allowed in $pull filter. +"use strict"; + +const coll = db.expr; + +const isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +const isMongos = (isMaster.msg === "isdbgrid"); + +// +// $expr in aggregate. +// + +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +assert.eq(1, coll.aggregate([{$match: {$expr: {$eq: ["$a", 0]}}}]).itcount()); +assert.throws(function() { + coll.aggregate([{$match: {$expr: {$eq: ["$a", "$$unbound"]}}}]); +}); +assert.throws(function() { + coll.aggregate([{$match: {$expr: {$divide: [1, "$a"]}}}]); +}); + +// +// $expr in count. +// + +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).count()); +assert.throws(function() { + coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).count(); +}); +assert.throws(function() { + coll.find({$expr: {$divide: [1, "$a"]}}).count(); +}); + +// +// $expr in distinct. 
+// + +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +assert.eq(1, coll.distinct("a", {$expr: {$eq: ["$a", 0]}}).length); +assert.throws(function() { + coll.distinct("a", {$expr: {$eq: ["$a", "$$unbound"]}}); +}); +assert.throws(function() { + coll.distinct("a", {$expr: {$divide: [1, "$a"]}}); +}); + +// +// $expr in find. +// + +// $expr is allowed in query. +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +assert.eq(1, coll.find({$expr: {$eq: ["$a", 0]}}).itcount()); + +// $expr with time zone expression across getMore (SERVER-31664). +coll.drop(); +assert.writeOK(coll.insert({a: ISODate("2017-10-01T22:00:00")})); + +let res = assert.commandWorked(db.runCommand({ + find: coll.getName(), + filter: {$expr: {$eq: [1, {$dayOfMonth: {date: "$a", timezone: "America/New_York"}}]}}, + batchSize: 0 +})); +assert.eq(0, res.cursor.firstBatch.length); + +let cursorId = res.cursor.id; +res = assert.commandWorked(db.runCommand({getMore: cursorId, collection: coll.getName()})); +assert.eq(1, res.cursor.nextBatch.length); + +// $expr with unbound variable throws. +assert.throws(function() { + coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).itcount(); +}); + +// $and with $expr child containing an invalid expression throws. +assert.throws(function() { + coll.find({$and: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); +}); + +// $or with $expr child containing an invalid expression throws. +assert.throws(function() { + coll.find({$or: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); +}); + +// $nor with $expr child containing an invalid expression throws. +assert.throws(function() { + coll.find({$nor: [{a: 0}, {$expr: {$anyElementTrue: undefined}}]}).itcount(); +}); + +// $expr with division by zero throws. +assert.throws(function() { + coll.find({$expr: {$divide: [1, "$a"]}}).itcount(); +}); + +// $expr is allowed in find with explain. +assert.commandWorked(coll.find({$expr: {$eq: ["$a", 0]}}).explain()); + +// $expr with unbound variable in find with explain throws. +assert.throws(function() { + coll.find({$expr: {$eq: ["$a", "$$unbound"]}}).explain(); +}); + +// $expr with division by zero in find with explain with executionStats throws. +assert.throws(function() { + coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats"); +}); + +// $expr is not allowed in $elemMatch projection. +coll.drop(); +assert.writeOK(coll.insert({a: [{b: 5}]})); +assert.throws(function() { + coll.find({}, {a: {$elemMatch: {$expr: {$eq: ["$b", 5]}}}}).itcount(); +}); + +// +// $expr in findAndModify. +// + +// $expr is allowed in the query when upsert=false. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 0})); +assert.eq({_id: 0, a: 0, b: 6}, + coll.findAndModify( + {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, new: true})); + +// $expr with unbound variable throws. +assert.throws(function() { + coll.findAndModify( + {query: {_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, update: {$set: {b: 6}}}); +}); + +// $expr with division by zero throws. +assert.throws(function() { + coll.findAndModify({query: {_id: 0, $expr: {$divide: [1, "$a"]}}, update: {$set: {b: 6}}}); +}); + +// $expr is not allowed in the query when upsert=true. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 0})); +assert.throws(function() { + coll.findAndModify( + {query: {_id: 0, $expr: {$eq: ["$a", 0]}}, update: {$set: {b: 6}}, upsert: true}); +}); + +// $expr is not allowed in $pull filter. 
+coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); +assert.throws(function() { + coll.findAndModify({query: {_id: 0}, update: {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}}); +}); + +// $expr is not allowed in arrayFilters. +if (db.getMongo().writeMode() === "commands") { coll.drop(); assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); assert.throws(function() { - coll.findAndModify({query: {_id: 0}, update: {$pull: {a: {$expr: {$eq: ["$b", 5]}}}}}); - }); - - // $expr is not allowed in arrayFilters. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); - assert.throws(function() { - coll.findAndModify({ - query: {_id: 0}, - update: {$set: {"a.$[i].b": 6}}, - arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}] - }); + coll.findAndModify({ + query: {_id: 0}, + update: {$set: {"a.$[i].b": 6}}, + arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}] }); + }); +} + +// +// $expr in the $geoNear stage. +// + +coll.drop(); +assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0})); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +assert.eq(1, + coll.aggregate({ + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + distanceField: "dist", + spherical: true, + query: {$expr: {$eq: ["$a", 0]}} + } + }) + .toArray() + .length); +assert.throws(() => coll.aggregate({ + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + distanceField: "dist", + spherical: true, + query: {$expr: {$eq: ["$a", "$$unbound"]}} } - - // - // $expr in the $geoNear stage. - // - - coll.drop(); - assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0})); - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - assert.eq(1, - coll.aggregate({ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - distanceField: "dist", - spherical: true, - query: {$expr: {$eq: ["$a", 0]}} - } - }) - .toArray() - .length); - assert.throws(() => coll.aggregate({ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - distanceField: "dist", - spherical: true, - query: {$expr: {$eq: ["$a", "$$unbound"]}} - } - })); - assert.throws(() => coll.aggregate({ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - distanceField: "dist", - spherical: true, - query: {$expr: {$divide: [1, "$a"]}} - } - })); - - // - // $expr in mapReduce. - // - - coll.drop(); - assert.writeOK(coll.insert({a: 0})); - let mapReduceOut = coll.mapReduce( +})); +assert.throws(() => coll.aggregate({ + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + distanceField: "dist", + spherical: true, + query: {$expr: {$divide: [1, "$a"]}} + } +})); + +// +// $expr in mapReduce. 
+// + +coll.drop(); +assert.writeOK(coll.insert({a: 0})); +let mapReduceOut = coll.mapReduce( + function() { + emit(this.a, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {$expr: {$eq: ["$a", 0]}}}); +assert.commandWorked(mapReduceOut); +assert.eq(mapReduceOut.results.length, 1, tojson(mapReduceOut)); +assert.throws(function() { + coll.mapReduce( function() { emit(this.a, 1); }, function(key, values) { return Array.sum(values); }, - {out: {inline: 1}, query: {$expr: {$eq: ["$a", 0]}}}); - assert.commandWorked(mapReduceOut); - assert.eq(mapReduceOut.results.length, 1, tojson(mapReduceOut)); - assert.throws(function() { - coll.mapReduce( - function() { - emit(this.a, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, query: {$expr: {$eq: ["$a", "$$unbound"]}}}); - }); - assert.throws(function() { - coll.mapReduce( - function() { - emit(this.a, 1); - }, - function(key, values) { - return Array.sum(values); - }, - {out: {inline: 1}, query: {$expr: {$divide: [1, "$a"]}}}); - }); - - // - // $expr in remove. - // - - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 0})); - let writeRes = coll.remove({_id: 0, $expr: {$eq: ["$a", 0]}}); - assert.writeOK(writeRes); - assert.eq(1, writeRes.nRemoved); - assert.writeError(coll.remove({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}})); - assert.writeOK(coll.insert({_id: 0, a: 0})); - assert.writeError(coll.remove({_id: 0, $expr: {$divide: [1, "$a"]}})); - - // Any writes preceding the write that fails to parse are executed. - coll.drop(); - assert.writeOK(coll.insert({_id: 0})); - assert.writeOK(coll.insert({_id: 1})); - writeRes = db.runCommand({ - delete: coll.getName(), - deletes: [{q: {_id: 0}, limit: 1}, {q: {$expr: "$$unbound"}, limit: 1}] - }); - assert.commandWorkedIgnoringWriteErrors(writeRes); - assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes)); - assert.eq(writeRes.n, 1, tojson(writeRes)); - - // - // $expr in update. - // - - // $expr is allowed in the query when upsert=false. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 0})); - assert.writeOK(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}})); - assert.eq({_id: 0, a: 0, b: 6}, coll.findOne({_id: 0})); - - // $expr with unbound variable fails. - assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, {$set: {b: 6}})); - - // $expr with division by zero fails. - assert.writeError(coll.update({_id: 0, $expr: {$divide: [1, "$a"]}}, {$set: {b: 6}})); - - // $expr is not allowed in the query when upsert=true. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: 5})); - assert.writeError( - coll.update({_id: 0, $expr: {$eq: ["$a", 5]}}, {$set: {b: 6}}, {upsert: true})); - - // $expr is not allowed in $pull filter. + {out: {inline: 1}, query: {$expr: {$eq: ["$a", "$$unbound"]}}}); +}); +assert.throws(function() { + coll.mapReduce( + function() { + emit(this.a, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {$expr: {$divide: [1, "$a"]}}}); +}); + +// +// $expr in remove. 
+// + +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 0})); +let writeRes = coll.remove({_id: 0, $expr: {$eq: ["$a", 0]}}); +assert.writeOK(writeRes); +assert.eq(1, writeRes.nRemoved); +assert.writeError(coll.remove({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}})); +assert.writeOK(coll.insert({_id: 0, a: 0})); +assert.writeError(coll.remove({_id: 0, $expr: {$divide: [1, "$a"]}})); + +// Any writes preceding the write that fails to parse are executed. +coll.drop(); +assert.writeOK(coll.insert({_id: 0})); +assert.writeOK(coll.insert({_id: 1})); +writeRes = db.runCommand({ + delete: coll.getName(), + deletes: [{q: {_id: 0}, limit: 1}, {q: {$expr: "$$unbound"}, limit: 1}] +}); +assert.commandWorkedIgnoringWriteErrors(writeRes); +assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes)); +assert.eq(writeRes.n, 1, tojson(writeRes)); + +// +// $expr in update. +// + +// $expr is allowed in the query when upsert=false. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 0})); +assert.writeOK(coll.update({_id: 0, $expr: {$eq: ["$a", 0]}}, {$set: {b: 6}})); +assert.eq({_id: 0, a: 0, b: 6}, coll.findOne({_id: 0})); + +// $expr with unbound variable fails. +assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", "$$unbound"]}}, {$set: {b: 6}})); + +// $expr with division by zero fails. +assert.writeError(coll.update({_id: 0, $expr: {$divide: [1, "$a"]}}, {$set: {b: 6}})); + +// $expr is not allowed in the query when upsert=true. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: 5})); +assert.writeError(coll.update({_id: 0, $expr: {$eq: ["$a", 5]}}, {$set: {b: 6}}, {upsert: true})); + +// $expr is not allowed in $pull filter. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); +assert.writeError(coll.update({_id: 0}, {$pull: {a: {$expr: {$eq: ["$b", 5]}}}})); + +// $expr is not allowed in arrayFilters. +if (db.getMongo().writeMode() === "commands") { coll.drop(); assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); - assert.writeError(coll.update({_id: 0}, {$pull: {a: {$expr: {$eq: ["$b", 5]}}}})); - - // $expr is not allowed in arrayFilters. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); - assert.writeError(coll.update({_id: 0}, - {$set: {"a.$[i].b": 6}}, - {arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]})); - } - - // Any writes preceding the write that fails to parse are executed. - coll.drop(); - assert.writeOK(coll.insert({_id: 0})); - assert.writeOK(coll.insert({_id: 1})); - writeRes = db.runCommand({ - update: coll.getName(), - updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}] - }); - assert.commandWorkedIgnoringWriteErrors(writeRes); - assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes)); - assert.eq(writeRes.n, 1, tojson(writeRes)); + assert.writeError(coll.update({_id: 0}, + {$set: {"a.$[i].b": 6}}, + {arrayFilters: [{"i.b": 5, $expr: {$eq: ["$i.b", 5]}}]})); +} + +// Any writes preceding the write that fails to parse are executed. 
+coll.drop(); +assert.writeOK(coll.insert({_id: 0})); +assert.writeOK(coll.insert({_id: 1})); +writeRes = db.runCommand({ + update: coll.getName(), + updates: [{q: {_id: 0}, u: {$set: {b: 6}}}, {q: {$expr: "$$unbound"}, u: {$set: {b: 6}}}] +}); +assert.commandWorkedIgnoringWriteErrors(writeRes); +assert.eq(writeRes.writeErrors[0].code, 17276, tojson(writeRes)); +assert.eq(writeRes.n, 1, tojson(writeRes)); })(); diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js index 79fe6d87b86..d0eb55656b2 100644 --- a/jstests/core/expr_index_use.js +++ b/jstests/core/expr_index_use.js @@ -1,239 +1,242 @@ // Confirms expected index use when performing a match with a $expr statement. (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.expr_index_use; - coll.drop(); - - assert.writeOK(coll.insert({a: {b: 1}})); - assert.writeOK(coll.insert({a: {b: [1]}})); - assert.writeOK(coll.insert({a: [{b: 1}]})); - assert.writeOK(coll.insert({a: [{b: [1]}]})); - assert.commandWorked(coll.createIndex({"a.b": 1})); - - assert.writeOK(coll.insert({c: {d: 1}})); - assert.commandWorked(coll.createIndex({"c.d": 1})); - - assert.writeOK(coll.insert({e: [{f: 1}]})); - assert.commandWorked(coll.createIndex({"e.f": 1})); - - assert.writeOK(coll.insert({g: {h: [1]}})); - assert.commandWorked(coll.createIndex({"g.h": 1})); - - assert.writeOK(coll.insert({i: 1, j: [1]})); - assert.commandWorked(coll.createIndex({i: 1, j: 1})); - - assert.writeOK(coll.insert({k: 1, l: "abc"})); - assert.commandWorked(coll.createIndex({k: 1, l: "text"})); - - assert.writeOK(coll.insert({x: 0})); - assert.writeOK(coll.insert({x: 1, y: 1})); - assert.writeOK(coll.insert({x: 2, y: 2})); - assert.writeOK(coll.insert({x: 3, y: 10})); - assert.writeOK(coll.insert({y: 20})); - assert.commandWorked(coll.createIndex({x: 1, y: 1})); - - assert.writeOK(coll.insert({w: 123})); - assert.writeOK(coll.insert({})); - assert.writeOK(coll.insert({w: null})); - assert.writeOK(coll.insert({w: undefined})); - assert.writeOK(coll.insert({w: NaN})); - assert.writeOK(coll.insert({w: "foo"})); - assert.writeOK(coll.insert({w: "FOO"})); - assert.writeOK(coll.insert({w: {z: 1}})); - assert.writeOK(coll.insert({w: {z: 2}})); - assert.commandWorked(coll.createIndex({w: 1})); - assert.commandWorked(coll.createIndex({"w.z": 1})); - - /** - * Executes the expression 'expr' as both a find and an aggregate. Then confirms - * 'metricsToCheck', which is an object containing: - * - nReturned: The number of documents the pipeline is expected to return. - * - expectedIndex: Either an index specification object when index use is expected or - * 'null' if a collection scan is expected. 
- */ - function confirmExpectedExprExecution(expr, metricsToCheck, collation) { - assert(metricsToCheck.hasOwnProperty("nReturned"), - "metricsToCheck must contain an nReturned field"); - - let aggOptions = {}; - if (collation) { - aggOptions.collation = collation; - } - - const pipeline = [{$match: {$expr: expr}}]; +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const coll = db.expr_index_use; +coll.drop(); + +assert.writeOK(coll.insert({a: {b: 1}})); +assert.writeOK(coll.insert({a: {b: [1]}})); +assert.writeOK(coll.insert({a: [{b: 1}]})); +assert.writeOK(coll.insert({a: [{b: [1]}]})); +assert.commandWorked(coll.createIndex({"a.b": 1})); + +assert.writeOK(coll.insert({c: {d: 1}})); +assert.commandWorked(coll.createIndex({"c.d": 1})); + +assert.writeOK(coll.insert({e: [{f: 1}]})); +assert.commandWorked(coll.createIndex({"e.f": 1})); + +assert.writeOK(coll.insert({g: {h: [1]}})); +assert.commandWorked(coll.createIndex({"g.h": 1})); + +assert.writeOK(coll.insert({i: 1, j: [1]})); +assert.commandWorked(coll.createIndex({i: 1, j: 1})); + +assert.writeOK(coll.insert({k: 1, l: "abc"})); +assert.commandWorked(coll.createIndex({k: 1, l: "text"})); + +assert.writeOK(coll.insert({x: 0})); +assert.writeOK(coll.insert({x: 1, y: 1})); +assert.writeOK(coll.insert({x: 2, y: 2})); +assert.writeOK(coll.insert({x: 3, y: 10})); +assert.writeOK(coll.insert({y: 20})); +assert.commandWorked(coll.createIndex({x: 1, y: 1})); + +assert.writeOK(coll.insert({w: 123})); +assert.writeOK(coll.insert({})); +assert.writeOK(coll.insert({w: null})); +assert.writeOK(coll.insert({w: undefined})); +assert.writeOK(coll.insert({w: NaN})); +assert.writeOK(coll.insert({w: "foo"})); +assert.writeOK(coll.insert({w: "FOO"})); +assert.writeOK(coll.insert({w: {z: 1}})); +assert.writeOK(coll.insert({w: {z: 2}})); +assert.commandWorked(coll.createIndex({w: 1})); +assert.commandWorked(coll.createIndex({"w.z": 1})); + +/** + * Executes the expression 'expr' as both a find and an aggregate. Then confirms + * 'metricsToCheck', which is an object containing: + * - nReturned: The number of documents the pipeline is expected to return. + * - expectedIndex: Either an index specification object when index use is expected or + * 'null' if a collection scan is expected. + */ +function confirmExpectedExprExecution(expr, metricsToCheck, collation) { + assert(metricsToCheck.hasOwnProperty("nReturned"), + "metricsToCheck must contain an nReturned field"); + + let aggOptions = {}; + if (collation) { + aggOptions.collation = collation; + } - // Verify that $expr returns the correct number of results when run inside the $match stage - // of an aggregate. - assert.eq(metricsToCheck.nReturned, coll.aggregate(pipeline, aggOptions).itcount()); + const pipeline = [{$match: {$expr: expr}}]; - // Verify that $expr returns the correct number of results when run in a find command. - let cursor = coll.find({$expr: expr}); - if (collation) { - cursor = cursor.collation(collation); - } - assert.eq(metricsToCheck.nReturned, cursor.itcount()); - - // Verify that $expr returns the correct number of results when evaluated inside a $project, - // with optimizations inhibited. We expect the plan to be COLLSCAN. 
- const pipelineWithProject = [ - {$_internalInhibitOptimization: {}}, - {$project: {result: {$cond: [expr, true, false]}}}, - {$match: {result: true}} - ]; - assert.eq(metricsToCheck.nReturned, - coll.aggregate(pipelineWithProject, aggOptions).itcount()); - let explain = coll.explain("executionStats").aggregate(pipelineWithProject, aggOptions); - assert(getAggPlanStage(explain, "COLLSCAN"), tojson(explain)); - - // Verifies that there are no rejected plans, and that the winning plan uses the expected - // index. - // - // 'getPlanStageFunc' is a function which can be called to obtain stage-specific information - // from the explain output. There are different versions of this function for find and - // aggregate explain output. - function verifyExplainOutput(explain, getPlanStageFunc) { - assert(!hasRejectedPlans(explain), tojson(explain)); - - if (metricsToCheck.hasOwnProperty("expectedIndex")) { - const stage = getPlanStageFunc(explain, "IXSCAN"); - assert.neq(null, stage, tojson(explain)); - assert(stage.hasOwnProperty("keyPattern"), tojson(explain)); - assert.docEq(stage.keyPattern, metricsToCheck.expectedIndex, tojson(explain)); - } else { - assert(getPlanStageFunc(explain, "COLLSCAN"), tojson(explain)); - } - } + // Verify that $expr returns the correct number of results when run inside the $match stage + // of an aggregate. + assert.eq(metricsToCheck.nReturned, coll.aggregate(pipeline, aggOptions).itcount()); - explain = - assert.commandWorked(coll.explain("executionStats").aggregate(pipeline, aggOptions)); - verifyExplainOutput(explain, getPlanStage); - - cursor = coll.explain("executionStats").find({$expr: expr}); - if (collation) { - cursor = cursor.collation(collation); + // Verify that $expr returns the correct number of results when run in a find command. + let cursor = coll.find({$expr: expr}); + if (collation) { + cursor = cursor.collation(collation); + } + assert.eq(metricsToCheck.nReturned, cursor.itcount()); + + // Verify that $expr returns the correct number of results when evaluated inside a $project, + // with optimizations inhibited. We expect the plan to be COLLSCAN. + const pipelineWithProject = [ + {$_internalInhibitOptimization: {}}, + {$project: {result: {$cond: [expr, true, false]}}}, + {$match: {result: true}} + ]; + assert.eq(metricsToCheck.nReturned, coll.aggregate(pipelineWithProject, aggOptions).itcount()); + let explain = coll.explain("executionStats").aggregate(pipelineWithProject, aggOptions); + assert(getAggPlanStage(explain, "COLLSCAN"), tojson(explain)); + + // Verifies that there are no rejected plans, and that the winning plan uses the expected + // index. + // + // 'getPlanStageFunc' is a function which can be called to obtain stage-specific information + // from the explain output. There are different versions of this function for find and + // aggregate explain output. + function verifyExplainOutput(explain, getPlanStageFunc) { + assert(!hasRejectedPlans(explain), tojson(explain)); + + if (metricsToCheck.hasOwnProperty("expectedIndex")) { + const stage = getPlanStageFunc(explain, "IXSCAN"); + assert.neq(null, stage, tojson(explain)); + assert(stage.hasOwnProperty("keyPattern"), tojson(explain)); + assert.docEq(stage.keyPattern, metricsToCheck.expectedIndex, tojson(explain)); + } else { + assert(getPlanStageFunc(explain, "COLLSCAN"), tojson(explain)); } - explain = assert.commandWorked(cursor.finish()); - verifyExplainOutput(explain, getPlanStage); } - // Comparison of field and constant. 
- confirmExpectedExprExecution({$eq: ["$x", 1]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}}); - confirmExpectedExprExecution({$eq: [1, "$x"]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}}); - - // $and with both children eligible for index use. - confirmExpectedExprExecution({$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 2]}]}, - {nReturned: 1, expectedIndex: {x: 1, y: 1}}); - - // $and with one child eligible for index use and one that is not. - confirmExpectedExprExecution({$and: [{$eq: ["$x", 1]}, {$eq: ["$x", "$y"]}]}, - {nReturned: 1, expectedIndex: {x: 1, y: 1}}); - - // $and with one child eligible for index use and a second child containing a $or where one of - // the two children are eligible. - confirmExpectedExprExecution( - {$and: [{$eq: ["$x", 1]}, {$or: [{$eq: ["$x", "$y"]}, {$eq: ["$y", 1]}]}]}, - {nReturned: 1, expectedIndex: {x: 1, y: 1}}); - - // Equality comparison against non-multikey dotted path field is expected to use an index. - confirmExpectedExprExecution({$eq: ["$c.d", 1]}, {nReturned: 1, expectedIndex: {"c.d": 1}}); - - // $lt, $lte, $gt, $gte, $in, $ne, and $cmp are not expected to use an index. This is because we - // have not yet implemented a rewrite of these operators to indexable MatchExpression. - confirmExpectedExprExecution({$lt: ["$x", 1]}, {nReturned: 20}); - confirmExpectedExprExecution({$lt: [1, "$x"]}, {nReturned: 2}); - confirmExpectedExprExecution({$lte: ["$x", 1]}, {nReturned: 21}); - confirmExpectedExprExecution({$lte: [1, "$x"]}, {nReturned: 3}); - confirmExpectedExprExecution({$gt: ["$x", 1]}, {nReturned: 2}); - confirmExpectedExprExecution({$gt: [1, "$x"]}, {nReturned: 20}); - confirmExpectedExprExecution({$gte: ["$x", 1]}, {nReturned: 3}); - confirmExpectedExprExecution({$gte: [1, "$x"]}, {nReturned: 21}); - confirmExpectedExprExecution({$in: ["$x", [1, 3]]}, {nReturned: 2}); - confirmExpectedExprExecution({$cmp: ["$x", 1]}, {nReturned: 22}); - confirmExpectedExprExecution({$ne: ["$x", 1]}, {nReturned: 22}); - - // Comparison with an array value is not expected to use an index. - confirmExpectedExprExecution({$eq: ["$a.b", [1]]}, {nReturned: 2}); - confirmExpectedExprExecution({$eq: ["$w", [1]]}, {nReturned: 0}); - - // A constant expression is not expected to use an index. - confirmExpectedExprExecution(1, {nReturned: 23}); - confirmExpectedExprExecution(false, {nReturned: 0}); - confirmExpectedExprExecution({$eq: [1, 1]}, {nReturned: 23}); - confirmExpectedExprExecution({$eq: [0, 1]}, {nReturned: 0}); - - // Comparison of 2 fields is not expected to use an index. - confirmExpectedExprExecution({$eq: ["$x", "$y"]}, {nReturned: 20}); - - // Comparison against multikey field not expected to use an index. - confirmExpectedExprExecution({$eq: ["$a.b", 1]}, {nReturned: 1}); - confirmExpectedExprExecution({$eq: ["$e.f", [1]]}, {nReturned: 1}); - confirmExpectedExprExecution({$eq: ["$e.f", 1]}, {nReturned: 0}); - confirmExpectedExprExecution({$eq: ["$g.h", [1]]}, {nReturned: 1}); - confirmExpectedExprExecution({$eq: ["$g.h", 1]}, {nReturned: 0}); - - // Comparison against a non-multikey field of a multikey index can use an index - const metricsToCheck = {nReturned: 1}; - metricsToCheck.expectedIndex = {i: 1, j: 1}; - confirmExpectedExprExecution({$eq: ["$i", 1]}, metricsToCheck); - metricsToCheck.nReturned = 0; - confirmExpectedExprExecution({$eq: ["$i", 2]}, metricsToCheck); - - // Equality to NaN can use an index. 
- confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: 1}}); - - // Equality to undefined and equality to missing cannot use an index. - confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16}); - confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16}); - - // Equality to null can use an index. - confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}}); - - // Equality inside a nested object can use a non-multikey index. - confirmExpectedExprExecution({$eq: ["$w.z", 2]}, {nReturned: 1, expectedIndex: {"w.z": 1}}); - - // Test that the collation is respected. Since the collations do not match, we should not use - // the index. - const caseInsensitiveCollation = {locale: "en_US", strength: 2}; - if (db.getMongo().useReadCommands()) { - confirmExpectedExprExecution( - {$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation); - } + explain = assert.commandWorked(coll.explain("executionStats").aggregate(pipeline, aggOptions)); + verifyExplainOutput(explain, getPlanStage); - // Test equality queries against a hashed index. - assert.commandWorked(coll.dropIndex({w: 1})); - assert.commandWorked(coll.createIndex({w: "hashed"})); - confirmExpectedExprExecution({$eq: ["$w", 123]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); - confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); - confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); - confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16}); - confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16}); - - // Test that equality to null queries can use a sparse index. - assert.commandWorked(coll.dropIndex({w: "hashed"})); - assert.commandWorked(coll.createIndex({w: 1}, {sparse: true})); - confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}}); - - // Equality match against text index prefix is expected to fail. Equality predicates are - // required against the prefix fields of a text index, but currently $eq inside $expr does not - // qualify. - assert.throws(() => - coll.aggregate([{$match: {$expr: {$eq: ["$k", 1]}, $text: {$search: "abc"}}}]) - .itcount()); - - // Test that equality match in $expr respects the collection's default collation, both when - // there is an index with a matching collation and when there isn't. - assert.commandWorked(db.runCommand({drop: coll.getName()})); - assert.commandWorked( - db.createCollection(coll.getName(), {collation: caseInsensitiveCollation})); - assert.writeOK(coll.insert({a: "foo", b: "bar"})); - assert.writeOK(coll.insert({a: "FOO", b: "BAR"})); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "simple"}})); - - confirmExpectedExprExecution({$eq: ["$a", "foo"]}, {nReturned: 2, expectedIndex: {a: 1}}); - confirmExpectedExprExecution({$eq: ["$b", "bar"]}, {nReturned: 2}); + cursor = coll.explain("executionStats").find({$expr: expr}); + if (collation) { + cursor = cursor.collation(collation); + } + explain = assert.commandWorked(cursor.finish()); + verifyExplainOutput(explain, getPlanStage); +} + +// Comparison of field and constant. +confirmExpectedExprExecution({$eq: ["$x", 1]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}}); +confirmExpectedExprExecution({$eq: [1, "$x"]}, {nReturned: 1, expectedIndex: {x: 1, y: 1}}); + +// $and with both children eligible for index use. 
+confirmExpectedExprExecution({$and: [{$eq: ["$x", 2]}, {$eq: ["$y", 2]}]}, + {nReturned: 1, expectedIndex: {x: 1, y: 1}}); + +// $and with one child eligible for index use and one that is not. +confirmExpectedExprExecution({$and: [{$eq: ["$x", 1]}, {$eq: ["$x", "$y"]}]}, + {nReturned: 1, expectedIndex: {x: 1, y: 1}}); + +// $and with one child eligible for index use and a second child containing a $or where one of +// the two children are eligible. +confirmExpectedExprExecution( + {$and: [{$eq: ["$x", 1]}, {$or: [{$eq: ["$x", "$y"]}, {$eq: ["$y", 1]}]}]}, + {nReturned: 1, expectedIndex: {x: 1, y: 1}}); + +// Equality comparison against non-multikey dotted path field is expected to use an index. +confirmExpectedExprExecution({$eq: ["$c.d", 1]}, {nReturned: 1, expectedIndex: {"c.d": 1}}); + +// $lt, $lte, $gt, $gte, $in, $ne, and $cmp are not expected to use an index. This is because we +// have not yet implemented a rewrite of these operators to indexable MatchExpression. +confirmExpectedExprExecution({$lt: ["$x", 1]}, {nReturned: 20}); +confirmExpectedExprExecution({$lt: [1, "$x"]}, {nReturned: 2}); +confirmExpectedExprExecution({$lte: ["$x", 1]}, {nReturned: 21}); +confirmExpectedExprExecution({$lte: [1, "$x"]}, {nReturned: 3}); +confirmExpectedExprExecution({$gt: ["$x", 1]}, {nReturned: 2}); +confirmExpectedExprExecution({$gt: [1, "$x"]}, {nReturned: 20}); +confirmExpectedExprExecution({$gte: ["$x", 1]}, {nReturned: 3}); +confirmExpectedExprExecution({$gte: [1, "$x"]}, {nReturned: 21}); +confirmExpectedExprExecution({$in: ["$x", [1, 3]]}, {nReturned: 2}); +confirmExpectedExprExecution({$cmp: ["$x", 1]}, {nReturned: 22}); +confirmExpectedExprExecution({$ne: ["$x", 1]}, {nReturned: 22}); + +// Comparison with an array value is not expected to use an index. +confirmExpectedExprExecution({$eq: ["$a.b", [1]]}, {nReturned: 2}); +confirmExpectedExprExecution({$eq: ["$w", [1]]}, {nReturned: 0}); + +// A constant expression is not expected to use an index. +confirmExpectedExprExecution(1, {nReturned: 23}); +confirmExpectedExprExecution(false, {nReturned: 0}); +confirmExpectedExprExecution({$eq: [1, 1]}, {nReturned: 23}); +confirmExpectedExprExecution({$eq: [0, 1]}, {nReturned: 0}); + +// Comparison of 2 fields is not expected to use an index. +confirmExpectedExprExecution({$eq: ["$x", "$y"]}, {nReturned: 20}); + +// Comparison against multikey field not expected to use an index. +confirmExpectedExprExecution({$eq: ["$a.b", 1]}, {nReturned: 1}); +confirmExpectedExprExecution({$eq: ["$e.f", [1]]}, {nReturned: 1}); +confirmExpectedExprExecution({$eq: ["$e.f", 1]}, {nReturned: 0}); +confirmExpectedExprExecution({$eq: ["$g.h", [1]]}, {nReturned: 1}); +confirmExpectedExprExecution({$eq: ["$g.h", 1]}, {nReturned: 0}); + +// Comparison against a non-multikey field of a multikey index can use an index +const metricsToCheck = { + nReturned: 1 +}; +metricsToCheck.expectedIndex = { + i: 1, + j: 1 +}; +confirmExpectedExprExecution({$eq: ["$i", 1]}, metricsToCheck); +metricsToCheck.nReturned = 0; +confirmExpectedExprExecution({$eq: ["$i", 2]}, metricsToCheck); + +// Equality to NaN can use an index. +confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: 1}}); + +// Equality to undefined and equality to missing cannot use an index. +confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16}); +confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16}); + +// Equality to null can use an index. 
+confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}}); + +// Equality inside a nested object can use a non-multikey index. +confirmExpectedExprExecution({$eq: ["$w.z", 2]}, {nReturned: 1, expectedIndex: {"w.z": 1}}); + +// Test that the collation is respected. Since the collations do not match, we should not use +// the index. +const caseInsensitiveCollation = { + locale: "en_US", + strength: 2 +}; +if (db.getMongo().useReadCommands()) { + confirmExpectedExprExecution({$eq: ["$w", "FoO"]}, {nReturned: 2}, caseInsensitiveCollation); +} + +// Test equality queries against a hashed index. +assert.commandWorked(coll.dropIndex({w: 1})); +assert.commandWorked(coll.createIndex({w: "hashed"})); +confirmExpectedExprExecution({$eq: ["$w", 123]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); +confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); +confirmExpectedExprExecution({$eq: ["$w", NaN]}, {nReturned: 1, expectedIndex: {w: "hashed"}}); +confirmExpectedExprExecution({$eq: ["$w", undefined]}, {nReturned: 16}); +confirmExpectedExprExecution({$eq: ["$w", "$$REMOVE"]}, {nReturned: 16}); + +// Test that equality to null queries can use a sparse index. +assert.commandWorked(coll.dropIndex({w: "hashed"})); +assert.commandWorked(coll.createIndex({w: 1}, {sparse: true})); +confirmExpectedExprExecution({$eq: ["$w", null]}, {nReturned: 1, expectedIndex: {w: 1}}); + +// Equality match against text index prefix is expected to fail. Equality predicates are +// required against the prefix fields of a text index, but currently $eq inside $expr does not +// qualify. +assert.throws( + () => coll.aggregate([{$match: {$expr: {$eq: ["$k", 1]}, $text: {$search: "abc"}}}]).itcount()); + +// Test that equality match in $expr respects the collection's default collation, both when +// there is an index with a matching collation and when there isn't. +assert.commandWorked(db.runCommand({drop: coll.getName()})); +assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitiveCollation})); +assert.writeOK(coll.insert({a: "foo", b: "bar"})); +assert.writeOK(coll.insert({a: "FOO", b: "BAR"})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "simple"}})); + +confirmExpectedExprExecution({$eq: ["$a", "foo"]}, {nReturned: 2, expectedIndex: {a: 1}}); +confirmExpectedExprExecution({$eq: ["$b", "bar"]}, {nReturned: 2}); })(); diff --git a/jstests/core/expr_or_pushdown.js b/jstests/core/expr_or_pushdown.js index 431e2932ae7..e2605e08c91 100644 --- a/jstests/core/expr_or_pushdown.js +++ b/jstests/core/expr_or_pushdown.js @@ -3,23 +3,23 @@ * as expected. 
*/ (function() { - "use strict"; +"use strict"; - const coll = db.expr_or_pushdown; - coll.drop(); - assert.commandWorked(coll.createIndex({"a": 1, "b": 1})); - assert.commandWorked(coll.insert({_id: 0, a: "a", b: "b", d: "d"})); - assert.commandWorked(coll.insert({_id: 1, a: "a", b: "c", d: "d"})); - assert.commandWorked(coll.insert({_id: 2, a: "a", b: "x", d: "d"})); - assert.commandWorked(coll.insert({_id: 3, a: "x", b: "b", d: "d"})); - assert.commandWorked(coll.insert({_id: 4, a: "a", b: "b", d: "x"})); +const coll = db.expr_or_pushdown; +coll.drop(); +assert.commandWorked(coll.createIndex({"a": 1, "b": 1})); +assert.commandWorked(coll.insert({_id: 0, a: "a", b: "b", d: "d"})); +assert.commandWorked(coll.insert({_id: 1, a: "a", b: "c", d: "d"})); +assert.commandWorked(coll.insert({_id: 2, a: "a", b: "x", d: "d"})); +assert.commandWorked(coll.insert({_id: 3, a: "x", b: "b", d: "d"})); +assert.commandWorked(coll.insert({_id: 4, a: "a", b: "b", d: "x"})); - const results = coll.find({ - $expr: {$and: [{$eq: ["$d", "d"]}, {$eq: ["$a", "a"]}]}, - $or: [{"b": "b"}, {"b": "c"}] - }) - .sort({_id: 1}) - .toArray(); +const results = coll.find({ + $expr: {$and: [{$eq: ["$d", "d"]}, {$eq: ["$a", "a"]}]}, + $or: [{"b": "b"}, {"b": "c"}] + }) + .sort({_id: 1}) + .toArray(); - assert.eq(results, [{_id: 0, a: "a", b: "b", d: "d"}, {_id: 1, a: "a", b: "c", d: "d"}]); +assert.eq(results, [{_id: 0, a: "a", b: "b", d: "d"}, {_id: 1, a: "a", b: "c", d: "d"}]); }()); diff --git a/jstests/core/expr_valid_positions.js b/jstests/core/expr_valid_positions.js index df1f2470261..cd3ae2bf917 100644 --- a/jstests/core/expr_valid_positions.js +++ b/jstests/core/expr_valid_positions.js @@ -1,23 +1,23 @@ // Verify that $expr can be used in the top-level position, but not in subdocuments. (function() { - "use strict"; +"use strict"; - const coll = db.expr_valid_positions; +const coll = db.expr_valid_positions; - // Works at the BSON root level. - assert.eq(0, coll.find({$expr: {$eq: ["$foo", "$bar"]}}).itcount()); +// Works at the BSON root level. +assert.eq(0, coll.find({$expr: {$eq: ["$foo", "$bar"]}}).itcount()); - // Works inside a $or. - assert.eq(0, coll.find({$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}).itcount()); +// Works inside a $or. +assert.eq(0, coll.find({$or: [{$expr: {$eq: ["$foo", "$bar"]}}, {b: {$gt: 3}}]}).itcount()); - // Fails inside an elemMatch. - assert.throws(function() { - coll.find({a: {$elemMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount(); - }); +// Fails inside an elemMatch. +assert.throws(function() { + coll.find({a: {$elemMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount(); +}); - // Fails inside an _internalSchemaObjectMatch. - assert.throws(function() { - coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount(); - }); +// Fails inside an _internalSchemaObjectMatch. +assert.throws(function() { + coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount(); +}); }());
\ No newline at end of file diff --git a/jstests/core/failcommand_failpoint.js b/jstests/core/failcommand_failpoint.js index 94712cbac9a..e78d39e3d50 100644 --- a/jstests/core/failcommand_failpoint.js +++ b/jstests/core/failcommand_failpoint.js @@ -2,288 +2,287 @@ * @tags: [assumes_read_concern_unchanged, assumes_read_preference_unchanged] */ (function() { - "use strict"; +"use strict"; - const testDB = db.getSiblingDB("test_failcommand"); - const adminDB = db.getSiblingDB("admin"); +const testDB = db.getSiblingDB("test_failcommand"); +const adminDB = db.getSiblingDB("admin"); - const getThreadName = function() { - let myUri = adminDB.runCommand({whatsmyuri: 1}).you; - return adminDB.aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}]) - .toArray()[0] - .desc; - }; +const getThreadName = function() { + let myUri = adminDB.runCommand({whatsmyuri: 1}).you; + return adminDB.aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}]) + .toArray()[0] + .desc; +}; - let threadName = getThreadName(); +let threadName = getThreadName(); - // Test failing with a particular error code. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - errorCode: ErrorCodes.NotMaster, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test failing with a particular error code. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + errorCode: ErrorCodes.NotMaster, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test that only commands specified in failCommands fail. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - errorCode: ErrorCodes.BadValue, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({find: "collection"})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test that only commands specified in failCommands fail. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + errorCode: ErrorCodes.BadValue, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({find: "collection"})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test failing with multiple commands specified in failCommands. 
- assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - errorCode: ErrorCodes.BadValue, - failCommands: ["ping", "isMaster"], - threadName: threadName, - } - })); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); - assert.commandFailedWithCode(testDB.runCommand({isMaster: 1}), ErrorCodes.BadValue); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test failing with multiple commands specified in failCommands. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + errorCode: ErrorCodes.BadValue, + failCommands: ["ping", "isMaster"], + threadName: threadName, + } +})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); +assert.commandFailedWithCode(testDB.runCommand({isMaster: 1}), ErrorCodes.BadValue); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test skip when failing with a particular error code. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {skip: 2}, - data: { - errorCode: ErrorCodes.NotMaster, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test skip when failing with a particular error code. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {skip: 2}, + data: { + errorCode: ErrorCodes.NotMaster, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test times when failing with a particular error code. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 2}, - data: { - errorCode: ErrorCodes.NotMaster, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test times when failing with a particular error code. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 2}, + data: { + errorCode: ErrorCodes.NotMaster, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.NotMaster); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Commands not specified in failCommands are not counted for skip. 
- assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {skip: 1}, - data: { - errorCode: ErrorCodes.BadValue, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(testDB.runCommand({find: "c"})); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Commands not specified in failCommands are not counted for skip. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {skip: 1}, + data: { + errorCode: ErrorCodes.BadValue, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(testDB.runCommand({find: "c"})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Commands not specified in failCommands are not counted for times. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 1}, - data: { - errorCode: ErrorCodes.BadValue, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({find: "c"})); - assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Commands not specified in failCommands are not counted for times. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + errorCode: ErrorCodes.BadValue, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({find: "c"})); +assert.commandFailedWithCode(testDB.runCommand({ping: 1}), ErrorCodes.BadValue); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test closing connection. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - closeConnection: true, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.throws(() => testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test closing connection. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + closeConnection: true, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.throws(() => testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - threadName = getThreadName(); +threadName = getThreadName(); - // Test that only commands specified in failCommands fail when closing the connection. 
- assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: "alwaysOn", - data: { - closeConnection: true, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({find: "c"})); - assert.throws(() => testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test that only commands specified in failCommands fail when closing the connection. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + closeConnection: true, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({find: "c"})); +assert.throws(() => testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - threadName = getThreadName(); +threadName = getThreadName(); - // Test skip when closing connection. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {skip: 2}, - data: { - closeConnection: true, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.throws(() => testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test skip when closing connection. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {skip: 2}, + data: { + closeConnection: true, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.throws(() => testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - threadName = getThreadName(); +threadName = getThreadName(); - // Commands not specified in failCommands are not counted for skip. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {skip: 1}, - data: { - closeConnection: true, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(testDB.runCommand({find: "c"})); - assert.throws(() => testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Commands not specified in failCommands are not counted for skip. 
+assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {skip: 1}, + data: { + closeConnection: true, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(testDB.runCommand({find: "c"})); +assert.throws(() => testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - threadName = getThreadName(); +threadName = getThreadName(); - // Commands not specified in failCommands are not counted for times. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 1}, - data: { - closeConnection: true, - failCommands: ["ping"], - threadName: threadName, - } - })); - assert.commandWorked(testDB.runCommand({isMaster: 1})); - assert.commandWorked(testDB.runCommand({buildinfo: 1})); - assert.commandWorked(testDB.runCommand({find: "c"})); - assert.throws(() => testDB.runCommand({ping: 1})); - assert.commandWorked(testDB.runCommand({ping: 1})); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Commands not specified in failCommands are not counted for times. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + closeConnection: true, + failCommands: ["ping"], + threadName: threadName, + } +})); +assert.commandWorked(testDB.runCommand({isMaster: 1})); +assert.commandWorked(testDB.runCommand({buildinfo: 1})); +assert.commandWorked(testDB.runCommand({find: "c"})); +assert.throws(() => testDB.runCommand({ping: 1})); +assert.commandWorked(testDB.runCommand({ping: 1})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - threadName = getThreadName(); +threadName = getThreadName(); - // Cannot fail on "configureFailPoint" command. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 1}, - data: { - errorCode: ErrorCodes.BadValue, - failCommands: ["configureFailPoint"], - threadName: threadName, - } - })); - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Cannot fail on "configureFailPoint" command. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + errorCode: ErrorCodes.BadValue, + failCommands: ["configureFailPoint"], + threadName: threadName, + } +})); +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test with success and writeConcernError. - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 1}, - data: { - writeConcernError: {code: 12345, errmsg: "hello"}, - failCommands: ['insert', 'ping'], - threadName: threadName, - } - })); - // Commands that don't support writeConcern don't tick counter. - assert.commandWorked(testDB.runCommand({ping: 1})); - // Unlisted commands don't tick counter. - assert.commandWorked(testDB.runCommand({update: "c", updates: [{q: {}, u: {}, upsert: true}]})); - var res = testDB.runCommand({insert: "c", documents: [{}]}); - assert.commandWorkedIgnoringWriteConcernErrors(res); - assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); - assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again. 
- assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +// Test with success and writeConcernError. +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + writeConcernError: {code: 12345, errmsg: "hello"}, + failCommands: ['insert', 'ping'], + threadName: threadName, + } +})); +// Commands that don't support writeConcern don't tick counter. +assert.commandWorked(testDB.runCommand({ping: 1})); +// Unlisted commands don't tick counter. +assert.commandWorked(testDB.runCommand({update: "c", updates: [{q: {}, u: {}, upsert: true}]})); +var res = testDB.runCommand({insert: "c", documents: [{}]}); +assert.commandWorkedIgnoringWriteConcernErrors(res); +assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); +assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again. +assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test with natural failure and writeConcernError. +// Test with natural failure and writeConcernError. - // This document is removed before testing the following insert to prevent a DuplicateKeyError - // if the failcommand_failpoint test is run multiple times on the same fixture. - testDB.c.remove({_id: 'dup'}); +// This document is removed before testing the following insert to prevent a DuplicateKeyError +// if the failcommand_failpoint test is run multiple times on the same fixture. +testDB.c.remove({_id: 'dup'}); - assert.commandWorked(testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]})); - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 1}, - data: { - writeConcernError: {code: 12345, errmsg: "hello"}, - failCommands: ['insert'], - threadName: threadName, - } - })); - var res = testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]}); - assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey); - assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); - assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again. - assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); +assert.commandWorked(testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]})); +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + writeConcernError: {code: 12345, errmsg: "hello"}, + failCommands: ['insert'], + threadName: threadName, + } +})); +var res = testDB.runCommand({insert: "c", documents: [{_id: 'dup'}]}); +assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey); +assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); +assert.commandWorked(testDB.runCommand({insert: "c", documents: [{}]})); // Works again. 
+assert.commandWorked(adminDB.runCommand({configureFailPoint: "failCommand", mode: "off"})); - // Test that specifying both writeConcernError and closeConnection : false will not make - // `times` decrement twice per operation - assert.commandWorked(adminDB.runCommand({ - configureFailPoint: "failCommand", - mode: {times: 2}, - data: { - failCommands: ["insert"], - closeConnection: false, - writeConcernError: {code: 12345, errmsg: "hello"}, - threadName: threadName, - } - })); - - var res = testDB.runCommand({insert: "test", documents: [{a: "something"}]}); - assert.commandWorkedIgnoringWriteConcernErrors(res); - assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); - res = testDB.runCommand({insert: "test", documents: [{a: "something else"}]}); - assert.commandWorkedIgnoringWriteConcernErrors(res); - assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); - assert.commandWorked(testDB.runCommand({insert: "test", documents: [{b: "or_other"}]})); +// Test that specifying both writeConcernError and closeConnection : false will not make +// `times` decrement twice per operation +assert.commandWorked(adminDB.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 2}, + data: { + failCommands: ["insert"], + closeConnection: false, + writeConcernError: {code: 12345, errmsg: "hello"}, + threadName: threadName, + } +})); +var res = testDB.runCommand({insert: "test", documents: [{a: "something"}]}); +assert.commandWorkedIgnoringWriteConcernErrors(res); +assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); +res = testDB.runCommand({insert: "test", documents: [{a: "something else"}]}); +assert.commandWorkedIgnoringWriteConcernErrors(res); +assert.eq(res.writeConcernError, {code: 12345, errmsg: "hello"}); +assert.commandWorked(testDB.runCommand({insert: "test", documents: [{b: "or_other"}]})); }()); diff --git a/jstests/core/field_name_validation.js b/jstests/core/field_name_validation.js index 656e01886cc..72d346a0f0c 100644 --- a/jstests/core/field_name_validation.js +++ b/jstests/core/field_name_validation.js @@ -10,173 +10,170 @@ * @tags: [assumes_unsharded_collection] */ (function() { - "use strict"; - - const coll = db.field_name_validation; - coll.drop(); - - // - // Insert command field name validation. - // - - // Test that dotted field names are allowed. - assert.writeOK(coll.insert({"a.b": 1})); - assert.writeOK(coll.insert({"_id.a": 1})); - assert.writeOK(coll.insert({a: {"a.b": 1}})); - assert.writeOK(coll.insert({_id: {"a.b": 1}})); - - // Test that _id cannot be a regex. - assert.writeError(coll.insert({_id: /a/})); - - // Test that _id cannot be an array. - assert.writeError(coll.insert({_id: [9]})); - - // Test that $-prefixed field names are allowed in embedded objects. - assert.writeOK(coll.insert({a: {$b: 1}})); - assert.eq(1, coll.find({"a.$b": 1}).itcount()); - - // Test that $-prefixed field names are not allowed at the top level. - assert.writeErrorWithCode(coll.insert({$a: 1}), ErrorCodes.BadValue); - assert.writeErrorWithCode(coll.insert({valid: 1, $a: 1}), ErrorCodes.BadValue); - - // Test that reserved $-prefixed field names are also not allowed. - assert.writeErrorWithCode(coll.insert({$ref: 1}), ErrorCodes.BadValue); - assert.writeErrorWithCode(coll.insert({$id: 1}), ErrorCodes.BadValue); - assert.writeErrorWithCode(coll.insert({$db: 1}), ErrorCodes.BadValue); - - // Test that _id cannot be an object with an element that has a $-prefixed field name. 
- assert.writeErrorWithCode(coll.insert({_id: {$b: 1}}), ErrorCodes.DollarPrefixedFieldName); - assert.writeErrorWithCode(coll.insert({_id: {a: 1, $b: 1}}), - ErrorCodes.DollarPrefixedFieldName); - - // Should not enforce the same restrictions on an embedded _id field. - assert.writeOK(coll.insert({a: {_id: [9]}})); - assert.writeOK(coll.insert({a: {_id: /a/}})); - assert.writeOK(coll.insert({a: {_id: {$b: 1}}})); - - // - // Update command field name validation. - // - coll.drop(); - - // Dotted fields are allowed in an update. - assert.writeOK(coll.update({}, {"a.b": 1}, {upsert: true})); - assert.eq(0, coll.find({"a.b": 1}).itcount()); - assert.eq(1, coll.find({}).itcount()); - - // Dotted fields represent paths in $set. - assert.writeOK(coll.update({}, {$set: {"a.b": 1}}, {upsert: true})); - assert.eq(1, coll.find({"a.b": 1}).itcount()); - - // Dotted fields represent paths in the query object. - assert.writeOK(coll.update({"a.b": 1}, {$set: {"a.b": 2}})); - assert.eq(1, coll.find({"a.b": 2}).itcount()); - assert.eq(1, coll.find({a: {b: 2}}).itcount()); - - assert.writeOK(coll.update({"a.b": 2}, {"a.b": 3})); - assert.eq(0, coll.find({"a.b": 3}).itcount()); - - // $-prefixed field names are not allowed. - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$c: 1}, {upsert: true}), - ErrorCodes.FailedToParse); - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {$c: 1}}, {upsert: true}), - ErrorCodes.DollarPrefixedFieldName); - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {c: {$d: 1}}}, {upsert: true}), - ErrorCodes.DollarPrefixedFieldName); - - // Reserved $-prefixed field names are also not allowed. - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$ref: 1}), ErrorCodes.FailedToParse); - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$id: 1}), ErrorCodes.FailedToParse); - assert.writeErrorWithCode(coll.update({"a.b": 1}, {$db: 1}), ErrorCodes.FailedToParse); - - // - // FindAndModify field name validation. - // - coll.drop(); - - // Dotted fields are allowed in update object. - coll.findAndModify({query: {_id: 0}, update: {_id: 0, "a.b": 1}, upsert: true}); - assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray()); - - // Dotted fields represent paths in $set. - coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, "a.b": 1}}, upsert: true}); - assert.eq([{_id: 1, a: {b: 1}}], coll.find({_id: 1}).toArray()); - - // Dotted fields represent paths in the query object. - coll.findAndModify({query: {_id: 0, "a.b": 1}, update: {"a.b": 2}}); - assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray()); - - coll.findAndModify({query: {_id: 1, "a.b": 1}, update: {$set: {_id: 1, "a.b": 2}}}); - assert.eq([{_id: 1, a: {b: 2}}], coll.find({_id: 1}).toArray()); - - // $-prefixed field names are not allowed. - assert.throws(function() { - coll.findAndModify({query: {_id: 1}, update: {_id: 1, $invalid: 1}}); - }); - assert.throws(function() { - coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, $invalid: 1}}}); - }); - - // Reserved $-prefixed field names are also not allowed. - assert.throws(function() { - coll.findAndModify({query: {_id: 1}, update: {_id: 1, $ref: 1}}); - }); - assert.throws(function() { - coll.findAndModify({query: {_id: 1}, update: {_id: 1, $id: 1}}); - }); - assert.throws(function() { - coll.findAndModify({query: {_id: 1}, update: {_id: 1, $db: 1}}); - }); - - // - // Aggregation field name validation. 
- // - coll.drop(); - - assert.writeOK(coll.insert({_id: {a: 1, b: 2}, "c.d": 3})); - - // Dotted fields represent paths in an aggregation pipeline. - assert.eq(coll.aggregate([{$match: {"_id.a": 1}}, {$project: {"_id.b": 1}}]).toArray(), - [{_id: {b: 2}}]); - assert.eq(coll.aggregate([{$match: {"c.d": 3}}, {$project: {"_id.b": 1}}]).toArray(), []); - - assert.eq(coll.aggregate([{$project: {"_id.a": 1}}]).toArray(), [{_id: {a: 1}}]); - assert.eq(coll.aggregate([{$project: {"c.d": 1, _id: 0}}]).toArray(), [{}]); - - assert.eq(coll.aggregate([ - {$addFields: {"new.field": {$multiply: ["$c.d", "$_id.a"]}}}, - {$project: {"new.field": 1, _id: 0}} - ]) - .toArray(), - [{new: {field: null}}]); - - assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$_id.b"}}}]).toArray(), - [{_id: 1, e: 2}]); - assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$c.d"}}}]).toArray(), - [{_id: 1, e: 0}]); - - // Accumulation statements cannot have a dotted field name. - assert.commandFailed(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$group: {_id: "$_id.a", "e.f": {$sum: "$_id.b"}}}] - })); - - // $-prefixed field names are not allowed in an aggregation pipeline. - assert.commandFailed( - db.runCommand({aggregate: coll.getName(), pipeline: [{$match: {"$invalid": 1}}]})); - - assert.commandFailed(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$project: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}] - })); - - assert.commandFailed(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$addFields: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}] - })); - - assert.commandFailed(db.runCommand({ - aggregate: coll.getName(), - pipeline: [{$group: {_id: "$_id.a", "$invalid": {$sum: "$_id.b"}}}] - })); +"use strict"; + +const coll = db.field_name_validation; +coll.drop(); + +// +// Insert command field name validation. +// + +// Test that dotted field names are allowed. +assert.writeOK(coll.insert({"a.b": 1})); +assert.writeOK(coll.insert({"_id.a": 1})); +assert.writeOK(coll.insert({a: {"a.b": 1}})); +assert.writeOK(coll.insert({_id: {"a.b": 1}})); + +// Test that _id cannot be a regex. +assert.writeError(coll.insert({_id: /a/})); + +// Test that _id cannot be an array. +assert.writeError(coll.insert({_id: [9]})); + +// Test that $-prefixed field names are allowed in embedded objects. +assert.writeOK(coll.insert({a: {$b: 1}})); +assert.eq(1, coll.find({"a.$b": 1}).itcount()); + +// Test that $-prefixed field names are not allowed at the top level. +assert.writeErrorWithCode(coll.insert({$a: 1}), ErrorCodes.BadValue); +assert.writeErrorWithCode(coll.insert({valid: 1, $a: 1}), ErrorCodes.BadValue); + +// Test that reserved $-prefixed field names are also not allowed. +assert.writeErrorWithCode(coll.insert({$ref: 1}), ErrorCodes.BadValue); +assert.writeErrorWithCode(coll.insert({$id: 1}), ErrorCodes.BadValue); +assert.writeErrorWithCode(coll.insert({$db: 1}), ErrorCodes.BadValue); + +// Test that _id cannot be an object with an element that has a $-prefixed field name. +assert.writeErrorWithCode(coll.insert({_id: {$b: 1}}), ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode(coll.insert({_id: {a: 1, $b: 1}}), ErrorCodes.DollarPrefixedFieldName); + +// Should not enforce the same restrictions on an embedded _id field. +assert.writeOK(coll.insert({a: {_id: [9]}})); +assert.writeOK(coll.insert({a: {_id: /a/}})); +assert.writeOK(coll.insert({a: {_id: {$b: 1}}})); + +// +// Update command field name validation. 
+// +coll.drop(); + +// Dotted fields are allowed in an update. +assert.writeOK(coll.update({}, {"a.b": 1}, {upsert: true})); +assert.eq(0, coll.find({"a.b": 1}).itcount()); +assert.eq(1, coll.find({}).itcount()); + +// Dotted fields represent paths in $set. +assert.writeOK(coll.update({}, {$set: {"a.b": 1}}, {upsert: true})); +assert.eq(1, coll.find({"a.b": 1}).itcount()); + +// Dotted fields represent paths in the query object. +assert.writeOK(coll.update({"a.b": 1}, {$set: {"a.b": 2}})); +assert.eq(1, coll.find({"a.b": 2}).itcount()); +assert.eq(1, coll.find({a: {b: 2}}).itcount()); + +assert.writeOK(coll.update({"a.b": 2}, {"a.b": 3})); +assert.eq(0, coll.find({"a.b": 3}).itcount()); + +// $-prefixed field names are not allowed. +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$c: 1}, {upsert: true}), + ErrorCodes.FailedToParse); +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {$c: 1}}, {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$set: {c: {$d: 1}}}, {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); + +// Reserved $-prefixed field names are also not allowed. +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$ref: 1}), ErrorCodes.FailedToParse); +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$id: 1}), ErrorCodes.FailedToParse); +assert.writeErrorWithCode(coll.update({"a.b": 1}, {$db: 1}), ErrorCodes.FailedToParse); + +// +// FindAndModify field name validation. +// +coll.drop(); + +// Dotted fields are allowed in update object. +coll.findAndModify({query: {_id: 0}, update: {_id: 0, "a.b": 1}, upsert: true}); +assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray()); + +// Dotted fields represent paths in $set. +coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, "a.b": 1}}, upsert: true}); +assert.eq([{_id: 1, a: {b: 1}}], coll.find({_id: 1}).toArray()); + +// Dotted fields represent paths in the query object. +coll.findAndModify({query: {_id: 0, "a.b": 1}, update: {"a.b": 2}}); +assert.eq([{_id: 0, "a.b": 1}], coll.find({_id: 0}).toArray()); + +coll.findAndModify({query: {_id: 1, "a.b": 1}, update: {$set: {_id: 1, "a.b": 2}}}); +assert.eq([{_id: 1, a: {b: 2}}], coll.find({_id: 1}).toArray()); + +// $-prefixed field names are not allowed. +assert.throws(function() { + coll.findAndModify({query: {_id: 1}, update: {_id: 1, $invalid: 1}}); +}); +assert.throws(function() { + coll.findAndModify({query: {_id: 1}, update: {$set: {_id: 1, $invalid: 1}}}); +}); + +// Reserved $-prefixed field names are also not allowed. +assert.throws(function() { + coll.findAndModify({query: {_id: 1}, update: {_id: 1, $ref: 1}}); +}); +assert.throws(function() { + coll.findAndModify({query: {_id: 1}, update: {_id: 1, $id: 1}}); +}); +assert.throws(function() { + coll.findAndModify({query: {_id: 1}, update: {_id: 1, $db: 1}}); +}); + +// +// Aggregation field name validation. +// +coll.drop(); + +assert.writeOK(coll.insert({_id: {a: 1, b: 2}, "c.d": 3})); + +// Dotted fields represent paths in an aggregation pipeline. 
+assert.eq(coll.aggregate([{$match: {"_id.a": 1}}, {$project: {"_id.b": 1}}]).toArray(), + [{_id: {b: 2}}]); +assert.eq(coll.aggregate([{$match: {"c.d": 3}}, {$project: {"_id.b": 1}}]).toArray(), []); + +assert.eq(coll.aggregate([{$project: {"_id.a": 1}}]).toArray(), [{_id: {a: 1}}]); +assert.eq(coll.aggregate([{$project: {"c.d": 1, _id: 0}}]).toArray(), [{}]); + +assert.eq(coll.aggregate([ + {$addFields: {"new.field": {$multiply: ["$c.d", "$_id.a"]}}}, + {$project: {"new.field": 1, _id: 0}} + ]) + .toArray(), + [{new: {field: null}}]); + +assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$_id.b"}}}]).toArray(), + [{_id: 1, e: 2}]); +assert.eq(coll.aggregate([{$group: {_id: "$_id.a", e: {$sum: "$c.d"}}}]).toArray(), + [{_id: 1, e: 0}]); + +// Accumulation statements cannot have a dotted field name. +assert.commandFailed(db.runCommand( + {aggregate: coll.getName(), pipeline: [{$group: {_id: "$_id.a", "e.f": {$sum: "$_id.b"}}}]})); + +// $-prefixed field names are not allowed in an aggregation pipeline. +assert.commandFailed( + db.runCommand({aggregate: coll.getName(), pipeline: [{$match: {"$invalid": 1}}]})); + +assert.commandFailed(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$project: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}] +})); + +assert.commandFailed(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$addFields: {"_id.a": 1, "$newField": {$multiply: ["$_id.b", "$_id.a"]}}}] +})); + +assert.commandFailed(db.runCommand({ + aggregate: coll.getName(), + pipeline: [{$group: {_id: "$_id.a", "$invalid": {$sum: "$_id.b"}}}] +})); })(); diff --git a/jstests/core/filemd5.js b/jstests/core/filemd5.js index 4c8ad3cd754..9ea70283a73 100644 --- a/jstests/core/filemd5.js +++ b/jstests/core/filemd5.js @@ -9,20 +9,20 @@ // ] (function() { - "use strict"; +"use strict"; - db.fs.chunks.drop(); - assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")})); +db.fs.chunks.drop(); +assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")})); - assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.BadValue); +assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.BadValue); - db.fs.chunks.ensureIndex({files_id: 1, n: 1}); - assert.commandWorked(db.runCommand({filemd5: 1, root: "fs"})); +db.fs.chunks.ensureIndex({files_id: 1, n: 1}); +assert.commandWorked(db.runCommand({filemd5: 1, root: "fs"})); - assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs", partialOk: 1, md5state: 5}), - 50847); - assert.writeOK(db.fs.chunks.insert({files_id: 2, n: 0})); - assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50848); - assert.writeOK(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}})); - assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50849); +assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs", partialOk: 1, md5state: 5}), + 50847); +assert.writeOK(db.fs.chunks.insert({files_id: 2, n: 0})); +assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50848); +assert.writeOK(db.fs.chunks.update({files_id: 2, n: 0}, {$set: {data: 5}})); +assert.commandFailedWithCode(db.runCommand({filemd5: 2, root: "fs"}), 50849); }()); diff --git a/jstests/core/find4.js b/jstests/core/find4.js index ad482916a19..3721763b358 100644 --- a/jstests/core/find4.js +++ b/jstests/core/find4.js @@ -1,42 +1,42 @@ (function() { - "use strict"; +"use strict"; - const coll = db.find4; - coll.drop(); 
+const coll = db.find4; +coll.drop(); - assert.writeOK(coll.insert({a: 1123, b: 54332})); +assert.writeOK(coll.insert({a: 1123, b: 54332})); - let o = coll.findOne(); - assert.eq(1123, o.a, "A"); - assert.eq(54332, o.b, "B"); - assert(o._id.str, "C"); +let o = coll.findOne(); +assert.eq(1123, o.a, "A"); +assert.eq(54332, o.b, "B"); +assert(o._id.str, "C"); - o = coll.findOne({}, {a: 1}); - assert.eq(1123, o.a, "D"); - assert(o._id.str, "E"); - assert(!o.b, "F"); +o = coll.findOne({}, {a: 1}); +assert.eq(1123, o.a, "D"); +assert(o._id.str, "E"); +assert(!o.b, "F"); - o = coll.findOne({}, {b: 1}); - assert.eq(54332, o.b, "G"); - assert(o._id.str, "H"); - assert(!o.a, "I"); +o = coll.findOne({}, {b: 1}); +assert.eq(54332, o.b, "G"); +assert(o._id.str, "H"); +assert(!o.a, "I"); - assert(coll.drop()); +assert(coll.drop()); - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 2, b: 2})); - assert.eq("1-1,2-2", - coll.find() - .sort({a: 1}) - .map(function(z) { - return z.a + "-" + z.b; - }) - .toString()); - assert.eq("1-undefined,2-undefined", - coll.find({}, {a: 1}) - .sort({a: 1}) - .map(function(z) { - return z.a + "-" + z.b; - }) - .toString()); +assert.writeOK(coll.insert({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 2, b: 2})); +assert.eq("1-1,2-2", + coll.find() + .sort({a: 1}) + .map(function(z) { + return z.a + "-" + z.b; + }) + .toString()); +assert.eq("1-undefined,2-undefined", + coll.find({}, {a: 1}) + .sort({a: 1}) + .map(function(z) { + return z.a + "-" + z.b; + }) + .toString()); }()); diff --git a/jstests/core/find5.js b/jstests/core/find5.js index 41ed0034b5c..f7e52c0ccc6 100644 --- a/jstests/core/find5.js +++ b/jstests/core/find5.js @@ -1,56 +1,56 @@ // @tags: [requires_fastcount] (function() { - "use strict"; +"use strict"; - const coll = db.find5; - coll.drop(); +const coll = db.find5; +coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({b: 5})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({b: 5})); - assert.eq(2, coll.find({}, {b: 1}).count(), "A"); +assert.eq(2, coll.find({}, {b: 1}).count(), "A"); - function getIds(projection) { - return coll.find({}, projection).map(doc => doc._id).sort(); - } +function getIds(projection) { + return coll.find({}, projection).map(doc => doc._id).sort(); +} - assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({})), "B1 "); - assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({a: 1})), "B2 "); - assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({b: 1})), "B3 "); - assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({c: 1})), "B4 "); +assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({})), "B1 "); +assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({a: 1})), "B2 "); +assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({b: 1})), "B3 "); +assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({c: 1})), "B4 "); - let results = coll.find({}, {a: 1}).sort({a: -1}); - let first = results[0]; - assert.eq(1, first.a, "C1"); - assert.isnull(first.b, "C2"); +let results = coll.find({}, {a: 1}).sort({a: -1}); +let first = results[0]; +assert.eq(1, first.a, "C1"); +assert.isnull(first.b, "C2"); - let second = results[1]; - assert.isnull(second.a, "C3"); - assert.isnull(second.b, "C4"); +let second = results[1]; +assert.isnull(second.a, "C3"); +assert.isnull(second.b, "C4"); - results = coll.find({}, {b: 1}).sort({a: -1}); - first = results[0]; - assert.isnull(first.a, "C5"); - assert.isnull(first.b, "C6"); +results = 
coll.find({}, {b: 1}).sort({a: -1}); +first = results[0]; +assert.isnull(first.a, "C5"); +assert.isnull(first.b, "C6"); - second = results[1]; - assert.isnull(second.a, "C7"); - assert.eq(5, second.b, "C8"); +second = results[1]; +assert.isnull(second.a, "C7"); +assert.eq(5, second.b, "C8"); - assert(coll.drop()); +assert(coll.drop()); - assert.writeOK(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}})); - assert.eq(2, coll.findOne({}, {"b.c": 1}).b.c, "D"); +assert.writeOK(coll.insert({a: 1, b: {c: 2, d: 3, e: 4}})); +assert.eq(2, coll.findOne({}, {"b.c": 1}).b.c, "D"); - const o = coll.findOne({}, {"b.c": 1, "b.d": 1}); - assert(o.b.c, "E 1"); - assert(o.b.d, "E 2"); - assert(!o.b.e, "E 3"); +const o = coll.findOne({}, {"b.c": 1, "b.d": 1}); +assert(o.b.c, "E 1"); +assert(o.b.d, "E 2"); +assert(!o.b.e, "E 3"); - assert(!coll.findOne({}, {"b.c": 1}).b.d, "F"); +assert(!coll.findOne({}, {"b.c": 1}).b.d, "F"); - assert(coll.drop()); - assert.writeOK(coll.insert({a: {b: {c: 1}}})); - assert.eq(1, coll.findOne({}, {"a.b.c": 1}).a.b.c, "G"); +assert(coll.drop()); +assert.writeOK(coll.insert({a: {b: {c: 1}}})); +assert.eq(1, coll.findOne({}, {"a.b.c": 1}).a.b.c, "G"); }()); diff --git a/jstests/core/find_and_modify3.js b/jstests/core/find_and_modify3.js index 3f8fc22d98d..a319aef7a2d 100644 --- a/jstests/core/find_and_modify3.js +++ b/jstests/core/find_and_modify3.js @@ -19,7 +19,8 @@ orig2 = t.findOne({_id: 2}); out = t.findAndModify({ query: {_id: 1, 'comments.i': 0}, - update: {$set: {'comments.$.j': 2}}, 'new': true, + update: {$set: {'comments.$.j': 2}}, + 'new': true, sort: {other: 1} }); assert.eq(out.comments[0], {i: 0, j: 2}); @@ -29,7 +30,8 @@ assert.eq(t.findOne({_id: 2}), orig2); out = t.findAndModify({ query: {other: 1, 'comments.i': 1}, - update: {$set: {'comments.$.j': 3}}, 'new': true, + update: {$set: {'comments.$.j': 3}}, + 'new': true, sort: {other: 1} }); assert.eq(out.comments[0], {i: 0, j: 2}); diff --git a/jstests/core/find_and_modify4.js b/jstests/core/find_and_modify4.js index 15fb93c8a9f..d5b3ae23cb2 100644 --- a/jstests/core/find_and_modify4.js +++ b/jstests/core/find_and_modify4.js @@ -11,7 +11,8 @@ function getNextVal(counterName) { var ret = t.findAndModify({ query: {_id: counterName}, update: {$inc: {val: 1}}, - upsert: true, 'new': true, + upsert: true, + 'new': true, }); return ret; } diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js index 80b737cfbed..9682bea4c65 100644 --- a/jstests/core/find_and_modify_concurrent_update.js +++ b/jstests/core/find_and_modify_concurrent_update.js @@ -11,34 +11,33 @@ // Ensures that find and modify will not apply an update to a document which, due to a concurrent // modification, no longer matches the query predicate. (function() { - "use strict"; - - // Repeat the test a few times, as the timing of the yield means it won't fail consistently. - for (var i = 0; i < 3; i++) { - var t = db.find_and_modify_concurrent; - t.drop(); - - assert.commandWorked(t.ensureIndex({a: 1})); - assert.commandWorked(t.ensureIndex({b: 1})); - assert.writeOK(t.insert({_id: 1, a: 1, b: 1})); - - var join = startParallelShell( - "db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});"); - - // Due to the sleep, we expect this find and modify to yield before updating the - // document. 
- var res = t.findAndModify( - {query: {a: 1, b: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}}); - - join(); - var docs = t.find().toArray(); - assert.eq(docs.length, 1); - - // Both the find and modify and the update operations look for a document with a==1, - // and then increment 'a' by 1. One should win the race and set a=2. The other should - // fail to find a match. The assertion is that 'a' got incremented once (not zero times - // and not twice). - assert.eq(docs[0].a, 2); - } - +"use strict"; + +// Repeat the test a few times, as the timing of the yield means it won't fail consistently. +for (var i = 0; i < 3; i++) { + var t = db.find_and_modify_concurrent; + t.drop(); + + assert.commandWorked(t.ensureIndex({a: 1})); + assert.commandWorked(t.ensureIndex({b: 1})); + assert.writeOK(t.insert({_id: 1, a: 1, b: 1})); + + var join = + startParallelShell("db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});"); + + // Due to the sleep, we expect this find and modify to yield before updating the + // document. + var res = t.findAndModify( + {query: {a: 1, b: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}}); + + join(); + var docs = t.find().toArray(); + assert.eq(docs.length, 1); + + // Both the find and modify and the update operations look for a document with a==1, + // and then increment 'a' by 1. One should win the race and set a=2. The other should + // fail to find a match. The assertion is that 'a' got incremented once (not zero times + // and not twice). + assert.eq(docs[0].a, 2); +} })(); diff --git a/jstests/core/find_and_modify_empty_coll.js b/jstests/core/find_and_modify_empty_coll.js index 7325d73583e..c47674c800a 100644 --- a/jstests/core/find_and_modify_empty_coll.js +++ b/jstests/core/find_and_modify_empty_coll.js @@ -7,19 +7,19 @@ * Test that findAndModify works against a non-existent collection. 
*/ (function() { - 'use strict'; - var coll = db.find_and_modify_server18054; - coll.drop(); +'use strict'; +var coll = db.find_and_modify_server18054; +coll.drop(); - assert.eq(null, coll.findAndModify({remove: true})); - assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}})); - var upserted = - coll.findAndModify({query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true}); - assert.eq(upserted, {_id: 0, i: 1}); +assert.eq(null, coll.findAndModify({remove: true})); +assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}})); +var upserted = + coll.findAndModify({query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true}); +assert.eq(upserted, {_id: 0, i: 1}); - coll.drop(); +coll.drop(); - assert.eq(null, coll.findAndModify({remove: true, fields: {z: 1}})); - assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, fields: {z: 1}})); - assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, upsert: true, fields: {z: 1}})); +assert.eq(null, coll.findAndModify({remove: true, fields: {z: 1}})); +assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, fields: {z: 1}})); +assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}, upsert: true, fields: {z: 1}})); })(); diff --git a/jstests/core/find_and_modify_invalid_query_params.js b/jstests/core/find_and_modify_invalid_query_params.js index a54d9217b24..6c5d16c94f1 100644 --- a/jstests/core/find_and_modify_invalid_query_params.js +++ b/jstests/core/find_and_modify_invalid_query_params.js @@ -4,90 +4,89 @@ * @tags: [assumes_unsharded_collection] */ (function() { - "use strict"; - - const coll = db.find_and_modify_invalid_inputs; - coll.drop(); - coll.insert({_id: 0}); - coll.insert({_id: 1}); - - function assertFailedWithCode(cmd, errorCode) { - const err = assert.throws(() => coll.findAndModify(cmd)); - assert.eq(err.code, errorCode); - } - - function assertWorked(cmd, expectedValue) { - const out = assert.doesNotThrow(() => coll.findAndModify(cmd)); - assert.eq(out.value, expectedValue); - } - - // Verify that the findAndModify command works when we supply a valid query. - let out = coll.findAndModify({query: {_id: 1}, update: {$set: {value: "basic"}}, new: true}); - assert.eq(out, {_id: 1, value: "basic"}); - - // Verify that invalid 'query' object fails. - assertFailedWithCode({query: null, update: {value: 2}}, 31160); - assertFailedWithCode({query: 1, update: {value: 2}}, 31160); - assertFailedWithCode({query: "{_id: 1}", update: {value: 2}}, 31160); - assertFailedWithCode({query: false, update: {value: 2}}, 31160); - - // Verify that missing and empty query object is allowed. - assertWorked({update: {$set: {value: "missingQuery"}}, new: true}, "missingQuery"); - assertWorked({query: {}, update: {$set: {value: "emptyQuery"}}, new: true}, "emptyQuery"); - - // Verify that command works when we supply a valid sort specification. - assertWorked({sort: {_id: -1}, update: {$set: {value: "sort"}}, new: true}, "sort"); - - // Verify that invaid 'sort' object fails. - assertFailedWithCode({sort: null, update: {value: 2}}, 31174); - assertFailedWithCode({sort: 1, update: {value: 2}}, 31174); - assertFailedWithCode({sort: "{_id: 1}", update: {value: 2}}, 31174); - assertFailedWithCode({sort: false, update: {value: 2}}, 31174); - - // Verify that missing and empty 'sort' object is allowed. - assertWorked({update: {$set: {value: "missingSort"}}, new: true}, "missingSort"); - assertWorked({sort: {}, update: {$set: {value: "emptySort"}}, new: true}, "emptySort"); - - // Verify that the 'fields' projection works. 
-    assertWorked({fields: {_id: 0}, update: {$set: {value: "project"}}, new: true}, "project");
-
-    // Verify that invaid 'fields' object fails.
-    assertFailedWithCode({fields: null, update: {value: 2}}, 31175);
-    assertFailedWithCode({fields: 1, update: {value: 2}}, 31175);
-    assertFailedWithCode({fields: "{_id: 1}", update: {value: 2}}, 31175);
-    assertFailedWithCode({fields: false, update: {value: 2}}, 31175);
-
-    // Verify that missing and empty 'fields' object is allowed. Also verify that the command
-    // projects all the fields.
-    assertWorked({update: {$set: {value: "missingFields"}}, new: true}, "missingFields");
-    assertWorked({fields: {}, update: {$set: {value: "emptyFields"}}, new: true}, "emptyFields");
-
-    // Verify that findOneAndDelete() shell helper throws the same errors as findAndModify().
-    let err = assert.throws(() => coll.findOneAndDelete("{_id: 1}"));
-    assert.eq(err.code, 31160);
-    err = assert.throws(() => coll.findOneAndDelete(null, {sort: 1}));
-    assert.eq(err.code, 31174);
-
-    // Verify that findOneAndReplace() shell helper throws the same errors as findAndModify().
-    err = assert.throws(() => coll.findOneAndReplace("{_id: 1}", {}));
-    assert.eq(err.code, 31160);
-    err = assert.throws(() => coll.findOneAndReplace(null, {}, {sort: 1}));
-    assert.eq(err.code, 31174);
-
-    // Verify that findOneAndUpdate() shell helper throws the same errors as findAndModify().
-    err = assert.throws(() => coll.findOneAndUpdate("{_id: 1}", {$set: {value: "new"}}));
-    assert.eq(err.code, 31160);
-    err = assert.throws(() => coll.findOneAndUpdate(null, {$set: {value: "new"}}, {sort: 1}));
-    assert.eq(err.code, 31174);
-
-    // Verify that find and modify shell helpers allow null query object.
-    out =
-        coll.findOneAndUpdate(null, {$set: {value: "findOneAndUpdate"}}, {returnNewDocument: true});
-    assert.eq(out.value, "findOneAndUpdate");
-
-    out = coll.findOneAndReplace(null, {value: "findOneAndReplace"}, {returnNewDocument: true});
-    assert.eq(out.value, "findOneAndReplace");
-
-    out = coll.findOneAndDelete(null);
-    assert.eq(out.value, "findOneAndReplace");
+"use strict";
+
+const coll = db.find_and_modify_invalid_inputs;
+coll.drop();
+coll.insert({_id: 0});
+coll.insert({_id: 1});
+
+function assertFailedWithCode(cmd, errorCode) {
+    const err = assert.throws(() => coll.findAndModify(cmd));
+    assert.eq(err.code, errorCode);
+}
+
+function assertWorked(cmd, expectedValue) {
+    const out = assert.doesNotThrow(() => coll.findAndModify(cmd));
+    assert.eq(out.value, expectedValue);
+}
+
+// Verify that the findAndModify command works when we supply a valid query.
+let out = coll.findAndModify({query: {_id: 1}, update: {$set: {value: "basic"}}, new: true});
+assert.eq(out, {_id: 1, value: "basic"});
+
+// Verify that invalid 'query' object fails.
+assertFailedWithCode({query: null, update: {value: 2}}, 31160);
+assertFailedWithCode({query: 1, update: {value: 2}}, 31160);
+assertFailedWithCode({query: "{_id: 1}", update: {value: 2}}, 31160);
+assertFailedWithCode({query: false, update: {value: 2}}, 31160);
+
+// Verify that missing and empty query object is allowed.
+assertWorked({update: {$set: {value: "missingQuery"}}, new: true}, "missingQuery");
+assertWorked({query: {}, update: {$set: {value: "emptyQuery"}}, new: true}, "emptyQuery");
+
+// Verify that command works when we supply a valid sort specification.
+assertWorked({sort: {_id: -1}, update: {$set: {value: "sort"}}, new: true}, "sort");
+
+// Verify that invalid 'sort' object fails.
+assertFailedWithCode({sort: null, update: {value: 2}}, 31174);
+assertFailedWithCode({sort: 1, update: {value: 2}}, 31174);
+assertFailedWithCode({sort: "{_id: 1}", update: {value: 2}}, 31174);
+assertFailedWithCode({sort: false, update: {value: 2}}, 31174);
+
+// Verify that missing and empty 'sort' object is allowed.
+assertWorked({update: {$set: {value: "missingSort"}}, new: true}, "missingSort");
+assertWorked({sort: {}, update: {$set: {value: "emptySort"}}, new: true}, "emptySort");
+
+// Verify that the 'fields' projection works.
+assertWorked({fields: {_id: 0}, update: {$set: {value: "project"}}, new: true}, "project");
+
+// Verify that invalid 'fields' object fails.
+assertFailedWithCode({fields: null, update: {value: 2}}, 31175);
+assertFailedWithCode({fields: 1, update: {value: 2}}, 31175);
+assertFailedWithCode({fields: "{_id: 1}", update: {value: 2}}, 31175);
+assertFailedWithCode({fields: false, update: {value: 2}}, 31175);
+
+// Verify that missing and empty 'fields' object is allowed. Also verify that the command
+// projects all the fields.
+assertWorked({update: {$set: {value: "missingFields"}}, new: true}, "missingFields");
+assertWorked({fields: {}, update: {$set: {value: "emptyFields"}}, new: true}, "emptyFields");
+
+// Verify that findOneAndDelete() shell helper throws the same errors as findAndModify().
+let err = assert.throws(() => coll.findOneAndDelete("{_id: 1}"));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndDelete(null, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that findOneAndReplace() shell helper throws the same errors as findAndModify().
+err = assert.throws(() => coll.findOneAndReplace("{_id: 1}", {}));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndReplace(null, {}, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that findOneAndUpdate() shell helper throws the same errors as findAndModify().
+err = assert.throws(() => coll.findOneAndUpdate("{_id: 1}", {$set: {value: "new"}}));
+assert.eq(err.code, 31160);
+err = assert.throws(() => coll.findOneAndUpdate(null, {$set: {value: "new"}}, {sort: 1}));
+assert.eq(err.code, 31174);
+
+// Verify that find and modify shell helpers allow null query object.
+out = coll.findOneAndUpdate(null, {$set: {value: "findOneAndUpdate"}}, {returnNewDocument: true});
+assert.eq(out.value, "findOneAndUpdate");
+
+out = coll.findOneAndReplace(null, {value: "findOneAndReplace"}, {returnNewDocument: true});
+assert.eq(out.value, "findOneAndReplace");
+
+out = coll.findOneAndDelete(null);
+assert.eq(out.value, "findOneAndReplace");
 })();
diff --git a/jstests/core/find_and_modify_pipeline_update.js b/jstests/core/find_and_modify_pipeline_update.js
index ba793fb2ce3..781b4d0335d 100644
--- a/jstests/core/find_and_modify_pipeline_update.js
+++ b/jstests/core/find_and_modify_pipeline_update.js
@@ -3,55 +3,52 @@
  * @tags: [requires_non_retryable_writes]
  */
 (function() {
-    "use strict";
+"use strict";
 
-    load("jstests/libs/fixture_helpers.js");  // For isMongos.
+load("jstests/libs/fixture_helpers.js");  // For isMongos.
 
-    const coll = db.find_and_modify_pipeline_update;
-    coll.drop();
+const coll = db.find_and_modify_pipeline_update;
+coll.drop();
 
-    // Test that it generally works.
- assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}])); - let found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 1}}]}); - assert.eq(found, {_id: 0}); - found = coll.findAndModify({query: {_id: 0}, update: [{$set: {z: 2}}], new: true}); - assert.eq(found, {_id: 0, y: 1, z: 2}); +// Test that it generally works. +assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}])); +let found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 1}}]}); +assert.eq(found, {_id: 0}); +found = coll.findAndModify({query: {_id: 0}, update: [{$set: {z: 2}}], new: true}); +assert.eq(found, {_id: 0, y: 1, z: 2}); - found = coll.findAndModify({query: {_id: 0}, update: [{$unset: ["z"]}], new: true}); - assert.eq(found, {_id: 0, y: 1}); +found = coll.findAndModify({query: {_id: 0}, update: [{$unset: ["z"]}], new: true}); +assert.eq(found, {_id: 0, y: 1}); - // Test that pipeline-style update supports the 'fields' argument. +// Test that pipeline-style update supports the 'fields' argument. +assert(coll.drop()); +assert.commandWorked(coll.insert([{_id: 0, x: 0}, {_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}])); +found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 0}}], fields: {x: 0}}); +assert.eq(found, {_id: 0}); + +found = coll.findAndModify({query: {_id: 1}, update: [{$set: {y: 1}}], fields: {x: 1}}); +assert.eq(found, {_id: 1, x: 1}); + +found = coll.findAndModify({query: {_id: 2}, update: [{$set: {y: 2}}], fields: {x: 0}, new: true}); +assert.eq(found, {_id: 2, y: 2}); + +found = coll.findAndModify({query: {_id: 3}, update: [{$set: {y: 3}}], fields: {x: 1}, new: true}); +assert.eq(found, {_id: 3, x: 3}); + +// We skip the following test for sharded fixtures as it will fail as the query for +// findAndModify must contain the shard key. +if (!FixtureHelpers.isMongos(db)) { + // Test that 'sort' works with pipeline-style update. assert(coll.drop()); assert.commandWorked( - coll.insert([{_id: 0, x: 0}, {_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}])); - found = coll.findAndModify({query: {_id: 0}, update: [{$set: {y: 0}}], fields: {x: 0}}); - assert.eq(found, {_id: 0}); - - found = coll.findAndModify({query: {_id: 1}, update: [{$set: {y: 1}}], fields: {x: 1}}); - assert.eq(found, {_id: 1, x: 1}); - - found = - coll.findAndModify({query: {_id: 2}, update: [{$set: {y: 2}}], fields: {x: 0}, new: true}); - assert.eq(found, {_id: 2, y: 2}); - - found = - coll.findAndModify({query: {_id: 3}, update: [{$set: {y: 3}}], fields: {x: 1}, new: true}); - assert.eq(found, {_id: 3, x: 3}); - - // We skip the following test for sharded fixtures as it will fail as the query for - // findAndModify must contain the shard key. - if (!FixtureHelpers.isMongos(db)) { - // Test that 'sort' works with pipeline-style update. - assert(coll.drop()); - assert.commandWorked( - coll.insert([{_id: 0, x: 'b'}, {_id: 1, x: 'd'}, {_id: 2, x: 'a'}, {_id: 3, x: 'c'}])); - found = coll.findAndModify({update: [{$set: {foo: "bar"}}], sort: {x: -1}, new: true}); - assert.eq(found, {_id: 1, x: 'd', foo: "bar"}); - } - - // Test that it rejects the combination of arrayFilters and a pipeline-style update. 
- let err = - assert.throws(() => coll.findAndModify( - {query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]})); - assert.eq(err.code, ErrorCodes.FailedToParse); + coll.insert([{_id: 0, x: 'b'}, {_id: 1, x: 'd'}, {_id: 2, x: 'a'}, {_id: 3, x: 'c'}])); + found = coll.findAndModify({update: [{$set: {foo: "bar"}}], sort: {x: -1}, new: true}); + assert.eq(found, {_id: 1, x: 'd', foo: "bar"}); +} + +// Test that it rejects the combination of arrayFilters and a pipeline-style update. +let err = + assert.throws(() => coll.findAndModify( + {query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]})); +assert.eq(err.code, ErrorCodes.FailedToParse); }()); diff --git a/jstests/core/find_and_modify_server6226.js b/jstests/core/find_and_modify_server6226.js index a4093142150..e99b910e178 100644 --- a/jstests/core/find_and_modify_server6226.js +++ b/jstests/core/find_and_modify_server6226.js @@ -1,9 +1,9 @@ (function() { - 'use strict'; +'use strict'; - var t = db.find_and_modify_server6226; - t.drop(); +var t = db.find_and_modify_server6226; +t.drop(); - var ret = t.findAndModify({query: {_id: 1}, update: {"$inc": {i: 1}}, upsert: true}); - assert.isnull(ret); +var ret = t.findAndModify({query: {_id: 1}, update: {"$inc": {i: 1}}, upsert: true}); +assert.isnull(ret); })(); diff --git a/jstests/core/find_and_modify_server6865.js b/jstests/core/find_and_modify_server6865.js index 98e5b28ee47..1c5d9363a6f 100644 --- a/jstests/core/find_and_modify_server6865.js +++ b/jstests/core/find_and_modify_server6865.js @@ -8,297 +8,286 @@ * when remove=true or new=false, but not when new=true. */ (function() { - 'use strict'; +'use strict'; - var collName = 'find_and_modify_server6865'; - var t = db.getCollection(collName); - t.drop(); +var collName = 'find_and_modify_server6865'; +var t = db.getCollection(collName); +t.drop(); - /** - * Asserts that the specified query and projection returns the expected - * result, using both the find() operation and the findAndModify command. - * - * insert -- document to insert after dropping collection t - * cmdObj -- arguments to the findAndModify command - * - * expected -- the document 'value' expected to be returned after the - * projection is applied - */ - function testFAMWorked(insert, cmdObj, expected) { - t.drop(); - t.insert(insert); - - var res; - - if (!cmdObj['new']) { - // Test that the find operation returns the expected result. - res = t.findOne(cmdObj['query'], cmdObj['fields']); - assert.eq(res, expected, 'positional projection failed for find'); - } - - // Test that the findAndModify command returns the expected result. - res = t.runCommand('findAndModify', cmdObj); - assert.commandWorked(res, 'findAndModify command failed'); - assert.eq(res.value, expected, 'positional projection failed for findAndModify'); - - if (cmdObj['new']) { - // Test that the find operation returns the expected result. - res = t.findOne(cmdObj['query'], cmdObj['fields']); - assert.eq(res, expected, 'positional projection failed for find'); - } - } +/** + * Asserts that the specified query and projection returns the expected + * result, using both the find() operation and the findAndModify command. 
+ * + * insert -- document to insert after dropping collection t + * cmdObj -- arguments to the findAndModify command + * + * expected -- the document 'value' expected to be returned after the + * projection is applied + */ +function testFAMWorked(insert, cmdObj, expected) { + t.drop(); + t.insert(insert); - /** - * Asserts that the specified findAndModify command returns an error. - */ - function testFAMFailed(insert, cmdObj) { - t.drop(); - t.insert(insert); + var res; - var res = t.runCommand('findAndModify', cmdObj); - assert.commandFailed(res, 'findAndModify command unexpectedly succeeded'); + if (!cmdObj['new']) { + // Test that the find operation returns the expected result. + res = t.findOne(cmdObj['query'], cmdObj['fields']); + assert.eq(res, expected, 'positional projection failed for find'); } - // - // Delete operations - // - - // Simple query that uses an inclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3}, - {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true}, - {b: 3}); - - // Simple query that uses an exclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, - {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true}, - {_id: 42, c: 4}); + // Test that the findAndModify command returns the expected result. + res = t.runCommand('findAndModify', cmdObj); + assert.commandWorked(res, 'findAndModify command failed'); + assert.eq(res.value, expected, 'positional projection failed for findAndModify'); - // Simple query that uses $elemMatch in the projection. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true}, - {_id: 42, b: [{name: 'second', value: 2}]}); - - // Query on an array of values while using a positional projection. - testFAMWorked( - {_id: 42, a: [1, 2]}, {query: {a: 2}, fields: {'a.$': 1}, remove: true}, {_id: 42, a: [2]}); + if (cmdObj['new']) { + // Test that the find operation returns the expected result. + res = t.findOne(cmdObj['query'], cmdObj['fields']); + assert.eq(res, expected, 'positional projection failed for find'); + } +} - // Query on an array of objects while using a positional projection. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true}, - {_id: 42, b: [{name: 'third', value: 3}]}); - - // Query on an array of objects while using a position projection. - // Verifies that the projection {'b.$.value': 1} is treated the - // same as {'b.$': 1}. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true}, - {_id: 42, b: [{name: 'third', value: 3}]}); - - // Query on an array of objects using $elemMatch while using an inclusion projection. - testFAMWorked({ - _id: 42, - a: 5, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] - }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, a: 5}, - remove: true - }, - {a: 5}); - - // Query on an array of objects using $elemMatch while using the positional - // operator in the projection. 
- testFAMWorked({ - _id: 42, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] - }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, 'b.$': 1}, - remove: true - }, - {b: [{name: 'john', value: 1}]}); - - // - // Update operations with new=false - // - - // Simple query that uses an inclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3}, - {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false}, - {b: 3}); - - // Simple query that uses an exclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, - {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false}, - {_id: 42, c: 4}); - - // Simple query that uses $elemMatch in the projection. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - { - query: {_id: 42}, - fields: {b: {$elemMatch: {value: 2}}}, - update: {$set: {name: '2nd'}}, - new: false - }, - {_id: 42, b: [{name: 'second', value: 2}]}); - - // Query on an array of values while using a positional projection. - testFAMWorked( - {_id: 42, a: [1, 2]}, - {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: false}, - {_id: 42, a: [2]}); - - // Query on an array of objects while using a positional projection. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - { - query: {_id: 42, 'b.name': 'third'}, - fields: {'b.$': 1}, - update: {$set: {'b.$.kind': 'xyz'}}, - new: false - }, - {_id: 42, b: [{name: 'third', value: 3}]}); - - // Query on an array of objects while using $elemMatch in the projection, - // where the matched array element is modified. - testFAMWorked( - {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]}, - {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: false}, - {_id: 1, a: [{x: 1, y: 1}]}); - - // Query on an array of objects using $elemMatch while using an inclusion projection. - testFAMWorked({ - _id: 42, - a: 5, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] - }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, a: 5}, - update: {$inc: {a: 6}}, - new: false - }, - {a: 5}); - - // Query on an array of objects using $elemMatch while using the positional - // operator in the projection. - testFAMWorked({ - _id: 42, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] - }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, 'b.$': 1}, - update: {$set: {name: 'james'}}, - new: false - }, - {b: [{name: 'john', value: 1}]}); - - // - // Update operations with new=true - // - - // Simple query that uses an inclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3}, - {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true}, - {b: 4}); - - // Simple query that uses an exclusion projection. - testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, - {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true}, - {_id: 42, c: 5}); - - // Simple query that uses $elemMatch in the projection. 
- testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - { - query: {_id: 42}, - fields: {b: {$elemMatch: {value: 2}}}, - update: {$set: {'b.1.name': '2nd'}}, - new: true - }, - {_id: 42, b: [{name: '2nd', value: 2}]}); - - // Query on an array of values while using a positional projection. - testFAMFailed( - {_id: 42, a: [1, 2]}, - {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true}); - - // Query on an array of objects while using a positional projection. - testFAMFailed({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - { - query: {_id: 42, 'b.name': 'third'}, - fields: {'b.$': 1}, - update: {$set: {'b.$.kind': 'xyz'}}, - new: true - }); - - // Query on an array of objects while using $elemMatch in the projection. - testFAMWorked({ - _id: 42, - b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] - }, - { - query: {_id: 42}, - fields: {b: {$elemMatch: {value: 2}}, c: 1}, - update: {$set: {c: 'xyz'}}, - new: true - }, - {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'}); - - // Query on an array of objects while using $elemMatch in the projection, - // where the matched array element is modified. - testFAMWorked( - {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]}, - {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: true}, - {_id: 1, a: [{x: 1, y: 2}]}); - - // Query on an array of objects using $elemMatch while using an inclusion projection. - testFAMWorked({ - _id: 42, - a: 5, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] +/** + * Asserts that the specified findAndModify command returns an error. + */ +function testFAMFailed(insert, cmdObj) { + t.drop(); + t.insert(insert); + + var res = t.runCommand('findAndModify', cmdObj); + assert.commandFailed(res, 'findAndModify command unexpectedly succeeded'); +} + +// +// Delete operations +// + +// Simple query that uses an inclusion projection. +testFAMWorked( + {_id: 42, a: [1, 2], b: 3}, {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true}, {b: 3}); + +// Simple query that uses an exclusion projection. +testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, + {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true}, + {_id: 42, c: 4}); + +// Simple query that uses $elemMatch in the projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true}, + {_id: 42, b: [{name: 'second', value: 2}]}); + +// Query on an array of values while using a positional projection. +testFAMWorked( + {_id: 42, a: [1, 2]}, {query: {a: 2}, fields: {'a.$': 1}, remove: true}, {_id: 42, a: [2]}); + +// Query on an array of objects while using a positional projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true}, + {_id: 42, b: [{name: 'third', value: 3}]}); + +// Query on an array of objects while using a position projection. +// Verifies that the projection {'b.$.value': 1} is treated the +// same as {'b.$': 1}. 
+testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true}, + {_id: 42, b: [{name: 'third', value: 3}]}); + +// Query on an array of objects using $elemMatch while using an inclusion projection. +testFAMWorked({ + _id: 42, + a: 5, + b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] +}, + { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, a: 5}, + remove: true + }, + {a: 5}); + +// Query on an array of objects using $elemMatch while using the positional +// operator in the projection. +testFAMWorked( + {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]}, + { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, 'b.$': 1}, + remove: true }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, a: 5}, - update: {$inc: {a: 6}}, - new: true - }, - {a: 11}); - - // Query on an array of objects using $elemMatch while using the positional - // operator in the projection. - testFAMFailed({ - _id: 42, - b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] + {b: [{name: 'john', value: 1}]}); + +// +// Update operations with new=false +// + +// Simple query that uses an inclusion projection. +testFAMWorked({_id: 42, a: [1, 2], b: 3}, + {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false}, + {b: 3}); + +// Simple query that uses an exclusion projection. +testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, + {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false}, + {_id: 42, c: 4}); + +// Simple query that uses $elemMatch in the projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + { + query: {_id: 42}, + fields: {b: {$elemMatch: {value: 2}}}, + update: {$set: {name: '2nd'}}, + new: false + }, + {_id: 42, b: [{name: 'second', value: 2}]}); + +// Query on an array of values while using a positional projection. +testFAMWorked({_id: 42, a: [1, 2]}, + {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: false}, + {_id: 42, a: [2]}); + +// Query on an array of objects while using a positional projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + { + query: {_id: 42, 'b.name': 'third'}, + fields: {'b.$': 1}, + update: {$set: {'b.$.kind': 'xyz'}}, + new: false + }, + {_id: 42, b: [{name: 'third', value: 3}]}); + +// Query on an array of objects while using $elemMatch in the projection, +// where the matched array element is modified. +testFAMWorked( + {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]}, + {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: false}, + {_id: 1, a: [{x: 1, y: 1}]}); + +// Query on an array of objects using $elemMatch while using an inclusion projection. +testFAMWorked({ + _id: 42, + a: 5, + b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] +}, + { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, a: 5}, + update: {$inc: {a: 6}}, + new: false + }, + {a: 5}); + +// Query on an array of objects using $elemMatch while using the positional +// operator in the projection. 
+testFAMWorked( + {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]}, + { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, 'b.$': 1}, + update: {$set: {name: 'james'}}, + new: false }, - { - query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, - fields: {_id: 0, 'b.$': 1}, - update: {$set: {name: 'james'}}, - new: true - }); - + {b: [{name: 'john', value: 1}]}); + +// +// Update operations with new=true +// + +// Simple query that uses an inclusion projection. +testFAMWorked({_id: 42, a: [1, 2], b: 3}, + {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true}, + {b: 4}); + +// Simple query that uses an exclusion projection. +testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4}, + {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true}, + {_id: 42, c: 5}); + +// Simple query that uses $elemMatch in the projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + { + query: {_id: 42}, + fields: {b: {$elemMatch: {value: 2}}}, + update: {$set: {'b.1.name': '2nd'}}, + new: true + }, + {_id: 42, b: [{name: '2nd', value: 2}]}); + +// Query on an array of values while using a positional projection. +testFAMFailed({_id: 42, a: [1, 2]}, + {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true}); + +// Query on an array of objects while using a positional projection. +testFAMFailed({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + { + query: {_id: 42, 'b.name': 'third'}, + fields: {'b.$': 1}, + update: {$set: {'b.$.kind': 'xyz'}}, + new: true + }); + +// Query on an array of objects while using $elemMatch in the projection. +testFAMWorked({ + _id: 42, + b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}] +}, + { + query: {_id: 42}, + fields: {b: {$elemMatch: {value: 2}}, c: 1}, + update: {$set: {c: 'xyz'}}, + new: true + }, + {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'}); + +// Query on an array of objects while using $elemMatch in the projection, +// where the matched array element is modified. +testFAMWorked( + {_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]}, + {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: true}, + {_id: 1, a: [{x: 1, y: 2}]}); + +// Query on an array of objects using $elemMatch while using an inclusion projection. +testFAMWorked({ + _id: 42, + a: 5, + b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}] +}, + { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, a: 5}, + update: {$inc: {a: 6}}, + new: true + }, + {a: 11}); + +// Query on an array of objects using $elemMatch while using the positional +// operator in the projection. 
+testFAMFailed( + {_id: 42, b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]}, { + query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}}, + fields: {_id: 0, 'b.$': 1}, + update: {$set: {name: 'james'}}, + new: true + }); })(); diff --git a/jstests/core/find_dedup.js b/jstests/core/find_dedup.js index a2ac0bfa3d8..df9dbfa9d12 100644 --- a/jstests/core/find_dedup.js +++ b/jstests/core/find_dedup.js @@ -3,45 +3,45 @@ // @tags: [requires_fastcount] (function() { - "use strict"; +"use strict"; - const coll = db.jstests_find_dedup; +const coll = db.jstests_find_dedup; - function checkDedup(query, idArray) { - const resultsArr = coll.find(query).sort({_id: 1}).toArray(); - assert.eq(resultsArr.length, idArray.length, "same number of results"); +function checkDedup(query, idArray) { + const resultsArr = coll.find(query).sort({_id: 1}).toArray(); + assert.eq(resultsArr.length, idArray.length, "same number of results"); - for (let i = 0; i < idArray.length; i++) { - assert(("_id" in resultsArr[i]), "result doc missing _id"); - assert.eq(idArray[i], resultsArr[i]._id, "_id mismatch for doc " + i); - } + for (let i = 0; i < idArray.length; i++) { + assert(("_id" in resultsArr[i]), "result doc missing _id"); + assert.eq(idArray[i], resultsArr[i]._id, "_id mismatch for doc " + i); } - - // Deduping $or - coll.drop(); - coll.ensureIndex({a: 1, b: 1}); - assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); - assert.writeOK(coll.insert({_id: 2, a: 1, b: 1})); - assert.writeOK(coll.insert({_id: 3, a: 2, b: 2})); - assert.writeOK(coll.insert({_id: 4, a: 3, b: 3})); - assert.writeOK(coll.insert({_id: 5, a: 3, b: 3})); - checkDedup({ - $or: [ - {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}}, - {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}}, - {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}} - ] - }, - [1, 2, 3, 4, 5]); - - // Deduping multikey - assert(coll.drop()); - assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]})); - assert.writeOK(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]})); - assert.eq(2, coll.count()); - - checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]); - - assert.commandWorked(coll.createIndex({a: 1})); - checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]); +} + +// Deduping $or +coll.drop(); +coll.ensureIndex({a: 1, b: 1}); +assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); +assert.writeOK(coll.insert({_id: 2, a: 1, b: 1})); +assert.writeOK(coll.insert({_id: 3, a: 2, b: 2})); +assert.writeOK(coll.insert({_id: 4, a: 3, b: 3})); +assert.writeOK(coll.insert({_id: 5, a: 3, b: 3})); +checkDedup({ + $or: [ + {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}}, + {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}}, + {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}} + ] +}, + [1, 2, 3, 4, 5]); + +// Deduping multikey +assert(coll.drop()); +assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3], b: [4, 5, 6]})); +assert.writeOK(coll.insert({_id: 2, a: [1, 2, 3], b: [4, 5, 6]})); +assert.eq(2, coll.count()); + +checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]); + +assert.commandWorked(coll.createIndex({a: 1})); +checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]); }()); diff --git a/jstests/core/find_getmore_bsonsize.js b/jstests/core/find_getmore_bsonsize.js index 6b9008cec51..6a19dec4302 100644 --- a/jstests/core/find_getmore_bsonsize.js +++ b/jstests/core/find_getmore_bsonsize.js @@ -3,86 +3,86 @@ // Ensure that the find and getMore commands can handle documents nearing the 16 MB size limit for // 
user-stored BSON documents. (function() { - 'use strict'; +'use strict'; - var cmdRes; - var collName = 'find_getmore_bsonsize'; - var coll = db[collName]; +var cmdRes; +var collName = 'find_getmore_bsonsize'; +var coll = db[collName]; - coll.drop(); +coll.drop(); - var oneKB = 1024; - var oneMB = 1024 * oneKB; +var oneKB = 1024; +var oneMB = 1024 * oneKB; - // Build a (1 MB - 1 KB) string. - var smallStr = 'x'; - while (smallStr.length < oneMB) { - smallStr += smallStr; - } - assert.eq(smallStr.length, oneMB); - smallStr = smallStr.substring(0, oneMB - oneKB); +// Build a (1 MB - 1 KB) string. +var smallStr = 'x'; +while (smallStr.length < oneMB) { + smallStr += smallStr; +} +assert.eq(smallStr.length, oneMB); +smallStr = smallStr.substring(0, oneMB - oneKB); - // Build a (16 MB - 1 KB) string. - var bigStr = 'y'; - while (bigStr.length < (16 * oneMB)) { - bigStr += bigStr; - } - assert.eq(bigStr.length, 16 * oneMB); - bigStr = bigStr.substring(0, (16 * oneMB) - oneKB); +// Build a (16 MB - 1 KB) string. +var bigStr = 'y'; +while (bigStr.length < (16 * oneMB)) { + bigStr += bigStr; +} +assert.eq(bigStr.length, 16 * oneMB); +bigStr = bigStr.substring(0, (16 * oneMB) - oneKB); - // Collection has one ~1 MB doc followed by one ~16 MB doc. - assert.writeOK(coll.insert({_id: 0, padding: smallStr})); - assert.writeOK(coll.insert({_id: 1, padding: bigStr})); +// Collection has one ~1 MB doc followed by one ~16 MB doc. +assert.writeOK(coll.insert({_id: 0, padding: smallStr})); +assert.writeOK(coll.insert({_id: 1, padding: bigStr})); - // Find command should just return the first doc, as adding the last would create an invalid - // command response document. - cmdRes = db.runCommand({find: collName}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 1); +// Find command should just return the first doc, as adding the last would create an invalid +// command response document. +cmdRes = db.runCommand({find: collName}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 1); - // The 16 MB doc should be returned alone on getMore. This is the last document in the - // collection, so the server should close the cursor. - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 1); +// The 16 MB doc should be returned alone on getMore. This is the last document in the +// collection, so the server should close the cursor. +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 1); - // Setup a cursor without returning any results (batchSize of zero). - cmdRes = db.runCommand({find: collName, batchSize: 0}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 0); +// Setup a cursor without returning any results (batchSize of zero). 
+cmdRes = db.runCommand({find: collName, batchSize: 0}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 0); - // First getMore should only return one doc, since both don't fit in the response. - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 1); +// First getMore should only return one doc, since both don't fit in the response. +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 1); - // Second getMore should return the second doc and a third will close the cursor. - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 1); +// Second getMore should return the second doc and a third will close the cursor. +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 1); - coll.drop(); +coll.drop(); - // Insert a document of exactly 16MB and make sure the find command can return it. - bigStr = 'y'; - while (bigStr.length < (16 * oneMB)) { - bigStr += bigStr; - } - bigStr = bigStr.substring(0, (16 * oneMB) - 32); - var maxSizeDoc = {_id: 0, padding: bigStr}; - assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB); - assert.writeOK(coll.insert(maxSizeDoc)); +// Insert a document of exactly 16MB and make sure the find command can return it. +bigStr = 'y'; +while (bigStr.length < (16 * oneMB)) { + bigStr += bigStr; +} +bigStr = bigStr.substring(0, (16 * oneMB) - 32); +var maxSizeDoc = {_id: 0, padding: bigStr}; +assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB); +assert.writeOK(coll.insert(maxSizeDoc)); - cmdRes = db.runCommand({find: collName}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 1); +cmdRes = db.runCommand({find: collName}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 1); })(); diff --git a/jstests/core/find_getmore_cmd.js b/jstests/core/find_getmore_cmd.js index 5f815fb1d4c..55ad7a4a443 100644 --- a/jstests/core/find_getmore_cmd.js +++ b/jstests/core/find_getmore_cmd.js @@ -2,88 +2,85 @@ // Tests that explicitly invoke the find and getMore commands. (function() { - 'use strict'; +'use strict'; - var cmdRes; - var cursorId; - var defaultBatchSize = 101; - var collName = 'find_getmore_cmd'; - var coll = db[collName]; +var cmdRes; +var cursorId; +var defaultBatchSize = 101; +var collName = 'find_getmore_cmd'; +var coll = db[collName]; - coll.drop(); - for (var i = 0; i < 150; i++) { - assert.writeOK(coll.insert({a: i})); - } +coll.drop(); +for (var i = 0; i < 150; i++) { + assert.writeOK(coll.insert({a: i})); +} - // Verify result of a find command that specifies none of the optional arguments. 
- cmdRes = db.runCommand({find: collName}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, defaultBatchSize); +// Verify result of a find command that specifies none of the optional arguments. +cmdRes = db.runCommand({find: collName}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, defaultBatchSize); - // Use a getMore command to get the next batch. - cursorId = cmdRes.cursor.id; - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 150 - defaultBatchSize); +// Use a getMore command to get the next batch. +cursorId = cmdRes.cursor.id; +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 150 - defaultBatchSize); - // Cursor should have been closed, so attempting to get another batch should fail. - cmdRes = db.runCommand({getMore: cursorId, collection: collName}); - assert.commandFailed(cmdRes); +// Cursor should have been closed, so attempting to get another batch should fail. +cmdRes = db.runCommand({getMore: cursorId, collection: collName}); +assert.commandFailed(cmdRes); - // Find command with limit. - cmdRes = db.runCommand({find: collName, limit: 10}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 10); +// Find command with limit. +cmdRes = db.runCommand({find: collName, limit: 10}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 10); - // Find command with positive batchSize followed by getMore command with positive batchSize. - cmdRes = db.runCommand({find: collName, batchSize: 10}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 10); - cmdRes = - db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)}); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 5); +// Find command with positive batchSize followed by getMore command with positive batchSize. +cmdRes = db.runCommand({find: collName, batchSize: 10}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 10); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)}); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 5); - // Find command with zero batchSize followed by getMore command (default batchSize). 
- cmdRes = db.runCommand({find: collName, batchSize: 0}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 0); - cmdRes = - db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)}); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 5); +// Find command with zero batchSize followed by getMore command (default batchSize). +cmdRes = db.runCommand({find: collName, batchSize: 0}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 0); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)}); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 5); - // Batch size and limit together. - cmdRes = db.runCommand({find: collName, batchSize: 10, limit: 20}); - assert.commandWorked(cmdRes); - assert.gt(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 10); - cmdRes = - db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(11)}); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.nextBatch.length, 10); +// Batch size and limit together. +cmdRes = db.runCommand({find: collName, batchSize: 10, limit: 20}); +assert.commandWorked(cmdRes); +assert.gt(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 10); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(11)}); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.nextBatch.length, 10); - // Find command with batchSize and singleBatch. - cmdRes = db.runCommand({find: collName, batchSize: 10, singleBatch: true}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, coll.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 10); +// Find command with batchSize and singleBatch. +cmdRes = db.runCommand({find: collName, batchSize: 10, singleBatch: true}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, coll.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 10); - // Error on invalid collection name. - assert.commandFailedWithCode(db.runCommand({find: ""}), ErrorCodes.InvalidNamespace); +// Error on invalid collection name. +assert.commandFailedWithCode(db.runCommand({find: ""}), ErrorCodes.InvalidNamespace); })(); diff --git a/jstests/core/find_projection_meta_errors.js b/jstests/core/find_projection_meta_errors.js index 6fd69cb9d04..c43590e3bad 100644 --- a/jstests/core/find_projection_meta_errors.js +++ b/jstests/core/find_projection_meta_errors.js @@ -1,23 +1,23 @@ // Basic tests for errors when parsing the $meta projection. 
(function() { - "use strict"; +"use strict"; - const coll = db.find_projection_meta_errors; - coll.drop(); +const coll = db.find_projection_meta_errors; +coll.drop(); - assert.commandWorked(coll.insert({a: 1})); - assert.commandWorked(coll.insert({a: 2})); +assert.commandWorked(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: 2})); - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchScore"}}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchScore"}}}), + ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchHighlights"}}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), projection: {score: {$meta: "searchHighlights"}}}), + ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), projection: {score: {$meta: "some garbage"}}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), projection: {score: {$meta: "some garbage"}}}), + ErrorCodes.BadValue); }()); diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js index 2f116364c34..bd5526b8bc9 100644 --- a/jstests/core/fsync.js +++ b/jstests/core/fsync.js @@ -9,107 +9,107 @@ * @tags: [requires_fastcount, requires_fsync] */ (function() { - "use strict"; - - // Start with a clean DB. - var fsyncLockDB = db.getSisterDB('fsyncLockTestDB'); - fsyncLockDB.dropDatabase(); - - // Tests the db.fsyncLock/fsyncUnlock features. - var storageEngine = db.serverStatus().storageEngine.name; - - // As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine - // that does not support the begin/end backup commands. - var supportsFsync = db.fsyncLock(); - - if (!supportsFsync.ok) { - assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported); - jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync"); - return; - } - db.fsyncUnlock(); - - var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1}); - - // Start with a clean DB - var fsyncLockDB = db.getSisterDB('fsyncLockTestDB'); - fsyncLockDB.dropDatabase(); - - // Test that a single, regular write works as expected. - assert.writeOK(fsyncLockDB.coll.insert({x: 1})); - - // Test that fsyncLock doesn't work unless invoked against the admin DB. - var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1}); - assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin."); - - // Uses admin automatically and locks the server for writes. - var fsyncLockRes = db.fsyncLock(); - assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB"); - assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); - - // Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it - // is blocked. There is really no way to do that currently, so just check that the write didn't - // go through. - var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});"); - sleep(3000); - - // Make sure reads can still run even though there is a pending write and also that the write - // didn't get through. - assert.eq(1, fsyncLockDB.coll.find({}).itcount()); - - // Unlock and make sure the insert succeeded. 
- var fsyncUnlockRes = db.fsyncUnlock(); - assert(fsyncUnlockRes.ok, "fsyncUnlock command failed"); - assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp"); - - // Make sure the db is unlocked and the initial write made it through. - writeOpHandle(); - assert.writeOK(fsyncLockDB.coll.insert({x: 2})); - - assert.eq(3, fsyncLockDB.coll.count({})); - - // Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can - // run this command repeatedly with no problems. - var fsyncLockRes = db.fsyncLock(); - assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed"); - - var fsyncUnlockRes = db.fsyncUnlock(); - assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed"); - - // Make sure that insert attempts made during multiple fsyncLock requests will not execute until - // all locks have been released. - fsyncLockRes = db.fsyncLock(); - assert.commandWorked(fsyncLockRes); - assert(fsyncLockRes.lockCount == 1, tojson(fsyncLockRes)); - let currentOp = db.currentOp(); - assert.commandWorked(currentOp); - assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); - - let shellHandle1 = - startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});"); - - fsyncLockRes = db.fsyncLock(); - assert.commandWorked(fsyncLockRes); - assert(fsyncLockRes.lockCount == 2, tojson(fsyncLockRes)); - currentOp = db.currentOp(); - assert.commandWorked(currentOp); - assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); - - let shellHandle2 = - startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});"); - sleep(3000); - assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount()); - - fsyncUnlockRes = db.fsyncUnlock(); - assert.commandWorked(fsyncUnlockRes); - assert(fsyncUnlockRes.lockCount == 1, tojson(fsyncLockRes)); - sleep(3000); - assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount()); - - fsyncUnlockRes = db.fsyncUnlock(); - assert.commandWorked(fsyncUnlockRes); - assert(fsyncUnlockRes.lockCount == 0, tojson(fsyncLockRes)); - shellHandle1(); - shellHandle2(); - assert.eq(2, fsyncLockDB.multipleLock.find({}).itcount()); +"use strict"; + +// Start with a clean DB. +var fsyncLockDB = db.getSisterDB('fsyncLockTestDB'); +fsyncLockDB.dropDatabase(); + +// Tests the db.fsyncLock/fsyncUnlock features. +var storageEngine = db.serverStatus().storageEngine.name; + +// As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine +// that does not support the begin/end backup commands. +var supportsFsync = db.fsyncLock(); + +if (!supportsFsync.ok) { + assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported); + jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync"); + return; +} +db.fsyncUnlock(); + +var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1}); + +// Start with a clean DB +var fsyncLockDB = db.getSisterDB('fsyncLockTestDB'); +fsyncLockDB.dropDatabase(); + +// Test that a single, regular write works as expected. +assert.writeOK(fsyncLockDB.coll.insert({x: 1})); + +// Test that fsyncLock doesn't work unless invoked against the admin DB. +var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1}); +assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin."); + +// Uses admin automatically and locks the server for writes. 
+var fsyncLockRes = db.fsyncLock(); +assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB"); +assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); + +// Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it +// is blocked. There is really no way to do that currently, so just check that the write didn't +// go through. +var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});"); +sleep(3000); + +// Make sure reads can still run even though there is a pending write and also that the write +// didn't get through. +assert.eq(1, fsyncLockDB.coll.find({}).itcount()); + +// Unlock and make sure the insert succeeded. +var fsyncUnlockRes = db.fsyncUnlock(); +assert(fsyncUnlockRes.ok, "fsyncUnlock command failed"); +assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp"); + +// Make sure the db is unlocked and the initial write made it through. +writeOpHandle(); +assert.writeOK(fsyncLockDB.coll.insert({x: 2})); + +assert.eq(3, fsyncLockDB.coll.count({})); + +// Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can +// run this command repeatedly with no problems. +var fsyncLockRes = db.fsyncLock(); +assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed"); + +var fsyncUnlockRes = db.fsyncUnlock(); +assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed"); + +// Make sure that insert attempts made during multiple fsyncLock requests will not execute until +// all locks have been released. +fsyncLockRes = db.fsyncLock(); +assert.commandWorked(fsyncLockRes); +assert(fsyncLockRes.lockCount == 1, tojson(fsyncLockRes)); +let currentOp = db.currentOp(); +assert.commandWorked(currentOp); +assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); + +let shellHandle1 = + startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});"); + +fsyncLockRes = db.fsyncLock(); +assert.commandWorked(fsyncLockRes); +assert(fsyncLockRes.lockCount == 2, tojson(fsyncLockRes)); +currentOp = db.currentOp(); +assert.commandWorked(currentOp); +assert(currentOp.fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server"); + +let shellHandle2 = + startParallelShell("db.getSisterDB('fsyncLockTestDB').multipleLock.insert({x:1});"); +sleep(3000); +assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount()); + +fsyncUnlockRes = db.fsyncUnlock(); +assert.commandWorked(fsyncUnlockRes); +assert(fsyncUnlockRes.lockCount == 1, tojson(fsyncLockRes)); +sleep(3000); +assert.eq(0, fsyncLockDB.multipleLock.find({}).itcount()); + +fsyncUnlockRes = db.fsyncUnlock(); +assert.commandWorked(fsyncUnlockRes); +assert(fsyncUnlockRes.lockCount == 0, tojson(fsyncLockRes)); +shellHandle1(); +shellHandle2(); +assert.eq(2, fsyncLockDB.multipleLock.find({}).itcount()); }()); diff --git a/jstests/core/fts1.js b/jstests/core/fts1.js index 9b95fa8dc14..2ce50a22eeb 100644 --- a/jstests/core/fts1.js +++ b/jstests/core/fts1.js @@ -2,26 +2,26 @@ // collection. 
// @tags: [assumes_no_implicit_index_creation] (function() { - "use strict"; +"use strict"; - load("jstests/libs/fts.js"); +load("jstests/libs/fts.js"); - const coll = db.text1; - coll.drop(); +const coll = db.text1; +coll.drop(); - assert.commandWorked(coll.createIndex({x: "text"}, {name: "x_text"})); +assert.commandWorked(coll.createIndex({x: "text"}, {name: "x_text"})); - assert.eq([], queryIDS(coll, "az"), "A0"); +assert.eq([], queryIDS(coll, "az"), "A0"); - assert.writeOK(coll.insert({_id: 1, x: "az b c"})); - assert.writeOK(coll.insert({_id: 2, x: "az b"})); - assert.writeOK(coll.insert({_id: 3, x: "b c"})); - assert.writeOK(coll.insert({_id: 4, x: "b c d"})); +assert.writeOK(coll.insert({_id: 1, x: "az b c"})); +assert.writeOK(coll.insert({_id: 2, x: "az b"})); +assert.writeOK(coll.insert({_id: 3, x: "b c"})); +assert.writeOK(coll.insert({_id: 4, x: "b c d"})); - assert.eq([1, 2, 3, 4], queryIDS(coll, "c az").sort(), "A1"); - assert.eq([4], queryIDS(coll, "d"), "A2"); +assert.eq([1, 2, 3, 4], queryIDS(coll, "c az").sort(), "A1"); +assert.eq([4], queryIDS(coll, "d"), "A2"); - const index = coll.getIndexes().find(index => index.name === "x_text"); - assert.neq(index, undefined); - assert.gte(index.textIndexVersion, 1, tojson(index)); +const index = coll.getIndexes().find(index => index.name === "x_text"); +assert.neq(index, undefined); +assert.gte(index.textIndexVersion, 1, tojson(index)); }()); diff --git a/jstests/core/fts_array.js b/jstests/core/fts_array.js index 967dd223392..16d51981908 100644 --- a/jstests/core/fts_array.js +++ b/jstests/core/fts_array.js @@ -2,54 +2,52 @@ * Tests for the interaction between FTS indexes and arrays. */ (function() { - "use strict"; +"use strict"; - let coll = db.jstests_fts_array; - coll.drop(); - assert.commandWorked(coll.createIndex({"a.b": 1, words: "text"})); +let coll = db.jstests_fts_array; +coll.drop(); +assert.commandWorked(coll.createIndex({"a.b": 1, words: "text"})); - // Verify that the leading field of the index cannot contain an array. - assert.writeErrorWithCode(coll.insert({a: {b: []}, words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({a: {b: [1]}, words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({a: {b: [1, 2]}, words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({a: [], words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({a: [{b: 1}], words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({a: [{b: 1}, {b: 2}], words: "omnibus"}), - ErrorCodes.CannotBuildIndexKeys); +// Verify that the leading field of the index cannot contain an array. 
+assert.writeErrorWithCode(coll.insert({a: {b: []}, words: "omnibus"}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({a: {b: [1]}, words: "omnibus"}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({a: {b: [1, 2]}, words: "omnibus"}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({a: [], words: "omnibus"}), ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({a: [{b: 1}], words: "omnibus"}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({a: [{b: 1}, {b: 2}], words: "omnibus"}), + ErrorCodes.CannotBuildIndexKeys); - coll.drop(); - assert.commandWorked(coll.createIndex({words: "text", "y.z": 1})); +coll.drop(); +assert.commandWorked(coll.createIndex({words: "text", "y.z": 1})); - // Verify that the trailing field of the index cannot contain an array. - assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: []}}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1]}}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1, 2]}}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({words: "macerate", y: []}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}]}), - ErrorCodes.CannotBuildIndexKeys); - assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}, {z: 2}]}), - ErrorCodes.CannotBuildIndexKeys); +// Verify that the trailing field of the index cannot contain an array. +assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: []}}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1]}}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({words: "macerate", y: {z: [1, 2]}}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({words: "macerate", y: []}), ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}]}), + ErrorCodes.CannotBuildIndexKeys); +assert.writeErrorWithCode(coll.insert({words: "macerate", y: [{z: 1}, {z: 2}]}), + ErrorCodes.CannotBuildIndexKeys); - // Verify that array fields are allowed when positionally indexed. - coll.drop(); - assert.commandWorked(coll.createIndex({"a.0": 1, words: "text"})); - assert.writeOK(coll.insert({a: [0, 1, 2], words: "dander"})); - assert.eq({a: [0, 1, 2], words: "dander"}, - coll.findOne({"a.0": 0, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1})); - assert.writeErrorWithCode(coll.insert({a: [[8, 9], 1, 2], words: "dander"}), - ErrorCodes.CannotBuildIndexKeys); - coll.drop(); - assert.commandWorked(coll.createIndex({"a.0.1": 1, words: "text"})); - assert.writeOK(coll.insert({a: [[8, 9], 1, 2], words: "dander"})); - assert.eq({a: [[8, 9], 1, 2], words: "dander"}, - coll.findOne({"a.0.1": 9, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1})); +// Verify that array fields are allowed when positionally indexed. 
+coll.drop(); +assert.commandWorked(coll.createIndex({"a.0": 1, words: "text"})); +assert.writeOK(coll.insert({a: [0, 1, 2], words: "dander"})); +assert.eq({a: [0, 1, 2], words: "dander"}, + coll.findOne({"a.0": 0, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1})); +assert.writeErrorWithCode(coll.insert({a: [[8, 9], 1, 2], words: "dander"}), + ErrorCodes.CannotBuildIndexKeys); +coll.drop(); +assert.commandWorked(coll.createIndex({"a.0.1": 1, words: "text"})); +assert.writeOK(coll.insert({a: [[8, 9], 1, 2], words: "dander"})); +assert.eq({a: [[8, 9], 1, 2], words: "dander"}, + coll.findOne({"a.0.1": 9, $text: {$search: "dander"}}, {_id: 0, a: 1, words: 1})); }()); diff --git a/jstests/core/fts_diacritic_and_caseinsensitive.js b/jstests/core/fts_diacritic_and_caseinsensitive.js index 898735f3140..476fe9d2ca1 100644 --- a/jstests/core/fts_diacritic_and_caseinsensitive.js +++ b/jstests/core/fts_diacritic_and_caseinsensitive.js @@ -3,32 +3,29 @@ load('jstests/libs/fts.js'); (function() { - "use strict"; - var coll = db.fts_diacritic_and_caseinsensitive; - - coll.drop(); - - assert.writeOK(coll.insert({ - _id: 0, - a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico." - })); - - assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); - - assert.eq([0], queryIDS(coll, "proximo voo a", null)); - assert.eq([0], queryIDS(coll, "átlántico", null)); - assert.eq([0], queryIDS(coll, "\"proxIMO\"", null)); - assert.eq([0], queryIDS(coll, "\"poé\" atlânTico", null)); - assert.eq([0], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null)); - assert.eq([0], queryIDS(coll, "\"proximo voo\" -\"unico atlantico\"", null)); - - assert.eq([], queryIDS(coll, "À", null)); - assert.eq([], queryIDS(coll, "próximoo", null)); - assert.eq([], queryIDS(coll, "proximoo vvôo à a", null)); - assert.eq([], queryIDS(coll, "À -próximo -Vôo", null)); - assert.eq([], queryIDS(coll, "à proximo -voo", null)); - assert.eq([], queryIDS(coll, "mo vo", null)); - assert.eq([], queryIDS(coll, "\"unico atlantico\"", null)); - assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"unico medico\"", null)); - +"use strict"; +var coll = db.fts_diacritic_and_caseinsensitive; + +coll.drop(); + +assert.writeOK(coll.insert( + {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."})); + +assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); + +assert.eq([0], queryIDS(coll, "proximo voo a", null)); +assert.eq([0], queryIDS(coll, "átlántico", null)); +assert.eq([0], queryIDS(coll, "\"proxIMO\"", null)); +assert.eq([0], queryIDS(coll, "\"poé\" atlânTico", null)); +assert.eq([0], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null)); +assert.eq([0], queryIDS(coll, "\"proximo voo\" -\"unico atlantico\"", null)); + +assert.eq([], queryIDS(coll, "À", null)); +assert.eq([], queryIDS(coll, "próximoo", null)); +assert.eq([], queryIDS(coll, "proximoo vvôo à a", null)); +assert.eq([], queryIDS(coll, "À -próximo -Vôo", null)); +assert.eq([], queryIDS(coll, "à proximo -voo", null)); +assert.eq([], queryIDS(coll, "mo vo", null)); +assert.eq([], queryIDS(coll, "\"unico atlantico\"", null)); +assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"unico medico\"", null)); })(); diff --git a/jstests/core/fts_diacritic_and_casesensitive.js b/jstests/core/fts_diacritic_and_casesensitive.js index d5c15034dbc..ae3c51c703b 100644 --- a/jstests/core/fts_diacritic_and_casesensitive.js +++ b/jstests/core/fts_diacritic_and_casesensitive.js @@ -4,62 
+4,51 @@ load('jstests/libs/fts.js'); (function() { - "use strict"; - var coll = db.fts_diacritic_and_casesensitive; - - coll.drop(); - - assert.writeOK(coll.insert({ - _id: 0, - a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico." - })); - - assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); - - assert.eq( - [0], - queryIDS(coll, "próximo vôo à ", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([0], - queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [0], - queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [0], - queryIDS( - coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([0], - queryIDS(coll, - "\"próximo Vôo\" \"único Médico\"", - null, - {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([0], - queryIDS(coll, - "\"próximo Vôo\" -\"único médico\"", - null, - {$diacriticSensitive: true, $caseSensitive: true})); - - assert.eq([], queryIDS(coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([], - queryIDS(coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [], - queryIDS(coll, "proximo vôo à ", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [], - queryIDS(coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [], - queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([], queryIDS(coll, "mo Vô", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq( - [], - queryIDS( - coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true})); - assert.eq([], - queryIDS(coll, - "\"próximo Vôo\" -\"único Médico\"", - null, - {$diacriticSensitive: true, $caseSensitive: true})); - +"use strict"; +var coll = db.fts_diacritic_and_casesensitive; + +coll.drop(); + +assert.writeOK(coll.insert( + {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."})); + +assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); + +assert.eq([0], + queryIDS(coll, "próximo vôo à ", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([0], + queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([0], + queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq( + [0], + queryIDS(coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([0], + queryIDS(coll, + "\"próximo Vôo\" \"único Médico\"", + null, + {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([0], + queryIDS(coll, + "\"próximo Vôo\" -\"único médico\"", + null, + {$diacriticSensitive: true, $caseSensitive: true})); + +assert.eq([], queryIDS(coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([], queryIDS(coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([], + queryIDS(coll, "proximo vôo à ", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq( + [], queryIDS(coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq( + [], queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([], queryIDS(coll, "mo Vô", null, 
{$diacriticSensitive: true, $caseSensitive: true})); +assert.eq( + [], + queryIDS(coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true})); +assert.eq([], + queryIDS(coll, + "\"próximo Vôo\" -\"único Médico\"", + null, + {$diacriticSensitive: true, $caseSensitive: true})); })();
\ No newline at end of file diff --git a/jstests/core/fts_diacriticsensitive.js b/jstests/core/fts_diacriticsensitive.js index e21d5360051..a377b810ea6 100644 --- a/jstests/core/fts_diacriticsensitive.js +++ b/jstests/core/fts_diacriticsensitive.js @@ -3,40 +3,36 @@ load('jstests/libs/fts.js'); (function() { - "use strict"; - var coll = db.fts_diacriticsensitive; - - coll.drop(); - - assert.writeOK(coll.insert({ - _id: 0, - a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico." - })); - - assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); - - assert.throws(function() { - queryIDS(coll, "hello", null, {$diacriticSensitive: "invalid"}); - }); - - assert.eq([0], queryIDS(coll, "PRÓXIMO VÔO À", null, {$diacriticSensitive: true})); - assert.eq([0], queryIDS(coll, "atlântico", null, {$diacriticSensitive: true})); - assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true})); - assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true})); - assert.eq( - [0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true})); - assert.eq( - [0], - queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true})); - - assert.eq([], queryIDS(coll, "à ", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "proximo", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "proximo voo à ", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "à -PRÓXIMO -vôo", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "à proximo -vôo", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true})); - assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true})); - assert.eq( - [], queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true})); - +"use strict"; +var coll = db.fts_diacriticsensitive; + +coll.drop(); + +assert.writeOK(coll.insert( + {_id: 0, a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico."})); + +assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"})); + +assert.throws(function() { + queryIDS(coll, "hello", null, {$diacriticSensitive: "invalid"}); +}); + +assert.eq([0], queryIDS(coll, "PRÓXIMO VÔO À", null, {$diacriticSensitive: true})); +assert.eq([0], queryIDS(coll, "atlântico", null, {$diacriticSensitive: true})); +assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true})); +assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true})); +assert.eq([0], + queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true})); +assert.eq([0], + queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true})); + +assert.eq([], queryIDS(coll, "à ", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "proximo", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "proximo voo à ", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "à -PRÓXIMO -vôo", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "à proximo -vôo", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true})); +assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true})); +assert.eq([], + queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, 
{$diacriticSensitive: true})); })(); diff --git a/jstests/core/fts_dotted_prefix_fields.js b/jstests/core/fts_dotted_prefix_fields.js index f811c4a7203..efbe3a91abf 100644 --- a/jstests/core/fts_dotted_prefix_fields.js +++ b/jstests/core/fts_dotted_prefix_fields.js @@ -1,15 +1,15 @@ // Test that text search works correct when the text index has dotted paths as the non-text // prefixes. (function() { - "use strict"; +"use strict"; - let coll = db.fts_dotted_prefix_fields; - coll.drop(); - assert.commandWorked(coll.createIndex({"a.x": 1, "a.y": 1, "b.x": 1, "b.y": 1, words: "text"})); - assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"})); - assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"})); +let coll = db.fts_dotted_prefix_fields; +coll.drop(); +assert.commandWorked(coll.createIndex({"a.x": 1, "a.y": 1, "b.x": 1, "b.y": 1, words: "text"})); +assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 3, y: 4}, words: "lorem ipsum dolor sit"})); +assert.writeOK(coll.insert({a: {x: 1, y: 2}, b: {x: 5, y: 4}, words: "lorem ipsum dolor sit"})); - assert.eq(1, - coll.find({$text: {$search: "lorem ipsum"}, "a.x": 1, "a.y": 2, "b.x": 3, "b.y": 4}) - .itcount()); +assert.eq( + 1, + coll.find({$text: {$search: "lorem ipsum"}, "a.x": 1, "a.y": 2, "b.x": 3, "b.y": 4}).itcount()); }()); diff --git a/jstests/core/fts_explain.js b/jstests/core/fts_explain.js index 9245ac7ec52..0b147e5987a 100644 --- a/jstests/core/fts_explain.js +++ b/jstests/core/fts_explain.js @@ -5,35 +5,35 @@ // Test $text explain. SERVER-12037. (function() { - "use strict"; - - const coll = db.fts_explain; - let res; - - coll.drop(); - res = coll.ensureIndex({content: "text"}, {default_language: "none"}); - assert.commandWorked(res); - - res = coll.insert({content: "some data"}); - assert.writeOK(res); - - const explain = - coll.find({$text: {$search: "\"a\" -b -\"c\""}}, {content: 1, score: {$meta: "textScore"}}) - .explain(true); - let stage = explain.executionStats.executionStages; - if ("SINGLE_SHARD" === stage.stage) { - stage = stage.shards[0].executionStages; - } - - assert.eq(stage.stage, "PROJECTION_DEFAULT"); - - let textStage = stage.inputStage; - assert.eq(textStage.stage, "TEXT"); - assert.gte(textStage.textIndexVersion, 1, "textIndexVersion incorrect or missing."); - assert.eq(textStage.inputStage.stage, "TEXT_MATCH"); - assert.eq(textStage.inputStage.inputStage.stage, "TEXT_OR"); - assert.eq(textStage.parsedTextQuery.terms, ["a"]); - assert.eq(textStage.parsedTextQuery.negatedTerms, ["b"]); - assert.eq(textStage.parsedTextQuery.phrases, ["a"]); - assert.eq(textStage.parsedTextQuery.negatedPhrases, ["c"]); +"use strict"; + +const coll = db.fts_explain; +let res; + +coll.drop(); +res = coll.ensureIndex({content: "text"}, {default_language: "none"}); +assert.commandWorked(res); + +res = coll.insert({content: "some data"}); +assert.writeOK(res); + +const explain = + coll.find({$text: {$search: "\"a\" -b -\"c\""}}, {content: 1, score: {$meta: "textScore"}}) + .explain(true); +let stage = explain.executionStats.executionStages; +if ("SINGLE_SHARD" === stage.stage) { + stage = stage.shards[0].executionStages; +} + +assert.eq(stage.stage, "PROJECTION_DEFAULT"); + +let textStage = stage.inputStage; +assert.eq(textStage.stage, "TEXT"); +assert.gte(textStage.textIndexVersion, 1, "textIndexVersion incorrect or missing."); +assert.eq(textStage.inputStage.stage, "TEXT_MATCH"); +assert.eq(textStage.inputStage.inputStage.stage, "TEXT_OR"); 
+assert.eq(textStage.parsedTextQuery.terms, ["a"]); +assert.eq(textStage.parsedTextQuery.negatedTerms, ["b"]); +assert.eq(textStage.parsedTextQuery.phrases, ["a"]); +assert.eq(textStage.parsedTextQuery.negatedPhrases, ["c"]); })(); diff --git a/jstests/core/fts_index_version2.js b/jstests/core/fts_index_version2.js index 05fecab36ee..f8c57f4e2d7 100644 --- a/jstests/core/fts_index_version2.js +++ b/jstests/core/fts_index_version2.js @@ -3,31 +3,28 @@ load('jstests/libs/fts.js'); (function() { - "use strict"; - var coll = db.fts_index_version2; - - coll.drop(); - - assert.writeOK(coll.insert({ - _id: 0, - a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico." - })); - - assert.commandWorked( - coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2})); - - assert.eq([0], queryIDS(coll, "próximo vôo à ", null)); - assert.eq([0], queryIDS(coll, "atlântico", null)); - assert.eq([0], queryIDS(coll, "\"próxIMO\"", null)); - assert.eq([0], queryIDS(coll, "\"põe\" atlânTico", null)); - assert.eq([0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null)); - assert.eq([0], queryIDS(coll, "\"próximo vôo\" -\"único Atlântico\"", null)); - - assert.eq([], queryIDS(coll, "proximo voo a", null)); - assert.eq([], queryIDS(coll, "átlántico", null)); - assert.eq([], queryIDS(coll, "\"proxIMO\"", null)); - assert.eq([], queryIDS(coll, "\"poé\" atlânTico", null)); - assert.eq([], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null)); - assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"único Médico\"", null)); - +"use strict"; +var coll = db.fts_index_version2; + +coll.drop(); + +assert.writeOK(coll.insert( + {_id: 0, a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."})); + +assert.commandWorked( + coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2})); + +assert.eq([0], queryIDS(coll, "próximo vôo à ", null)); +assert.eq([0], queryIDS(coll, "atlântico", null)); +assert.eq([0], queryIDS(coll, "\"próxIMO\"", null)); +assert.eq([0], queryIDS(coll, "\"põe\" atlânTico", null)); +assert.eq([0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null)); +assert.eq([0], queryIDS(coll, "\"próximo vôo\" -\"único Atlântico\"", null)); + +assert.eq([], queryIDS(coll, "proximo voo a", null)); +assert.eq([], queryIDS(coll, "átlántico", null)); +assert.eq([], queryIDS(coll, "\"proxIMO\"", null)); +assert.eq([], queryIDS(coll, "\"poé\" atlânTico", null)); +assert.eq([], queryIDS(coll, "\"próximo voo\" \"unico médico\"", null)); +assert.eq([], queryIDS(coll, "\"próximo Vôo\" -\"único Médico\"", null)); })(); diff --git a/jstests/core/fts_querylang.js b/jstests/core/fts_querylang.js index de27b65ba5b..a80258940ba 100644 --- a/jstests/core/fts_querylang.js +++ b/jstests/core/fts_querylang.js @@ -1,86 +1,91 @@ // Test the $text query operator. 
// @tags: [requires_non_retryable_writes] (function() { - "use strict"; +"use strict"; - const coll = db.getCollection("fts_querylang"); - coll.drop(); +const coll = db.getCollection("fts_querylang"); +coll.drop(); - assert.commandWorked(coll.insert({_id: 0, unindexedField: 0, a: "textual content"})); - assert.commandWorked(coll.insert({_id: 1, unindexedField: 1, a: "additional content"})); - assert.commandWorked(coll.insert({_id: 2, unindexedField: 2, a: "irrelevant content"})); - assert.commandWorked(coll.createIndex({a: "text"})); +assert.commandWorked(coll.insert({_id: 0, unindexedField: 0, a: "textual content"})); +assert.commandWorked(coll.insert({_id: 1, unindexedField: 1, a: "additional content"})); +assert.commandWorked(coll.insert({_id: 2, unindexedField: 2, a: "irrelevant content"})); +assert.commandWorked(coll.createIndex({a: "text"})); - // Test text query with no results. - assert.eq(false, coll.find({$text: {$search: "words"}}).hasNext()); +// Test text query with no results. +assert.eq(false, coll.find({$text: {$search: "words"}}).hasNext()); - // Test basic text query. - let results = coll.find({$text: {$search: "textual content -irrelevant"}}).toArray(); - assert.eq(results.length, 2, results); - assert.neq(results[0]._id, 2, results); - assert.neq(results[1]._id, 2, results); +// Test basic text query. +let results = coll.find({$text: {$search: "textual content -irrelevant"}}).toArray(); +assert.eq(results.length, 2, results); +assert.neq(results[0]._id, 2, results); +assert.neq(results[1]._id, 2, results); - // Test sort with basic text query. - results = coll.find({$text: {$search: "textual content -irrelevant"}}) - .sort({unindexedField: 1}) - .toArray(); - assert.eq(results.length, 2, results); - assert.eq(results[0]._id, 0, results); - assert.eq(results[1]._id, 1, results); +// Test sort with basic text query. +results = coll.find({$text: {$search: "textual content -irrelevant"}}) + .sort({unindexedField: 1}) + .toArray(); +assert.eq(results.length, 2, results); +assert.eq(results[0]._id, 0, results); +assert.eq(results[1]._id, 1, results); - // Test skip with basic text query. - results = coll.find({$text: {$search: "textual content -irrelevant"}}) - .sort({unindexedField: 1}) - .skip(1) - .toArray(); - assert.eq(results.length, 1, results); - assert.eq(results[0]._id, 1, results); +// Test skip with basic text query. +results = coll.find({$text: {$search: "textual content -irrelevant"}}) + .sort({unindexedField: 1}) + .skip(1) + .toArray(); +assert.eq(results.length, 1, results); +assert.eq(results[0]._id, 1, results); - // Test limit with basic text query. - results = coll.find({$text: {$search: "textual content -irrelevant"}}) - .sort({unindexedField: 1}) - .limit(1) - .toArray(); - assert.eq(results.length, 1, results); - assert.eq(results[0]._id, 0, results); +// Test limit with basic text query. +results = coll.find({$text: {$search: "textual content -irrelevant"}}) + .sort({unindexedField: 1}) + .limit(1) + .toArray(); +assert.eq(results.length, 1, results); +assert.eq(results[0]._id, 0, results); - // Test $and of basic text query with indexed expression. - results = coll.find({$text: {$search: "content -irrelevant"}, _id: 1}).toArray(); - assert.eq(results.length, 1, results); - assert.eq(results[0]._id, 1, results); +// Test $and of basic text query with indexed expression. 
+results = coll.find({$text: {$search: "content -irrelevant"}, _id: 1}).toArray(); +assert.eq(results.length, 1, results); +assert.eq(results[0]._id, 1, results); - // Test $and of basic text query with indexed expression and bad language. - assert.commandFailedWithCode(assert.throws(function() { - coll.find({$text: {$search: "content -irrelevant", $language: "spanglish"}, _id: 1}) - .itcount(); - }), - ErrorCodes.BadValue); +// Test $and of basic text query with indexed expression and bad language. +assert.commandFailedWithCode( + assert.throws(function() { + coll.find({ + $text: {$search: "content -irrelevant", $language: "spanglish"}, + _id: 1 + }) + .itcount(); + }), + ErrorCodes.BadValue); - // Test $and of basic text query with unindexed expression. - results = coll.find({$text: {$search: "content -irrelevant"}, unindexedField: 1}).toArray(); - assert.eq(results.length, 1, results); - assert.eq(results[0]._id, 1, results); +// Test $and of basic text query with unindexed expression. +results = coll.find({$text: {$search: "content -irrelevant"}, unindexedField: 1}).toArray(); +assert.eq(results.length, 1, results); +assert.eq(results[0]._id, 1, results); - // Test $language. - let cursor = coll.find({$text: {$search: "contents", $language: "none"}}); - assert.eq(false, cursor.hasNext()); +// Test $language. +let cursor = coll.find({$text: {$search: "contents", $language: "none"}}); +assert.eq(false, cursor.hasNext()); - cursor = coll.find({$text: {$search: "contents", $language: "EN"}}); - assert.eq(true, cursor.hasNext()); +cursor = coll.find({$text: {$search: "contents", $language: "EN"}}); +assert.eq(true, cursor.hasNext()); - cursor = coll.find({$text: {$search: "contents", $language: "spanglish"}}); - assert.commandFailedWithCode(assert.throws(function() { - cursor.next(); - }), - ErrorCodes.BadValue); +cursor = coll.find({$text: {$search: "contents", $language: "spanglish"}}); +assert.commandFailedWithCode(assert.throws(function() { + cursor.next(); + }), + ErrorCodes.BadValue); - // Test update with $text. - coll.update({$text: {$search: "textual content -irrelevant"}}, {$set: {b: 1}}, {multi: true}); - assert.eq(2, coll.find({b: 1}).itcount(), 'incorrect number of documents updated'); +// Test update with $text. +coll.update({$text: {$search: "textual content -irrelevant"}}, {$set: {b: 1}}, {multi: true}); +assert.eq(2, coll.find({b: 1}).itcount(), 'incorrect number of documents updated'); - // $text cannot be contained within a $nor. - assert.commandFailedWithCode(assert.throws(function() { - coll.find({$nor: [{$text: {$search: 'a'}}]}).itcount(); - }), - ErrorCodes.BadValue); +// $text cannot be contained within a $nor. +assert.commandFailedWithCode( + assert.throws(function() { + coll.find({$nor: [{$text: {$search: 'a'}}]}).itcount(); + }), + ErrorCodes.BadValue); }()); diff --git a/jstests/core/fts_score_sort.js b/jstests/core/fts_score_sort.js index e074ca68ca2..9a4cc1a120b 100644 --- a/jstests/core/fts_score_sort.js +++ b/jstests/core/fts_score_sort.js @@ -1,61 +1,61 @@ // Test sorting with text score metadata. (function() { - "use strict"; - - var t = db.getSiblingDB("test").getCollection("fts_score_sort"); - t.drop(); - - assert.writeOK(t.insert({_id: 0, a: "textual content"})); - assert.writeOK(t.insert({_id: 1, a: "additional content"})); - assert.writeOK(t.insert({_id: 2, a: "irrelevant content"})); - assert.commandWorked(t.ensureIndex({a: "text"})); - - // $meta sort specification should be rejected if it has additional keys. 
- assert.throws(function() { - t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) - .sort({score: {$meta: "textScore", extra: 1}}) - .itcount(); - }); - - // $meta sort specification should be rejected if the type of meta sort is not known. - assert.throws(function() { - t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) - .sort({score: {$meta: "unknown"}}) - .itcount(); - }); - - // Sort spefication should be rejected if a $-keyword other than $meta is used. - assert.throws(function() { - t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) - .sort({score: {$notMeta: "textScore"}}) - .itcount(); - }); - - // Sort spefication should be rejected if it is a string, not an object with $meta. - assert.throws(function() { - t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) - .sort({score: "textScore"}) - .itcount(); - }); - - // Sort by the text score. - var results = - t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}) - .sort({score: {$meta: "textScore"}}) - .toArray(); - assert.eq(results.length, 2); - assert.eq(results[0]._id, 0); - assert.eq(results[1]._id, 1); - assert.gt(results[0].score, results[1].score); - - // Sort by {_id descending, score} and verify the order is right. - var results = - t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}) - .sort({_id: -1, score: {$meta: "textScore"}}) - .toArray(); - assert.eq(results.length, 2); - assert.eq(results[0]._id, 1); - assert.eq(results[1]._id, 0); - // Note the reversal from above. - assert.lt(results[0].score, results[1].score); +"use strict"; + +var t = db.getSiblingDB("test").getCollection("fts_score_sort"); +t.drop(); + +assert.writeOK(t.insert({_id: 0, a: "textual content"})); +assert.writeOK(t.insert({_id: 1, a: "additional content"})); +assert.writeOK(t.insert({_id: 2, a: "irrelevant content"})); +assert.commandWorked(t.ensureIndex({a: "text"})); + +// $meta sort specification should be rejected if it has additional keys. +assert.throws(function() { + t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) + .sort({score: {$meta: "textScore", extra: 1}}) + .itcount(); +}); + +// $meta sort specification should be rejected if the type of meta sort is not known. +assert.throws(function() { + t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) + .sort({score: {$meta: "unknown"}}) + .itcount(); +}); + +// Sort spefication should be rejected if a $-keyword other than $meta is used. +assert.throws(function() { + t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) + .sort({score: {$notMeta: "textScore"}}) + .itcount(); +}); + +// Sort spefication should be rejected if it is a string, not an object with $meta. +assert.throws(function() { + t.find({$text: {$search: "textual content"}}, {score: {$meta: "textScore"}}) + .sort({score: "textScore"}) + .itcount(); +}); + +// Sort by the text score. +var results = + t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}) + .sort({score: {$meta: "textScore"}}) + .toArray(); +assert.eq(results.length, 2); +assert.eq(results[0]._id, 0); +assert.eq(results[1]._id, 1); +assert.gt(results[0].score, results[1].score); + +// Sort by {_id descending, score} and verify the order is right. 
+var results = + t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}) + .sort({_id: -1, score: {$meta: "textScore"}}) + .toArray(); +assert.eq(results.length, 2); +assert.eq(results[0]._id, 1); +assert.eq(results[1]._id, 0); +// Note the reversal from above. +assert.lt(results[0].score, results[1].score); }()); diff --git a/jstests/core/fts_spanish.js b/jstests/core/fts_spanish.js index 89915cf2889..264e1d9125b 100644 --- a/jstests/core/fts_spanish.js +++ b/jstests/core/fts_spanish.js @@ -1,37 +1,32 @@ (function() { - "use strict"; +"use strict"; - load("jstests/libs/fts.js"); +load("jstests/libs/fts.js"); - const coll = db.text_spanish; - coll.drop(); +const coll = db.text_spanish; +coll.drop(); - assert.writeOK(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"})); - assert.writeOK( - coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"})); - assert.writeOK(coll.insert( - {_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"})); - assert.writeOK(coll.insert({ - _id: 4, - language: "en", - title: "My fourth blog", - text: "This stemmed blog is in english" - })); +assert.writeOK(coll.insert({_id: 1, title: "mi blog", text: "Este es un blog de prueba"})); +assert.writeOK(coll.insert({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"})); +assert.writeOK(coll.insert( + {_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"})); +assert.writeOK(coll.insert( + {_id: 4, language: "en", title: "My fourth blog", text: "This stemmed blog is in english"})); - // Create a text index, giving more weight to the "title" field. - assert.commandWorked(coll.createIndex({title: "text", text: "text"}, - {weights: {title: 10}, default_language: "es"})); +// Create a text index, giving more weight to the "title" field. +assert.commandWorked(coll.createIndex({title: "text", text: "text"}, + {weights: {title: 10}, default_language: "es"})); - assert.eq(4, coll.count({$text: {$search: "blog"}})); - assert.eq([4], queryIDS(coll, "stem")); - assert.eq([3], queryIDS(coll, "stemmed")); - assert.eq([4], queryIDS(coll, "stemmed", null, {"$language": "en"})); - assert.eq([1, 2], queryIDS(coll, "prueba").sort()); +assert.eq(4, coll.count({$text: {$search: "blog"}})); +assert.eq([4], queryIDS(coll, "stem")); +assert.eq([3], queryIDS(coll, "stemmed")); +assert.eq([4], queryIDS(coll, "stemmed", null, {"$language": "en"})); +assert.eq([1, 2], queryIDS(coll, "prueba").sort()); - assert.writeError(coll.insert({_id: 5, language: "spanglish", title: "", text: ""})); +assert.writeError(coll.insert({_id: 5, language: "spanglish", title: "", text: ""})); - assert.commandWorked(coll.dropIndexes()); - assert.commandFailedWithCode( - coll.createIndex({title: "text", text: "text"}, {default_language: "spanglish"}), - ErrorCodes.CannotCreateIndex); +assert.commandWorked(coll.dropIndexes()); +assert.commandFailedWithCode( + coll.createIndex({title: "text", text: "text"}, {default_language: "spanglish"}), + ErrorCodes.CannotCreateIndex); }()); diff --git a/jstests/core/fts_trailing_fields.js b/jstests/core/fts_trailing_fields.js index 2c7f79b423d..3f46cd1b1b7 100644 --- a/jstests/core/fts_trailing_fields.js +++ b/jstests/core/fts_trailing_fields.js @@ -1,22 +1,22 @@ // Tests for predicates which can use the trailing field of a text index. 
(function() { - "use strict"; +"use strict"; - const coll = db.fts_trailing_fields; +const coll = db.fts_trailing_fields; - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1})); - assert.writeOK(coll.insert({a: 2, b: "lorem ipsum"})); +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1})); +assert.writeOK(coll.insert({a: 2, b: "lorem ipsum"})); - assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: true}}).itcount()); - assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: null}).itcount()); - assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: false}}).itcount()); +assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: true}}).itcount()); +assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: null}).itcount()); +assert.eq(1, coll.find({a: 2, $text: {$search: "lorem"}, c: {$exists: false}}).itcount()); - // An equality predicate on the leading field isn't useful, but it shouldn't cause any problems. - // Same with an $elemMatch predicate on one of the trailing fields. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, b: "text", "c.d": 1})); - assert.writeOK(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}})); - assert.eq(0, coll.find({a: [1, 2], $text: {$search: "lorem"}}).itcount()); - assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$elemMatch: {d: 3}}}).itcount()); +// An equality predicate on the leading field isn't useful, but it shouldn't cause any problems. +// Same with an $elemMatch predicate on one of the trailing fields. +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, b: "text", "c.d": 1})); +assert.writeOK(coll.insert({a: 2, b: "lorem ipsum", c: {d: 3}})); +assert.eq(0, coll.find({a: [1, 2], $text: {$search: "lorem"}}).itcount()); +assert.eq(0, coll.find({a: 2, $text: {$search: "lorem"}, c: {$elemMatch: {d: 3}}}).itcount()); }()); diff --git a/jstests/core/function_string_representations.js b/jstests/core/function_string_representations.js index 840038c766f..106cdcda9f2 100644 --- a/jstests/core/function_string_representations.js +++ b/jstests/core/function_string_representations.js @@ -2,40 +2,41 @@ // does_not_support_stepdowns, // ] -/** Demonstrate that mapReduce can accept functions represented by strings. +/** + * Demonstrate that mapReduce can accept functions represented by strings. * Some drivers do not have a type which represents a Javascript function. These languages represent * the arguments to mapReduce as strings. 
*/ (function() { - "use strict"; - - var col = db.function_string_representations; - col.drop(); - assert.writeOK(col.insert({ - _id: "abc123", - ord_date: new Date("Oct 04, 2012"), - status: 'A', - price: 25, - items: [{sku: "mmm", qty: 5, price: 2.5}, {sku: "nnn", qty: 5, price: 2.5}] - })); - - var mapFunction = "function() {emit(this._id, this.price);}"; - var reduceFunction = "function(keyCustId, valuesPrices) {return Array.sum(valuesPrices);}"; - assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"})); - - // Provided strings may end with semicolons and/or whitespace - mapFunction += " ; "; - reduceFunction += " ; "; - assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"})); - - // $where exhibits the same behavior - var whereFunction = "function() {return this.price === 25;}"; - assert.eq(1, col.find({$where: whereFunction}).itcount()); - - whereFunction += ";"; - assert.eq(1, col.find({$where: whereFunction}).itcount()); - - // system.js does not need to be tested, as its contents types' are preserved, and - // strings are not promoted into functions. +"use strict"; + +var col = db.function_string_representations; +col.drop(); +assert.writeOK(col.insert({ + _id: "abc123", + ord_date: new Date("Oct 04, 2012"), + status: 'A', + price: 25, + items: [{sku: "mmm", qty: 5, price: 2.5}, {sku: "nnn", qty: 5, price: 2.5}] +})); + +var mapFunction = "function() {emit(this._id, this.price);}"; +var reduceFunction = "function(keyCustId, valuesPrices) {return Array.sum(valuesPrices);}"; +assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"})); + +// Provided strings may end with semicolons and/or whitespace +mapFunction += " ; "; +reduceFunction += " ; "; +assert.commandWorked(col.mapReduce(mapFunction, reduceFunction, {out: "map_reduce_example"})); + +// $where exhibits the same behavior +var whereFunction = "function() {return this.price === 25;}"; +assert.eq(1, col.find({$where: whereFunction}).itcount()); + +whereFunction += ";"; +assert.eq(1, col.find({$where: whereFunction}).itcount()); + +// system.js does not need to be tested, as its contents types' are preserved, and +// strings are not promoted into functions. })(); diff --git a/jstests/core/geo3.js b/jstests/core/geo3.js index 6735eb78eb1..b5fec6769e9 100644 --- a/jstests/core/geo3.js +++ b/jstests/core/geo3.js @@ -1,77 +1,79 @@ // @tags: [requires_fastcount, operations_longer_than_stepdown_interval_in_txns] (function() { - t = db.geo3; - t.drop(); - - n = 1; - arr = []; - for (var x = -100; x < 100; x += 2) { - for (var y = -100; y < 100; y += 2) { - arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5}); - } - } - t.insert(arr); - assert.eq(t.count(), 100 * 100); - assert.eq(t.count(), n - 1); - - t.ensureIndex({loc: "2d"}); - - // Test the "query" parameter in $geoNear. 
- - let res = t.aggregate([ - {$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}}, - {$limit: 10}, - ]).toArray(); - assert.eq(10, res.length, tojson(res)); - res.forEach(doc => assert.eq(2, doc.a, tojson(doc))); - - function avgA(q, len) { - if (!len) - len = 10; - var realq = {loc: {$near: [50, 50]}}; - if (q) - Object.extend(realq, q); - var as = t.find(realq).limit(len).map(function(z) { - return z.a; - }); - assert.eq(len, as.length, "length in avgA"); - return Array.avg(as); - } - - function testFiltering(msg) { - assert.gt(2, avgA({}), msg + " testFiltering 1 "); - assert.eq(2, avgA({a: 2}), msg + " testFiltering 2 "); - assert.eq(4, avgA({a: 4}), msg + " testFiltering 3 "); +t = db.geo3; +t.drop(); + +n = 1; +arr = []; +for (var x = -100; x < 100; x += 2) { + for (var y = -100; y < 100; y += 2) { + arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5}); } +} +t.insert(arr); +assert.eq(t.count(), 100 * 100); +assert.eq(t.count(), n - 1); - testFiltering("just loc"); +t.ensureIndex({loc: "2d"}); - assert.commandWorked(t.dropIndex({loc: "2d"})); - assert.commandWorked(t.ensureIndex({loc: "2d", a: 1})); +// Test the "query" parameter in $geoNear. - res = t.aggregate([ +let res = t.aggregate([ {$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}}, {$limit: 10}, ]).toArray(); - assert.eq(10, res.length, "B3"); - res.forEach(doc => assert.eq(2, doc.a, tojson(doc))); - - testFiltering("loc and a"); - - assert.commandWorked(t.dropIndex({loc: "2d", a: 1})); - assert.commandWorked(t.ensureIndex({loc: "2d", b: 1})); - - testFiltering("loc and b"); - - q = {loc: {$near: [50, 50]}}; - assert.eq(100, t.find(q).limit(100).itcount(), "D1"); - assert.eq(100, t.find(q).limit(100).size(), "D2"); - - assert.eq(20, t.find(q).limit(20).itcount(), "D3"); - assert.eq(20, t.find(q).limit(20).size(), "D4"); - - // SERVER-14039 Wrong limit after skip with $nearSphere, 2d index - assert.eq(10, t.find(q).skip(10).limit(10).itcount(), "D5"); - assert.eq(10, t.find(q).skip(10).limit(10).size(), "D6"); +assert.eq(10, res.length, tojson(res)); +res.forEach(doc => assert.eq(2, doc.a, tojson(doc))); + +function avgA(q, len) { + if (!len) + len = 10; + var realq = {loc: {$near: [50, 50]}}; + if (q) + Object.extend(realq, q); + var as = t.find(realq).limit(len).map(function(z) { + return z.a; + }); + assert.eq(len, as.length, "length in avgA"); + return Array.avg(as); +} + +function testFiltering(msg) { + assert.gt(2, avgA({}), msg + " testFiltering 1 "); + assert.eq(2, avgA({a: 2}), msg + " testFiltering 2 "); + assert.eq(4, avgA({a: 4}), msg + " testFiltering 3 "); +} + +testFiltering("just loc"); + +assert.commandWorked(t.dropIndex({loc: "2d"})); +assert.commandWorked(t.ensureIndex({loc: "2d", a: 1})); + +res = t.aggregate([ + {$geoNear: {near: [50, 50], distanceField: "dist", query: {a: 2}}}, + {$limit: 10}, + ]).toArray(); +assert.eq(10, res.length, "B3"); +res.forEach(doc => assert.eq(2, doc.a, tojson(doc))); + +testFiltering("loc and a"); + +assert.commandWorked(t.dropIndex({loc: "2d", a: 1})); +assert.commandWorked(t.ensureIndex({loc: "2d", b: 1})); + +testFiltering("loc and b"); + +q = { + loc: {$near: [50, 50]} +}; +assert.eq(100, t.find(q).limit(100).itcount(), "D1"); +assert.eq(100, t.find(q).limit(100).size(), "D2"); + +assert.eq(20, t.find(q).limit(20).itcount(), "D3"); +assert.eq(20, t.find(q).limit(20).size(), "D4"); + +// SERVER-14039 Wrong limit after skip with $nearSphere, 2d index +assert.eq(10, t.find(q).skip(10).limit(10).itcount(), "D5"); 
+assert.eq(10, t.find(q).skip(10).limit(10).size(), "D6"); }()); diff --git a/jstests/core/geo_2d_trailing_fields.js b/jstests/core/geo_2d_trailing_fields.js index 60c1e207ca7..3cb25a6e9ce 100644 --- a/jstests/core/geo_2d_trailing_fields.js +++ b/jstests/core/geo_2d_trailing_fields.js @@ -1,49 +1,47 @@ // Tests for predicates which can use the trailing field of a 2d index. (function() { - "use strict"; - - const coll = db.geo_2d_trailing_fields; - - const isMaster = assert.commandWorked(db.adminCommand({isMaster: 1})); - - coll.drop(); - assert.commandWorked(coll.createIndex({a: "2d", b: 1})); - assert.writeOK(coll.insert({a: [0, 0]})); - - // Verify that $near queries handle existence predicates over the trailing fields correctly. - assert.eq(0, coll.find({a: {$near: [0, 0]}, b: {$exists: true}}).itcount()); - assert.eq(1, coll.find({a: {$near: [0, 0]}, b: null}).itcount()); - assert.eq(1, coll.find({a: {$near: [0, 0]}, b: {$exists: false}}).itcount()); - - // Verify that non-near 2d queries handle existence predicates over the trailing fields - // correctly. - assert.eq(0, - coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}).itcount()); - assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: null}).itcount()); - assert.eq(1, - coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: false}}).itcount()); - - coll.drop(); - assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1})); - assert.writeOK(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]})); - - // Verify that $near queries correctly handle predicates which cannot be covered due to array - // semantics. - assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": [2, 3]}).itcount()); - assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": {$type: "array"}}).itcount()); - - // Verify that non-near 2d queries correctly handle predicates which cannot be covered due to - // array semantics. - assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": [2, 3]}).itcount()); - assert.eq( - 0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": {$type: "array"}}).itcount()); - - coll.drop(); - assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1})); - assert.writeOK(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]})); - - // Verify that non-near 2d queries correctly handle predicates which cannot be covered due to - // array semantics. - assert.eq( - 1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}).itcount()); +"use strict"; + +const coll = db.geo_2d_trailing_fields; + +const isMaster = assert.commandWorked(db.adminCommand({isMaster: 1})); + +coll.drop(); +assert.commandWorked(coll.createIndex({a: "2d", b: 1})); +assert.writeOK(coll.insert({a: [0, 0]})); + +// Verify that $near queries handle existence predicates over the trailing fields correctly. +assert.eq(0, coll.find({a: {$near: [0, 0]}, b: {$exists: true}}).itcount()); +assert.eq(1, coll.find({a: {$near: [0, 0]}, b: null}).itcount()); +assert.eq(1, coll.find({a: {$near: [0, 0]}, b: {$exists: false}}).itcount()); + +// Verify that non-near 2d queries handle existence predicates over the trailing fields +// correctly. 
+assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: true}}).itcount()); +assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: null}).itcount()); +assert.eq(1, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$exists: false}}).itcount()); + +coll.drop(); +assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1})); +assert.writeOK(coll.insert({a: [0, 0], b: [{c: 2}, {c: 3}]})); + +// Verify that $near queries correctly handle predicates which cannot be covered due to array +// semantics. +assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": [2, 3]}).itcount()); +assert.eq(0, coll.find({a: {$near: [0, 0]}, "b.c": {$type: "array"}}).itcount()); + +// Verify that non-near 2d queries correctly handle predicates which cannot be covered due to +// array semantics. +assert.eq(0, coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": [2, 3]}).itcount()); +assert.eq(0, + coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, "b.c": {$type: "array"}}).itcount()); + +coll.drop(); +assert.commandWorked(coll.createIndex({a: "2d", "b.c": 1})); +assert.writeOK(coll.insert({a: [0, 0], b: [{c: 1}, {c: 2}]})); + +// Verify that non-near 2d queries correctly handle predicates which cannot be covered due to +// array semantics. +assert.eq(1, + coll.find({a: {$geoWithin: {$center: [[0, 0], 1]}}, b: {$elemMatch: {c: 1}}}).itcount()); }()); diff --git a/jstests/core/geo_2d_with_geojson_point.js b/jstests/core/geo_2d_with_geojson_point.js index 23592e004f8..5c30d6e30ab 100644 --- a/jstests/core/geo_2d_with_geojson_point.js +++ b/jstests/core/geo_2d_with_geojson_point.js @@ -9,5 +9,5 @@ t.ensureIndex({loc: '2d'}); var geoJSONPoint = {type: 'Point', coordinates: [0, 0]}; print(assert.throws(function() { - t.findOne({loc: {$near: {$geometry: geoJSONPoint}}}); -}, [], 'querying 2d index with GeoJSON point.')); + t.findOne({loc: {$near: {$geometry: geoJSONPoint}}}); + }, [], 'querying 2d index with GeoJSON point.')); diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js index 3f93d44ef0a..1c4e0d42b87 100644 --- a/jstests/core/geo_big_polygon2.js +++ b/jstests/core/geo_big_polygon2.js @@ -36,386 +36,387 @@ var objects = [ {name: "just north of equator", geo: {type: "Point", coordinates: [-97.9, 0.1]}}, {name: "just south of equator", geo: {type: "Point", coordinates: [-97.9, -0.1]}}, { - name: "north pole - crs84CRS", - geo: {type: "Point", coordinates: [-97.9, 90.0], crs: crs84CRS} - }, - { - name: "south pole - epsg4326CRS", - geo: {type: "Point", coordinates: [-97.9, -90.0], crs: epsg4326CRS} + name: "north pole - crs84CRS", + geo: {type: "Point", coordinates: [-97.9, 90.0], crs: crs84CRS} + }, + { + name: "south pole - epsg4326CRS", + geo: {type: "Point", coordinates: [-97.9, -90.0], crs: epsg4326CRS} + }, + { + name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC", + geo: { + type: "LineString", + coordinates: [ + [-122.1611953, 37.4420407], + [-118.283638, 34.028517], + [-109.045223, 36.9990835], + [-97.850404, 30.3921555], + [-97.904187, 30.395457], + [-86.600836, 30.398147], + [-77.357837, 38.9589935], + [-73.987723, 40.7575074] + ] + } + }, + { + name: "1024 point long line string from south pole to north pole", + geo: {type: "LineString", coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)} + }, + { + name: "line crossing equator - epsg4326CRS", + geo: { + type: "LineString", + coordinates: [[-77.0451853, -12.0553442], [-76.7784557, 18.0098528]], + crs: epsg4326CRS + } + }, + { + name: "GeoJson polygon", 
+ geo: { + type: "Polygon", + coordinates: + [[[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]]] + } + }, + { + name: "polygon w/ hole", + geo: { + type: "Polygon", + coordinates: [ + [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], + [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]] + ] + } + }, + { + name: "polygon w/ two holes", + geo: { + type: "Polygon", + coordinates: [ + [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], + [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]], + [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]] + ] + } + }, + { + name: "polygon covering North pole", + geo: { + type: "Polygon", + coordinates: [[[-120.0, 89.0], [0.0, 89.0], [120.0, 89.0], [-120.0, 89.0]]] + } + }, + { + name: "polygon covering South pole", + geo: { + type: "Polygon", + coordinates: [[[-120.0, -89.0], [0.0, -89.0], [120.0, -89.0], [-120.0, -89.0]]] + } + }, + { + name: "big polygon/rectangle covering both poles", + geo: { + type: "Polygon", + coordinates: [ + [[-130.0, 89.0], [-120.0, 89.0], [-120.0, -89.0], [-130.0, -89.0], [-130.0, 89.0]] + ], + crs: strictCRS + } + }, + { + name: "polygon (triangle) w/ hole at North pole", + geo: { + type: "Polygon", + coordinates: [ + [[-120.0, 80.0], [0.0, 80.0], [120.0, 80.0], [-120.0, 80.0]], + [[-120.0, 88.0], [0.0, 88.0], [120.0, 88.0], [-120.0, 88.0]] + ] + } }, - { - name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC", - geo: { - type: "LineString", - coordinates: [ - [-122.1611953, 37.4420407], - [-118.283638, 34.028517], - [-109.045223, 36.9990835], - [-97.850404, 30.3921555], - [-97.904187, 30.395457], - [-86.600836, 30.398147], - [-77.357837, 38.9589935], - [-73.987723, 40.7575074] - ] - } + { + name: "polygon with edge on equator", + geo: { + type: "Polygon", + coordinates: [[[-120.0, 0.0], [120.0, 0.0], [0.0, 90.0], [-120.0, 0.0]]] + } }, - { - name: "1024 point long line string from south pole to north pole", - geo: {type: "LineString", coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)} + { + name: "polygon just inside single hemisphere (Northern) - China, California, Europe", + geo: { + type: "Polygon", + coordinates: + [[[120.0, 0.000001], [-120.0, 0.000001], [0.0, 0.000001], [120.0, 0.000001]]] + } }, { - name: "line crossing equator - epsg4326CRS", - geo: { - type: "LineString", - coordinates: [[-77.0451853, -12.0553442], [-76.7784557, 18.0098528]], - crs: epsg4326CRS - } + name: "polygon inside Northern hemisphere", + geo: { + type: "Polygon", + coordinates: [[[120.0, 80.0], [-120.0, 80.0], [0.0, 80.0], [120.0, 80.0]]] + } }, { - name: "GeoJson polygon", - geo: { - type: "Polygon", - coordinates: - [[[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]]] - } + name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa", + geo: { + type: "Polygon", + coordinates: + [[[-120.0, -0.000001], [120.0, -0.000001], [0.0, -0.000001], [-120.0, -0.000001]]] + } }, { - name: "polygon w/ hole", - geo: { - type: "Polygon", - coordinates: [ - [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], - [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]] - ] - } - }, - { - name: "polygon w/ two holes", - geo: { - type: "Polygon", - coordinates: [ - [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], - [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], 
[-70.0, 50.0], [-70.0, 40.0]], - [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]] - ] - } + name: "polygon inside Southern hemisphere", + geo: { + type: "Polygon", + coordinates: [[[-120.0, -80.0], [120.0, -80.0], [0.0, -80.0], [-120.0, -80.0]]] + } + }, + { + name: "single point (MultiPoint): Palo Alto", + geo: {type: "MultiPoint", coordinates: [[-122.1611953, 37.4420407]]} + }, + { + name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC", + geo: { + type: "MultiPoint", + coordinates: [ + [-122.1611953, 37.4420407], + [-118.283638, 34.028517], + [-109.045223, 36.9990835], + [-97.850404, 30.3921555], + [-97.904187, 30.395457], + [-86.600836, 30.398147], + [-77.357837, 38.9589935], + [-73.987723, 40.7575074] + ] + } }, { - name: "polygon covering North pole", - geo: { - type: "Polygon", - coordinates: [[[-120.0, 89.0], [0.0, 89.0], [120.0, 89.0], [-120.0, 89.0]]] - } + name: "two points (MultiPoint): Shenzhen, Guangdong, China", + geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [114.022837, 22.44395]]} + }, + { + name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China", + geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]} + }, + { + name: "multi line string: new zealand bays", + geo: { + type: "MultiLineString", + coordinates: [ + [ + [172.803869, -43.592789], + [172.659335, -43.620348], + [172.684038, -43.636528], + [172.820922, -43.605325] + ], + [ + [172.830497, -43.607768], + [172.813263, -43.656319], + [172.823096, -43.660996], + [172.850943, -43.607609] + ], + [ + [172.912056, -43.623148], + [172.887696, -43.670897], + [172.900469, -43.676178], + [172.931735, -43.622839] + ] + ] + } }, - { - name: "polygon covering South pole", - geo: { - type: "Polygon", - coordinates: [[[-120.0, -89.0], [0.0, -89.0], [120.0, -89.0], [-120.0, -89.0]]] - } - }, - { - name: "big polygon/rectangle covering both poles", - geo: { - type: "Polygon", - coordinates: - [[[-130.0, 89.0], [-120.0, 89.0], [-120.0, -89.0], [-130.0, -89.0], [-130.0, 89.0]]], - crs: strictCRS - } - }, - { - name: "polygon (triangle) w/ hole at North pole", - geo: { - type: "Polygon", - coordinates: [ - [[-120.0, 80.0], [0.0, 80.0], [120.0, 80.0], [-120.0, 80.0]], - [[-120.0, 88.0], [0.0, 88.0], [120.0, 88.0], [-120.0, 88.0]] - ] - } - }, - { - name: "polygon with edge on equator", - geo: { - type: "Polygon", - coordinates: [[[-120.0, 0.0], [120.0, 0.0], [0.0, 90.0], [-120.0, 0.0]]] - } - }, - { - name: "polygon just inside single hemisphere (Northern) - China, California, Europe", - geo: { - type: "Polygon", - coordinates: - [[[120.0, 0.000001], [-120.0, 0.000001], [0.0, 0.000001], [120.0, 0.000001]]] - } - }, - { - name: "polygon inside Northern hemisphere", - geo: { - type: "Polygon", - coordinates: [[[120.0, 80.0], [-120.0, 80.0], [0.0, 80.0], [120.0, 80.0]]] - } - }, - { - name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa", - geo: { - type: "Polygon", - coordinates: - [[[-120.0, -0.000001], [120.0, -0.000001], [0.0, -0.000001], [-120.0, -0.000001]]] - } - }, - { - name: "polygon inside Southern hemisphere", - geo: { - type: "Polygon", - coordinates: [[[-120.0, -80.0], [120.0, -80.0], [0.0, -80.0], [-120.0, -80.0]]] - } - }, - { - name: "single point (MultiPoint): Palo Alto", - geo: {type: "MultiPoint", coordinates: [[-122.1611953, 37.4420407]]} - }, - { - name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC", - geo: { - 
type: "MultiPoint", - coordinates: [ - [-122.1611953, 37.4420407], - [-118.283638, 34.028517], - [-109.045223, 36.9990835], - [-97.850404, 30.3921555], - [-97.904187, 30.395457], - [-86.600836, 30.398147], - [-77.357837, 38.9589935], - [-73.987723, 40.7575074] - ] - } - }, - { - name: "two points (MultiPoint): Shenzhen, Guangdong, China", - geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [114.022837, 22.44395]]} - }, - { - name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China", - geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]} - }, - { - name: "multi line string: new zealand bays", - geo: { - type: "MultiLineString", - coordinates: [ - [ - [172.803869, -43.592789], - [172.659335, -43.620348], - [172.684038, -43.636528], - [172.820922, -43.605325] - ], - [ - [172.830497, -43.607768], - [172.813263, -43.656319], - [172.823096, -43.660996], - [172.850943, -43.607609] - ], - [ - [172.912056, -43.623148], - [172.887696, -43.670897], - [172.900469, -43.676178], - [172.931735, -43.622839] - ] - ] - } - }, - { - name: "multi polygon: new zealand north and south islands", - geo: { - type: "MultiPolygon", - coordinates: [ - [[ - [165.773255, -45.902933], - [169.398419, -47.261538], - [174.672744, -41.767722], - [172.288845, -39.897992], - [165.773255, -45.902933] - ]], - [[ - [173.166448, -39.778262], - [175.342744, -42.677333], - [179.913373, -37.224362], - [171.475953, -32.688871], - [173.166448, -39.778262] - ]] - ] - } - }, - { - name: "geometry collection: point in Australia and triangle around Australia", - geo: { - type: "GeometryCollection", - geometries: [ - {name: "center of Australia", type: "Point", coordinates: [133.985885, -27.240790]}, - { - name: "Triangle around Australia", - type: "Polygon", - coordinates: [[ - [97.423178, -44.735405], - [169.845050, -38.432287], - [143.824366, 15.966509], - [97.423178, -44.735405] + { + name: "multi polygon: new zealand north and south islands", + geo: { + type: "MultiPolygon", + coordinates: [ + [[ + [165.773255, -45.902933], + [169.398419, -47.261538], + [174.672744, -41.767722], + [172.288845, -39.897992], + [165.773255, -45.902933] + ]], + [[ + [173.166448, -39.778262], + [175.342744, -42.677333], + [179.913373, -37.224362], + [171.475953, -32.688871], + [173.166448, -39.778262] ]] - } - ] - } + ] + } + }, + { + name: "geometry collection: point in Australia and triangle around Australia", + geo: { + type: "GeometryCollection", + geometries: [ + {name: "center of Australia", type: "Point", coordinates: [133.985885, -27.240790]}, + { + name: "Triangle around Australia", + type: "Polygon", + coordinates: [[ + [97.423178, -44.735405], + [169.845050, -38.432287], + [143.824366, 15.966509], + [97.423178, -44.735405] + ]] + } + ] + } } ]; // Test various polygons which are not queryable var badPolys = [ { - name: "Polygon with bad CRS", - type: "Polygon", - coordinates: [[ - [114.0834046, 22.6648202], - [113.8293457, 22.3819359], - [114.2736054, 22.4047911], - [114.0834046, 22.6648202] - ]], - crs: badCRS - }, - { - name: "Open polygon < 3 sides", - type: "Polygon", - coordinates: [[[114.0834046, 22.6648202], [113.8293457, 22.3819359]]], - crs: strictCRS - }, - { - name: "Open polygon > 3 sides", - type: "Polygon", - coordinates: [[ - [114.0834046, 22.6648202], - [113.8293457, 22.3819359], - [114.2736054, 22.4047911], - [114.1, 22.5] - ]], - crs: strictCRS - }, - { - name: "duplicate non-adjacent points", - type: "Polygon", - coordinates: [[ - [114.0834046, 
22.6648202], - [113.8293457, 22.3819359], - [114.2736054, 22.4047911], - [113.8293457, 22.3819359], - [-65.9165954, 22.6648202], - [114.0834046, 22.6648202] - ]], - crs: strictCRS - }, - { - name: "One hole in polygon", - type: "Polygon", - coordinates: [ - [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], - [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]] - ], - crs: strictCRS - }, - { - name: "2 holes in polygon", - type: "Polygon", - coordinates: [ - [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], - [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]], - [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]] - ], - crs: strictCRS - }, - { - name: "complex polygon (edges cross)", - type: "Polygon", - coordinates: [[[10.0, 10.0], [20.0, 10.0], [10.0, 20.0], [20.0, 20.0], [10.0, 10.0]]], - crs: strictCRS + name: "Polygon with bad CRS", + type: "Polygon", + coordinates: [[ + [114.0834046, 22.6648202], + [113.8293457, 22.3819359], + [114.2736054, 22.4047911], + [114.0834046, 22.6648202] + ]], + crs: badCRS + }, + { + name: "Open polygon < 3 sides", + type: "Polygon", + coordinates: [[[114.0834046, 22.6648202], [113.8293457, 22.3819359]]], + crs: strictCRS + }, + { + name: "Open polygon > 3 sides", + type: "Polygon", + coordinates: [[ + [114.0834046, 22.6648202], + [113.8293457, 22.3819359], + [114.2736054, 22.4047911], + [114.1, 22.5] + ]], + crs: strictCRS + }, + { + name: "duplicate non-adjacent points", + type: "Polygon", + coordinates: [[ + [114.0834046, 22.6648202], + [113.8293457, 22.3819359], + [114.2736054, 22.4047911], + [113.8293457, 22.3819359], + [-65.9165954, 22.6648202], + [114.0834046, 22.6648202] + ]], + crs: strictCRS + }, + { + name: "One hole in polygon", + type: "Polygon", + coordinates: [ + [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], + [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]] + ], + crs: strictCRS + }, + { + name: "2 holes in polygon", + type: "Polygon", + coordinates: [ + [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]], + [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]], + [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]] + ], + crs: strictCRS + }, + { + name: "complex polygon (edges cross)", + type: "Polygon", + coordinates: [[[10.0, 10.0], [20.0, 10.0], [10.0, 20.0], [20.0, 20.0], [10.0, 10.0]]], + crs: strictCRS } ]; // Closed polygons used in query (3, 4, 5, 6-sided) var polys = [ { - name: "3 sided closed polygon", - type: "Polygon", // triangle - coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]], - crs: strictCRS, - nW: 0, - nI: 1 - }, - { - name: "3 sided closed polygon (non-big)", - type: "Polygon", // triangle - coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]], - nW: 0, - nI: 1 - }, - { - name: "4 sided closed polygon", - type: "Polygon", // rectangle - coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]], - crs: strictCRS, - nW: 0, - nI: 1 - }, - { - name: "4 sided closed polygon (non-big)", - type: "Polygon", // rectangle - coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]], - nW: 0, - nI: 1 - }, - { - name: "5 sided closed polygon", - type: "Polygon", // pentagon - coordinates: - [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]], - crs: 
strictCRS, - nW: 0, - nI: 1 - }, - { - name: "5 sided closed polygon (non-big)", - type: "Polygon", // pentagon - coordinates: - [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]], - nW: 0, - nI: 1 - }, - { - name: "6 sided closed polygon", - type: "Polygon", // hexagon - coordinates: [[ - [10.0, 10.0], - [15.0, 10.0], - [22.0, 15.0], - [15.0, 20.0], - [10.0, 20.0], - [7.0, 15.0], - [10.0, 10.0] - ]], - crs: strictCRS, - nW: 0, - nI: 1 - }, - { - name: "6 sided closed polygon (non-big)", - type: "Polygon", // hexagon - coordinates: [[ - [10.0, 10.0], - [15.0, 10.0], - [22.0, 15.0], - [15.0, 20.0], - [10.0, 20.0], - [7.0, 15.0], - [10.0, 10.0] - ]], - nW: 0, - nI: 1 + name: "3 sided closed polygon", + type: "Polygon", // triangle + coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]], + crs: strictCRS, + nW: 0, + nI: 1 + }, + { + name: "3 sided closed polygon (non-big)", + type: "Polygon", // triangle + coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]], + nW: 0, + nI: 1 + }, + { + name: "4 sided closed polygon", + type: "Polygon", // rectangle + coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]], + crs: strictCRS, + nW: 0, + nI: 1 + }, + { + name: "4 sided closed polygon (non-big)", + type: "Polygon", // rectangle + coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]], + nW: 0, + nI: 1 + }, + { + name: "5 sided closed polygon", + type: "Polygon", // pentagon + coordinates: + [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]], + crs: strictCRS, + nW: 0, + nI: 1 + }, + { + name: "5 sided closed polygon (non-big)", + type: "Polygon", // pentagon + coordinates: + [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]], + nW: 0, + nI: 1 + }, + { + name: "6 sided closed polygon", + type: "Polygon", // hexagon + coordinates: [[ + [10.0, 10.0], + [15.0, 10.0], + [22.0, 15.0], + [15.0, 20.0], + [10.0, 20.0], + [7.0, 15.0], + [10.0, 10.0] + ]], + crs: strictCRS, + nW: 0, + nI: 1 + }, + { + name: "6 sided closed polygon (non-big)", + type: "Polygon", // hexagon + coordinates: [[ + [10.0, 10.0], + [15.0, 10.0], + [22.0, 15.0], + [15.0, 20.0], + [10.0, 20.0], + [7.0, 15.0], + [10.0, 10.0] + ]], + nW: 0, + nI: 1 } ]; @@ -477,67 +478,67 @@ var totalObjects = getNumberOfValidObjects(objects); var nsidedPolys = [ // Big Polygon centered on 0, 0 { - name: "4 sided polygon centered on 0, 0", - type: "Polygon", - coordinates: [nGonGenerator(4, 30, true, 0, 0)], - crs: strictCRS, - nW: totalObjects - 3, - nI: totalObjects + name: "4 sided polygon centered on 0, 0", + type: "Polygon", + coordinates: [nGonGenerator(4, 30, true, 0, 0)], + crs: strictCRS, + nW: totalObjects - 3, + nI: totalObjects }, // Non-big polygons have counterclockwise coordinates { - name: "4 sided polygon centered on 0, 0 (non-big)", - type: "Polygon", - coordinates: [nGonGenerator(4, 30, false, 0, 0)], - nW: 0, - nI: 3 + name: "4 sided polygon centered on 0, 0 (non-big)", + type: "Polygon", + coordinates: [nGonGenerator(4, 30, false, 0, 0)], + nW: 0, + nI: 3 }, { - name: "100 sided polygon centered on 0, 0", - type: "Polygon", - coordinates: [nGonGenerator(100, 20, true, 0, 0)], - crs: strictCRS, - nW: totalObjects - 3, - nI: totalObjects + name: "100 sided polygon centered on 0, 0", + type: "Polygon", + coordinates: [nGonGenerator(100, 20, true, 0, 0)], + crs: strictCRS, + nW: totalObjects - 3, + nI: totalObjects }, { - name: "100 
sided polygon centered on 0, 0 (non-big)", - type: "Polygon", - coordinates: [nGonGenerator(100, 20, false, 0, 0)], - nW: 0, - nI: 3 + name: "100 sided polygon centered on 0, 0 (non-big)", + type: "Polygon", + coordinates: [nGonGenerator(100, 20, false, 0, 0)], + nW: 0, + nI: 3 }, { - name: "5000 sided polygon centered on 0, 0 (non-big)", - type: "Polygon", - coordinates: [nGonGenerator(5000, 89.99, false, 0, 0)], - nW: 0, - nI: 3 + name: "5000 sided polygon centered on 0, 0 (non-big)", + type: "Polygon", + coordinates: [nGonGenerator(5000, 89.99, false, 0, 0)], + nW: 0, + nI: 3 }, { - name: "25000 sided polygon centered on 0, 0", - type: "Polygon", - coordinates: [nGonGenerator(25000, 89.99, true, 0, 0)], - crs: strictCRS, - nW: totalObjects - 3, - nI: totalObjects + name: "25000 sided polygon centered on 0, 0", + type: "Polygon", + coordinates: [nGonGenerator(25000, 89.99, true, 0, 0)], + crs: strictCRS, + nW: totalObjects - 3, + nI: totalObjects }, // Big polygon centered on Shenzen { - name: "4 sided polygon centered on Shenzen", - type: "Polygon", - coordinates: [nGonGenerator(4, 5, true, 114.1, 22.55)], - crs: strictCRS, - nW: totalObjects - 3, - nI: totalObjects - 2 + name: "4 sided polygon centered on Shenzen", + type: "Polygon", + coordinates: [nGonGenerator(4, 5, true, 114.1, 22.55)], + crs: strictCRS, + nW: totalObjects - 3, + nI: totalObjects - 2 }, { - name: "4 sided polygon centered on Shenzen (non-big)", - type: "Polygon", - coordinates: [nGonGenerator(4, 5, false, 114.1, 22.55)], - crs: strictCRS, - nW: 2, - nI: 3 + name: "4 sided polygon centered on Shenzen (non-big)", + type: "Polygon", + coordinates: [nGonGenerator(4, 5, false, 114.1, 22.55)], + crs: strictCRS, + nW: 2, + nI: 3 } ]; @@ -567,7 +568,6 @@ totalObjects = coll.count(); var indexes = ["none", "2dsphere"]; indexes.forEach(function(index) { - // Reset indexes on collection assert.commandWorked(coll.dropIndexes(), "drop indexes"); @@ -578,7 +578,6 @@ indexes.forEach(function(index) { // These polygons should not be queryable badPolys.forEach(function(p) { - // within assert.throws(function() { coll.count({geo: {$geoWithin: {$geometry: p}}}); @@ -592,7 +591,6 @@ indexes.forEach(function(index) { // Tests for closed polygons polys.forEach(function(p) { - // geoWithin query var docArray = []; var q = {geo: {$geoWithin: {$geometry: p}}}; @@ -622,19 +620,15 @@ indexes.forEach(function(index) { bulk.insert(doc); }); assert.eq(docArray.length, bulk.execute().nInserted, "reinsert " + p.name); - }); // test the n-sided closed polygons nsidedPolys.forEach(function(p) { - // within assert.eq(p.nW, coll.count({geo: {$geoWithin: {$geometry: p}}}), "within " + p.name); // intersects assert.eq( p.nI, coll.count({geo: {$geoIntersects: {$geometry: p}}}), "intersection " + p.name); - }); - }); diff --git a/jstests/core/geo_big_polygon3.js b/jstests/core/geo_big_polygon3.js index 5adae06102e..424510f521a 100644 --- a/jstests/core/geo_big_polygon3.js +++ b/jstests/core/geo_big_polygon3.js @@ -28,21 +28,20 @@ coll.drop(); var objects = [ {name: "point with strictCRS", type: "Point", coordinates: [-97.9, 0], crs: strictCRS}, { - name: "multipoint with strictCRS", - type: "MultiPoint", - coordinates: [[-97.9, 0], [-10.9, 0]], - crs: strictCRS + name: "multipoint with strictCRS", + type: "MultiPoint", + coordinates: [[-97.9, 0], [-10.9, 0]], + crs: strictCRS }, { - name: "line with strictCRS", - type: "LineString", - coordinates: [[-122.1611953, 37.4420407], [-118.283638, 34.028517]], - crs: strictCRS + name: "line with strictCRS", + 
type: "LineString", + coordinates: [[-122.1611953, 37.4420407], [-118.283638, 34.028517]], + crs: strictCRS } ]; objects.forEach(function(o) { - // within assert.throws(function() { coll.count({geo: {$geoWithin: {$geometry: o}}}); @@ -99,34 +98,34 @@ assert.commandWorked(coll.dropIndex({geo: "2dsphere"}), "drop 2dsphere index"); objects = [ { - name: "NYC Times Square - point", - geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: strictCRS} + name: "NYC Times Square - point", + geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: strictCRS} }, { - name: "NYC CitiField & JFK - multipoint", - geo: { - type: "MultiPoint", - coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: strictCRS - } + name: "NYC CitiField & JFK - multipoint", + geo: { + type: "MultiPoint", + coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: strictCRS + } }, { - name: "NYC - Times Square to CitiField to JFK - line/string", - geo: { - type: "LineString", - coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: strictCRS - } + name: "NYC - Times Square to CitiField to JFK - line/string", + geo: { + type: "LineString", + coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: strictCRS + } }, { - name: "NYC - Times Square to CitiField to JFK to Times Square - polygon", - geo: { - type: "Polygon", - coordinates: [ - [[-73.9857, 40.7577], [-73.7789, 40.6397], [-73.8458, 40.7569], [-73.9857, 40.7577]] - ], - crs: strictCRS - } + name: "NYC - Times Square to CitiField to JFK to Times Square - polygon", + geo: { + type: "Polygon", + coordinates: [ + [[-73.9857, 40.7577], [-73.7789, 40.6397], [-73.8458, 40.7569], [-73.9857, 40.7577]] + ], + crs: strictCRS + } } ]; @@ -165,44 +164,44 @@ coll.remove({}); // Objects should be found from query objects = [ { - name: "NYC Times Square - point crs84CRS", - geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: crs84CRS} + name: "NYC Times Square - point crs84CRS", + geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: crs84CRS} }, { - name: "NYC Times Square - point epsg4326CRS", - geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: epsg4326CRS} + name: "NYC Times Square - point epsg4326CRS", + geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: epsg4326CRS} }, { - name: "NYC CitiField & JFK - multipoint crs84CRS", - geo: { - type: "MultiPoint", - coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: crs84CRS - } + name: "NYC CitiField & JFK - multipoint crs84CRS", + geo: { + type: "MultiPoint", + coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: crs84CRS + } }, { - name: "NYC CitiField & JFK - multipoint epsg4326CRS", - geo: { - type: "MultiPoint", - coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: epsg4326CRS - } + name: "NYC CitiField & JFK - multipoint epsg4326CRS", + geo: { + type: "MultiPoint", + coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: epsg4326CRS + } }, { - name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS", - geo: { - type: "LineString", - coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: crs84CRS - } + name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS", + geo: { + type: "LineString", + coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: crs84CRS + } }, { - name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS", - geo: { - type: "LineString", - coordinates: [[-73.9857, 
40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], - crs: epsg4326CRS - } + name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS", + geo: { + type: "LineString", + coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]], + crs: epsg4326CRS + } } ]; diff --git a/jstests/core/geo_distinct.js b/jstests/core/geo_distinct.js index f2064008ae9..965ec6f7a18 100644 --- a/jstests/core/geo_distinct.js +++ b/jstests/core/geo_distinct.js @@ -5,110 +5,113 @@ // @tags: [requires_fastcount] (function() { - "use strict"; - const coll = db.geo_distinct; - let res; - - // - // 1. Test distinct with geo values for 'key'. - // - - coll.drop(); - assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}})); - assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}})); - assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}})); - assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}})); - assert.eq(4, coll.count()); - - // Test distinct on GeoJSON points with/without a 2dsphere index. - - res = coll.runCommand('distinct', {key: 'loc'}); - assert.commandWorked(res); - assert.eq(res.values.sort(bsonWoCompare), - [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]); - - assert.commandWorked(coll.createIndex({loc: '2dsphere'})); - - res = coll.runCommand('distinct', {key: 'loc'}); - assert.commandWorked(res); - assert.eq(res.values.sort(bsonWoCompare), - [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]); - - // Test distinct on legacy points with/without a 2d index. - - // (Note that distinct on a 2d-indexed field doesn't produce a list of coordinate pairs, since - // distinct logically operates on unique values in an array. Hence, the results are unintuitive - // and not semantically meaningful.) - - assert.commandWorked(coll.dropIndexes()); - - res = coll.runCommand('distinct', {key: 'loc.coordinates'}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [10, 20, 30]); - - assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'})); - - res = coll.runCommand('distinct', {key: 'loc.coordinates'}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [10, 20, 30]); - - // - // 2. Test distinct with geo predicates for 'query'. - // - - assert(coll.drop()); - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 50; ++i) { - bulk.insert({zone: 1, loc: {type: 'Point', coordinates: [-20, -20]}}); - bulk.insert({zone: 2, loc: {type: 'Point', coordinates: [-10, -10]}}); - bulk.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}}); - bulk.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}}); - bulk.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}}); - } - assert.writeOK(bulk.execute()); - - const originGeoJSON = {type: 'Point', coordinates: [0, 0]}; - - // Test distinct with $nearSphere query predicate. - - // A. Unindexed key, no geo index on query predicate. - res = coll.runCommand( - 'distinct', - {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); - assert.commandFailed(res); - // B. Unindexed key, with 2dsphere index on query predicate. - assert.commandWorked(coll.createIndex({loc: '2dsphere'})); - res = coll.runCommand( - 'distinct', - {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [3]); - // C. Indexed key, with 2dsphere index on query predicate. 
- assert.commandWorked(coll.createIndex({zone: 1})); - res = coll.runCommand( - 'distinct', - {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [3]); - - // Test distinct with $near query predicate. - - assert.commandWorked(coll.dropIndexes()); - - // A. Unindexed key, no geo index on query predicate. - res = coll.runCommand( - 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); - assert.commandFailed(res); - // B. Unindexed key, with 2d index on query predicate. - assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'})); - res = coll.runCommand( - 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [3]); - // C. Indexed key, with 2d index on query predicate. - assert.commandWorked(coll.createIndex({zone: 1})); - res = coll.runCommand( - 'distinct', {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); - assert.commandWorked(res); - assert.eq(res.values.sort(), [3]); +"use strict"; +const coll = db.geo_distinct; +let res; + +// +// 1. Test distinct with geo values for 'key'. +// + +coll.drop(); +assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}})); +assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [10, 20]}})); +assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}})); +assert.writeOK(coll.insert({loc: {type: 'Point', coordinates: [20, 30]}})); +assert.eq(4, coll.count()); + +// Test distinct on GeoJSON points with/without a 2dsphere index. + +res = coll.runCommand('distinct', {key: 'loc'}); +assert.commandWorked(res); +assert.eq(res.values.sort(bsonWoCompare), + [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]); + +assert.commandWorked(coll.createIndex({loc: '2dsphere'})); + +res = coll.runCommand('distinct', {key: 'loc'}); +assert.commandWorked(res); +assert.eq(res.values.sort(bsonWoCompare), + [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]); + +// Test distinct on legacy points with/without a 2d index. + +// (Note that distinct on a 2d-indexed field doesn't produce a list of coordinate pairs, since +// distinct logically operates on unique values in an array. Hence, the results are unintuitive +// and not semantically meaningful.) + +assert.commandWorked(coll.dropIndexes()); + +res = coll.runCommand('distinct', {key: 'loc.coordinates'}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [10, 20, 30]); + +assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'})); + +res = coll.runCommand('distinct', {key: 'loc.coordinates'}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [10, 20, 30]); + +// +// 2. Test distinct with geo predicates for 'query'. +// + +assert(coll.drop()); +const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 50; ++i) { + bulk.insert({zone: 1, loc: {type: 'Point', coordinates: [-20, -20]}}); + bulk.insert({zone: 2, loc: {type: 'Point', coordinates: [-10, -10]}}); + bulk.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}}); + bulk.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}}); + bulk.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}}); +} +assert.writeOK(bulk.execute()); + +const originGeoJSON = { + type: 'Point', + coordinates: [0, 0] +}; + +// Test distinct with $nearSphere query predicate. + +// A. 
Unindexed key, no geo index on query predicate. +res = coll.runCommand( + 'distinct', + {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); +assert.commandFailed(res); +// B. Unindexed key, with 2dsphere index on query predicate. +assert.commandWorked(coll.createIndex({loc: '2dsphere'})); +res = coll.runCommand( + 'distinct', + {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [3]); +// C. Indexed key, with 2dsphere index on query predicate. +assert.commandWorked(coll.createIndex({zone: 1})); +res = coll.runCommand( + 'distinct', + {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [3]); + +// Test distinct with $near query predicate. + +assert.commandWorked(coll.dropIndexes()); + +// A. Unindexed key, no geo index on query predicate. +res = coll.runCommand('distinct', + {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); +assert.commandFailed(res); +// B. Unindexed key, with 2d index on query predicate. +assert.commandWorked(coll.createIndex({'loc.coordinates': '2d'})); +res = coll.runCommand('distinct', + {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [3]); +// C. Indexed key, with 2d index on query predicate. +assert.commandWorked(coll.createIndex({zone: 1})); +res = coll.runCommand('distinct', + {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}}); +assert.commandWorked(res); +assert.eq(res.values.sort(), [3]); }()); diff --git a/jstests/core/geo_fiddly_box.js b/jstests/core/geo_fiddly_box.js index b99a28c01f7..efb185e2dfd 100644 --- a/jstests/core/geo_fiddly_box.js +++ b/jstests/core/geo_fiddly_box.js @@ -42,15 +42,14 @@ for (var x = min; x <= max; x += step) { } } -assert.eq(numItems, - t.count({ - loc: { - $within: { - $box: [ - [min - epsilon / 3, min - epsilon / 3], - [max + epsilon / 3, max + epsilon / 3] - ] - } - } - }), - "Not all locations found!"); +assert.eq( + numItems, + t.count({ + loc: { + $within: { + $box: + [[min - epsilon / 3, min - epsilon / 3], [max + epsilon / 3, max + epsilon / 3]] + } + } + }), + "Not all locations found!"); diff --git a/jstests/core/geo_mapreduce2.js b/jstests/core/geo_mapreduce2.js index d7f73ce3d69..43eaffed82e 100644 --- a/jstests/core/geo_mapreduce2.js +++ b/jstests/core/geo_mapreduce2.js @@ -21,7 +21,6 @@ m = function() { // reduce function r = function(key, values) { - var total = 0; for (var i = 0; i < values.length; i++) { total += values[i].count; diff --git a/jstests/core/geo_mindistance.js b/jstests/core/geo_mindistance.js index 92ccc617cf5..4ca58b26003 100644 --- a/jstests/core/geo_mindistance.js +++ b/jstests/core/geo_mindistance.js @@ -2,253 +2,247 @@ // @tags: [requires_fastcount, requires_getmore] (function() { - "use strict"; - - load("jstests/libs/geo_math.js"); - - var t = db.geo_mindistance; - t.drop(); - - const km = 1000; - - /** - * Asserts that two numeric values are equal within some absolute error. - */ - function assertApproxEqual(rhs, lhs, error, msg) { - assert.lt(Math.abs(rhs - rhs), error, msg); - } - - /** - * Count documents within some radius of (0, 0), in kilometers. With this function we can use - * the existing $maxDistance option to test the newer $minDistance option's behavior. 
- */ - function n_docs_within(radius_km) { - // $geoNear's distances are in meters for geoJSON points. - return t - .aggregate([ - { - $geoNear: { - near: {type: 'Point', coordinates: [0, 0]}, - distanceField: "dis", - spherical: true, - maxDistance: radius_km * km, - } - }, - {$limit: 1000} - ]) - .itcount(); +"use strict"; + +load("jstests/libs/geo_math.js"); + +var t = db.geo_mindistance; +t.drop(); + +const km = 1000; + +/** + * Asserts that two numeric values are equal within some absolute error. + */ +function assertApproxEqual(rhs, lhs, error, msg) { + assert.lt(Math.abs(rhs - rhs), error, msg); +} + +/** + * Count documents within some radius of (0, 0), in kilometers. With this function we can use + * the existing $maxDistance option to test the newer $minDistance option's behavior. + */ +function n_docs_within(radius_km) { + // $geoNear's distances are in meters for geoJSON points. + return t + .aggregate([ + { + $geoNear: { + near: {type: 'Point', coordinates: [0, 0]}, + distanceField: "dis", + spherical: true, + maxDistance: radius_km * km, + } + }, + {$limit: 1000} + ]) + .itcount(); +} + +// +// Setup. +// + +/** + * Make 121 points from long, lat = (0, 0) (in Gulf of Guinea) to (10, 10) (inland Nigeria). + */ +for (var x = 0; x <= 10; x += 1) { + for (var y = 0; y <= 10; y += 1) { + t.insert({loc: [x, y]}); } - - // - // Setup. - // - - /** - * Make 121 points from long, lat = (0, 0) (in Gulf of Guinea) to (10, 10) (inland Nigeria). - */ - for (var x = 0; x <= 10; x += 1) { - for (var y = 0; y <= 10; y += 1) { - t.insert({loc: [x, y]}); - } - } - - t.ensureIndex({loc: "2dsphere"}); - - var n_docs = t.count(), geoJSONPoint = {type: 'Point', coordinates: [0, 0]}, - legacyPoint = [0, 0]; - - // - // Test $near with GeoJSON point (required for $near with 2dsphere index). min/maxDistance are - // in meters. - // - - var n_min1400_count = - t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: 1400 * km}}}).count(); - - assert.eq(n_docs - n_docs_within(1400), - n_min1400_count, - "Expected " + (n_docs - n_docs_within(1400)) + - " points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count); - - var n_bw500_and_1000_count = - t.find({ - loc: { - $near: - {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km} - } - }).count(); - - assert.eq(n_docs_within(1000) - n_docs_within(500), - n_bw500_and_1000_count, - "Expected " + (n_docs_within(1000) - n_docs_within(500)) + - " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + - n_bw500_and_1000_count); - - // - // $nearSphere with 2dsphere index can take a legacy or GeoJSON point. First test $nearSphere - // with legacy point. min/maxDistance are in radians. - // - - n_min1400_count = - t.find({loc: {$nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)}}).count(); - - assert.eq(n_docs - n_docs_within(1400), - n_min1400_count, - "Expected " + (n_docs - n_docs_within(1400)) + - " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count); - - n_bw500_and_1000_count = t.find({ - loc: { - $nearSphere: legacyPoint, - $minDistance: metersToRadians(500 * km), - $maxDistance: metersToRadians(1000 * km) - } - }).count(); - - assert.eq( - n_docs_within(1000) - n_docs_within(500), - n_bw500_and_1000_count, - "Expected " + (n_docs_within(1000) - n_docs_within(500)) + - " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + - n_bw500_and_1000_count); - - // - // Test $nearSphere with GeoJSON point. 
min/maxDistance are in meters. - // - - n_min1400_count = t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 1400 * km}}).count(); - - assert.eq(n_docs - n_docs_within(1400), - n_min1400_count, - "Expected " + (n_docs - n_docs_within(1400)) + - " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count); - - n_bw500_and_1000_count = - t.find({ - loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km} - }).count(); - - assert.eq( - n_docs_within(1000) - n_docs_within(500), - n_bw500_and_1000_count, - "Expected " + (n_docs_within(1000) - n_docs_within(500)) + - " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + - n_bw500_and_1000_count); - - // - // Test $geoNear aggregation stage with GeoJSON point. Distances are in meters. - // - - let geoNearCount = t.aggregate({ - $geoNear: { - near: {type: 'Point', coordinates: [0, 0]}, - minDistance: 1400 * km, - spherical: true, - distanceField: "d", - } - }).itcount(); - assert.eq(n_docs - n_docs_within(1400), - geoNearCount, - "Expected " + (n_docs - n_docs_within(1400)) + - " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount); - - geoNearCount = t.aggregate({ +} + +t.ensureIndex({loc: "2dsphere"}); + +var n_docs = t.count(), geoJSONPoint = {type: 'Point', coordinates: [0, 0]}, legacyPoint = [0, 0]; + +// +// Test $near with GeoJSON point (required for $near with 2dsphere index). min/maxDistance are +// in meters. +// + +var n_min1400_count = + t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: 1400 * km}}}).count(); + +assert.eq(n_docs - n_docs_within(1400), + n_min1400_count, + "Expected " + (n_docs - n_docs_within(1400)) + + " points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count); + +var n_bw500_and_1000_count = + t.find({ + loc: {$near: {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}} + }).count(); + +assert.eq(n_docs_within(1000) - n_docs_within(500), + n_bw500_and_1000_count, + "Expected " + (n_docs_within(1000) - n_docs_within(500)) + + " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + + n_bw500_and_1000_count); + +// +// $nearSphere with 2dsphere index can take a legacy or GeoJSON point. First test $nearSphere +// with legacy point. min/maxDistance are in radians. +// + +n_min1400_count = + t.find({loc: {$nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)}}).count(); + +assert.eq(n_docs - n_docs_within(1400), + n_min1400_count, + "Expected " + (n_docs - n_docs_within(1400)) + + " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count); + +n_bw500_and_1000_count = t.find({ + loc: { + $nearSphere: legacyPoint, + $minDistance: metersToRadians(500 * km), + $maxDistance: metersToRadians(1000 * km) + } + }).count(); + +assert.eq(n_docs_within(1000) - n_docs_within(500), + n_bw500_and_1000_count, + "Expected " + (n_docs_within(1000) - n_docs_within(500)) + + " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + + n_bw500_and_1000_count); + +// +// Test $nearSphere with GeoJSON point. min/maxDistance are in meters. 
+// + +n_min1400_count = t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 1400 * km}}).count(); + +assert.eq(n_docs - n_docs_within(1400), + n_min1400_count, + "Expected " + (n_docs - n_docs_within(1400)) + + " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count); + +n_bw500_and_1000_count = + t.find({ + loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km} + }).count(); + +assert.eq(n_docs_within(1000) - n_docs_within(500), + n_bw500_and_1000_count, + "Expected " + (n_docs_within(1000) - n_docs_within(500)) + + " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + + n_bw500_and_1000_count); + +// +// Test $geoNear aggregation stage with GeoJSON point. Distances are in meters. +// + +let geoNearCount = t.aggregate({ $geoNear: { near: {type: 'Point', coordinates: [0, 0]}, - minDistance: 500 * km, - maxDistance: 1000 * km, - spherical: true, - distanceField: "d", - } - }).itcount(); - assert.eq(n_docs_within(1000) - n_docs_within(500), - geoNearCount, - "Expected " + (n_docs_within(1000) - n_docs_within(500)) + - " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + - geoNearCount); - - // - // Test $geoNear aggregation stage with legacy point. Distances are in radians. - // - - geoNearCount = t.aggregate({ - $geoNear: { - near: legacyPoint, - minDistance: metersToRadians(1400 * km), - spherical: true, - distanceField: "d", - } - }).itcount(); - assert.eq(n_docs - n_docs_within(1400), - geoNearCount, - "Expected " + (n_docs - n_docs_within(1400)) + - " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount); - - geoNearCount = t.aggregate({ - $geoNear: { - near: legacyPoint, - minDistance: metersToRadians(500 * km), - maxDistance: metersToRadians(1000 * km), + minDistance: 1400 * km, spherical: true, distanceField: "d", } }).itcount(); - assert.eq(n_docs_within(1000) - n_docs_within(500), - geoNearCount, - "Expected " + (n_docs_within(1000) - n_docs_within(500)) + - " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + - geoNearCount); - - t.drop(); - assert.commandWorked(t.createIndex({loc: "2d"})); - assert.writeOK(t.insert({loc: [0, 40]})); - assert.writeOK(t.insert({loc: [0, 41]})); - assert.writeOK(t.insert({loc: [0, 42]})); - - // Test minDistance for 2d index with $near. - assert.eq(3, t.find({loc: {$near: [0, 0]}}).itcount()); - assert.eq(1, t.find({loc: {$near: [0, 0], $minDistance: 41.5}}).itcount()); - - // Test minDistance for 2d index with $nearSphere. Distances are in radians. - assert.eq(3, t.find({loc: {$nearSphere: [0, 0]}}).itcount()); - assert.eq(1, t.find({loc: {$nearSphere: [0, 0], $minDistance: deg2rad(41.5)}}).itcount()); - - // Test minDistance for 2d index with $geoNear stage and spherical=false. - let cmdResult = - t.aggregate({$geoNear: {near: [0, 0], spherical: false, distanceField: "dis"}}).toArray(); - assert.eq(3, cmdResult.length); - assert.eq(40, cmdResult[0].dis); - assert.eq(41, cmdResult[1].dis); - assert.eq(42, cmdResult[2].dis); - - cmdResult = t.aggregate({ - $geoNear: { - near: [0, 0], - minDistance: 41.5, - spherical: false, - distanceField: "dis", - } - }).toArray(); - assert.eq(1, cmdResult.length); - assert.eq(42, cmdResult[0].dis); - - // Test minDistance for 2d index with $geoNear stage and spherical=true. Distances are in - // radians. 
- cmdResult = - t.aggregate({$geoNear: {near: [0, 0], spherical: true, distanceField: "dis"}}).toArray(); - assert.eq(3, cmdResult.length); - assertApproxEqual(deg2rad(40), cmdResult[0].dis, 1e-3); - assertApproxEqual(deg2rad(41), cmdResult[1].dis, 1e-3); - assertApproxEqual(deg2rad(42), cmdResult[2].dis, 1e-3); - - cmdResult = t.aggregate({ - $geoNear: { - near: [0, 0], - minDistance: deg2rad(41.5), - spherical: true, - distanceField: "dis", - } - }).toArray(); - assert.eq(1, cmdResult.length); - assertApproxEqual(deg2rad(42), cmdResult[0].dis, 1e-3); +assert.eq(n_docs - n_docs_within(1400), + geoNearCount, + "Expected " + (n_docs - n_docs_within(1400)) + + " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount); + +geoNearCount = t.aggregate({ + $geoNear: { + near: {type: 'Point', coordinates: [0, 0]}, + minDistance: 500 * km, + maxDistance: 1000 * km, + spherical: true, + distanceField: "d", + } + }).itcount(); +assert.eq(n_docs_within(1000) - n_docs_within(500), + geoNearCount, + "Expected " + (n_docs_within(1000) - n_docs_within(500)) + + " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + + geoNearCount); + +// +// Test $geoNear aggregation stage with legacy point. Distances are in radians. +// + +geoNearCount = t.aggregate({ + $geoNear: { + near: legacyPoint, + minDistance: metersToRadians(1400 * km), + spherical: true, + distanceField: "d", + } + }).itcount(); +assert.eq(n_docs - n_docs_within(1400), + geoNearCount, + "Expected " + (n_docs - n_docs_within(1400)) + + " points geoNear (0, 0) with $minDistance 1400 km, got " + geoNearCount); + +geoNearCount = t.aggregate({ + $geoNear: { + near: legacyPoint, + minDistance: metersToRadians(500 * km), + maxDistance: metersToRadians(1000 * km), + spherical: true, + distanceField: "d", + } + }).itcount(); +assert.eq(n_docs_within(1000) - n_docs_within(500), + geoNearCount, + "Expected " + (n_docs_within(1000) - n_docs_within(500)) + + " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " + + geoNearCount); + +t.drop(); +assert.commandWorked(t.createIndex({loc: "2d"})); +assert.writeOK(t.insert({loc: [0, 40]})); +assert.writeOK(t.insert({loc: [0, 41]})); +assert.writeOK(t.insert({loc: [0, 42]})); + +// Test minDistance for 2d index with $near. +assert.eq(3, t.find({loc: {$near: [0, 0]}}).itcount()); +assert.eq(1, t.find({loc: {$near: [0, 0], $minDistance: 41.5}}).itcount()); + +// Test minDistance for 2d index with $nearSphere. Distances are in radians. +assert.eq(3, t.find({loc: {$nearSphere: [0, 0]}}).itcount()); +assert.eq(1, t.find({loc: {$nearSphere: [0, 0], $minDistance: deg2rad(41.5)}}).itcount()); + +// Test minDistance for 2d index with $geoNear stage and spherical=false. +let cmdResult = + t.aggregate({$geoNear: {near: [0, 0], spherical: false, distanceField: "dis"}}).toArray(); +assert.eq(3, cmdResult.length); +assert.eq(40, cmdResult[0].dis); +assert.eq(41, cmdResult[1].dis); +assert.eq(42, cmdResult[2].dis); + +cmdResult = t.aggregate({ + $geoNear: { + near: [0, 0], + minDistance: 41.5, + spherical: false, + distanceField: "dis", + } + }).toArray(); +assert.eq(1, cmdResult.length); +assert.eq(42, cmdResult[0].dis); + +// Test minDistance for 2d index with $geoNear stage and spherical=true. Distances are in +// radians. 
+cmdResult = + t.aggregate({$geoNear: {near: [0, 0], spherical: true, distanceField: "dis"}}).toArray(); +assert.eq(3, cmdResult.length); +assertApproxEqual(deg2rad(40), cmdResult[0].dis, 1e-3); +assertApproxEqual(deg2rad(41), cmdResult[1].dis, 1e-3); +assertApproxEqual(deg2rad(42), cmdResult[2].dis, 1e-3); + +cmdResult = t.aggregate({ + $geoNear: { + near: [0, 0], + minDistance: deg2rad(41.5), + spherical: true, + distanceField: "dis", + } + }).toArray(); +assert.eq(1, cmdResult.length); +assertApproxEqual(deg2rad(42), cmdResult[0].dis, 1e-3); }()); diff --git a/jstests/core/geo_mindistance_boundaries.js b/jstests/core/geo_mindistance_boundaries.js index 7e97732dfd1..32977ac4b12 100644 --- a/jstests/core/geo_mindistance_boundaries.js +++ b/jstests/core/geo_mindistance_boundaries.js @@ -1,6 +1,6 @@ /* Test boundary conditions for $minDistance option for $near and $nearSphere * queries. SERVER-9395. -*/ + */ var t = db.geo_mindistance_boundaries; t.drop(); t.insert({loc: [1, 0]}); // 1 degree of longitude from origin. @@ -19,7 +19,7 @@ var km = 1000, earthRadiusMeters = 6378.1 * km, geoJSONPoint = {type: 'Point', c /* Grow epsilon's exponent until epsilon exceeds the margin of error for the * representation of degreeInMeters. The server uses 64-bit math, too, so we'll * find the smallest epsilon the server can detect. -*/ + */ while (degreeInMeters + metersEpsilon == degreeInMeters) { metersEpsilon *= 2; } @@ -37,19 +37,17 @@ assert.eq(1, t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}}).itcount(), "Expected to find (0, 1) within $minDistance 1 degree from origin"); -assert.eq( - 1, - t.find({ - loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}} - }).itcount(), - "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin"); +assert.eq(1, + t.find({ + loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}} + }).itcount(), + "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin"); -assert.eq( - 0, - t.find({ - loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}} - }).itcount(), - "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin"); +assert.eq(0, + t.find({ + loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}} + }).itcount(), + "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin"); // // Test boundary conditions for $nearSphere and GeoJSON, in meters. diff --git a/jstests/core/geo_nearwithin.js b/jstests/core/geo_nearwithin.js index a63871c3195..49b8d155a44 100644 --- a/jstests/core/geo_nearwithin.js +++ b/jstests/core/geo_nearwithin.js @@ -1,40 +1,40 @@ // Test $near + $within. 
(function() { - t = db.geo_nearwithin; - t.drop(); - - points = 10; - for (var x = -points; x < points; x += 1) { - for (var y = -points; y < points; y += 1) { - assert.commandWorked(t.insert({geo: [x, y]})); - } +t = db.geo_nearwithin; +t.drop(); + +points = 10; +for (var x = -points; x < points; x += 1) { + for (var y = -points; y < points; y += 1) { + assert.commandWorked(t.insert({geo: [x, y]})); } +} - assert.commandWorked(t.ensureIndex({geo: "2d"})); +assert.commandWorked(t.ensureIndex({geo: "2d"})); - const runQuery = (center) => - t.find({$and: [{geo: {$near: [0, 0]}}, {geo: {$within: {$center: center}}}]}).toArray(); +const runQuery = (center) => + t.find({$and: [{geo: {$near: [0, 0]}}, {geo: {$within: {$center: center}}}]}).toArray(); - resNear = runQuery([[0, 0], 1]); - assert.eq(resNear.length, 5); +resNear = runQuery([[0, 0], 1]); +assert.eq(resNear.length, 5); - resNear = runQuery([[0, 0], 0]); - assert.eq(resNear.length, 1); +resNear = runQuery([[0, 0], 0]); +assert.eq(resNear.length, 1); - resNear = runQuery([[1, 0], 0.5]); - assert.eq(resNear.length, 1); +resNear = runQuery([[1, 0], 0.5]); +assert.eq(resNear.length, 1); - resNear = runQuery([[1, 0], 1.5]); - assert.eq(resNear.length, 9); +resNear = runQuery([[1, 0], 1.5]); +assert.eq(resNear.length, 9); - // We want everything distance >1 from us but <1.5 - // These points are (-+1, -+1) - resNear = t.find({ - $and: [ - {geo: {$near: [0, 0]}}, - {geo: {$within: {$center: [[0, 0], 1.5]}}}, - {geo: {$not: {$within: {$center: [[0, 0], 1]}}}} - ] - }).toArray(); - assert.eq(resNear.length, 4); +// We want everything distance >1 from us but <1.5 +// These points are (-+1, -+1) +resNear = t.find({ + $and: [ + {geo: {$near: [0, 0]}}, + {geo: {$within: {$center: [[0, 0], 1.5]}}}, + {geo: {$not: {$within: {$center: [[0, 0], 1]}}}} + ] + }).toArray(); +assert.eq(resNear.length, 4); }()); diff --git a/jstests/core/geo_operator_crs.js b/jstests/core/geo_operator_crs.js index b2cc8fe0439..063426b6b45 100644 --- a/jstests/core/geo_operator_crs.js +++ b/jstests/core/geo_operator_crs.js @@ -4,55 +4,55 @@ // Tests that the correct CRSes are used for geo queries (based on input geometry) // (function() { - var coll = db.geo_operator_crs; - coll.drop(); +var coll = db.geo_operator_crs; +coll.drop(); - // - // Test 2dsphere index - // +// +// Test 2dsphere index +// - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - var legacyZeroPt = [0, 0]; - var jsonZeroPt = {type: "Point", coordinates: [0, 0]}; - var legacy90Pt = [90, 0]; - var json90Pt = {type: "Point", coordinates: [90, 0]}; +var legacyZeroPt = [0, 0]; +var jsonZeroPt = {type: "Point", coordinates: [0, 0]}; +var legacy90Pt = [90, 0]; +var json90Pt = {type: "Point", coordinates: [90, 0]}; - assert.writeOK(coll.insert({geo: json90Pt})); +assert.writeOK(coll.insert({geo: json90Pt})); - var earthRadiusMeters = 6378.1 * 1000; - var result = null; +var earthRadiusMeters = 6378.1 * 1000; +var result = null; - const runQuery = (point) => - coll.find({geo: {$nearSphere: point}}, {dis: {$meta: "geoNearDistance"}}).toArray(); +const runQuery = (point) => + coll.find({geo: {$nearSphere: point}}, {dis: {$meta: "geoNearDistance"}}).toArray(); - result = runQuery(legacyZeroPt); - assert.close(result[0].dis, Math.PI / 2); +result = runQuery(legacyZeroPt); +assert.close(result[0].dis, Math.PI / 2); - result = runQuery(jsonZeroPt); - assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters); +result = runQuery(jsonZeroPt); 
+assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters); - assert.writeOK(coll.remove({})); - assert.commandWorked(coll.dropIndexes()); +assert.writeOK(coll.remove({})); +assert.commandWorked(coll.dropIndexes()); - // - // Test 2d Index - // +// +// Test 2d Index +// - assert.commandWorked(coll.ensureIndex({geo: "2d"})); +assert.commandWorked(coll.ensureIndex({geo: "2d"})); - assert.writeOK(coll.insert({geo: legacy90Pt})); +assert.writeOK(coll.insert({geo: legacy90Pt})); - result = runQuery(legacyZeroPt); - assert.close(result[0].dis, Math.PI / 2); +result = runQuery(legacyZeroPt); +assert.close(result[0].dis, Math.PI / 2); - // GeoJSON not supported unless there's a 2dsphere index +// GeoJSON not supported unless there's a 2dsphere index - // - // Test with a 2d and 2dsphere index using the aggregation $geoNear stage. - // +// +// Test with a 2d and 2dsphere index using the aggregation $geoNear stage. +// - assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); - result = coll.aggregate({$geoNear: {near: jsonZeroPt, distanceField: "dis"}}).toArray(); - assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters); +assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); +result = coll.aggregate({$geoNear: {near: jsonZeroPt, distanceField: "dis"}}).toArray(); +assert.close(result[0].dis, (Math.PI / 2) * earthRadiusMeters); }()); diff --git a/jstests/core/geo_or.js b/jstests/core/geo_or.js index 1324d581be8..4da82d49ca2 100644 --- a/jstests/core/geo_or.js +++ b/jstests/core/geo_or.js @@ -42,10 +42,10 @@ assert.eq( $or: [ {loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}}, { - loc: { - $geoIntersects: - {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}} - } + loc: { + $geoIntersects: + {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}} + } } ] }).itcount(), @@ -63,18 +63,18 @@ assert.eq( t.find({ $or: [ { - loc: { - $geoIntersects: - {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}} - } + loc: { + $geoIntersects: + {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}} + } }, { - loc: { - $geoIntersects: { - $geometry: - {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]} - } - } + loc: { + $geoIntersects: { + $geometry: + {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]} + } + } } ] }).itcount(), diff --git a/jstests/core/geo_polygon1.js b/jstests/core/geo_polygon1.js index 45f0eb71d64..34b0cafa1d4 100644 --- a/jstests/core/geo_polygon1.js +++ b/jstests/core/geo_polygon1.js @@ -25,10 +25,10 @@ boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test"); // Make sure we can add object-based polygons -assert.eq( - num, t.find({ - loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}} - }).count()); +assert.eq(num, + t.find({ + loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}} + }).count()); // Look in a box much bigger than the one we have data in boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]]; diff --git a/jstests/core/geo_polygon1_noindex.js b/jstests/core/geo_polygon1_noindex.js index e5aabb5043d..d9831a6990c 100644 --- a/jstests/core/geo_polygon1_noindex.js +++ b/jstests/core/geo_polygon1_noindex.js @@ -21,10 +21,10 @@ boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test"); // Make sure we can 
add object-based polygons -assert.eq( - num, t.find({ - loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}} - }).count()); +assert.eq(num, + t.find({ + loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}} + }).count()); // Look in a box much bigger than the one we have data in boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]]; diff --git a/jstests/core/geo_polygon2.js b/jstests/core/geo_polygon2.js index 6891cd7c8a8..2bfaf0b1087 100644 --- a/jstests/core/geo_polygon2.js +++ b/jstests/core/geo_polygon2.js @@ -22,7 +22,6 @@ for (var test = 0; test < numTests; test++) { printjson({test: test, rotation: rotation, bits: bits}); var rotatePoint = function(x, y) { - if (y == undefined) { y = x[1]; x = x[0]; @@ -46,7 +45,6 @@ for (var test = 0; test < numTests; test++) { } grid.toString = function() { - var gridStr = ""; for (var j = grid[0].length - 1; j >= -1; j--) { for (var i = 0; i < grid.length; i++) { @@ -81,7 +79,6 @@ for (var test = 0; test < numTests; test++) { // print( grid.toString() ) var pickDirections = function() { - var up = Math.floor(Random.rand() * 3); if (up == 2) up = -1; @@ -127,7 +124,6 @@ for (var test = 0; test < numTests; test++) { turtlePath = []; var nextSeg = function(currTurtle, prevTurtle) { - var pathX = currTurtle[0]; if (currTurtle[1] < prevTurtle[1]) { diff --git a/jstests/core/geo_polygon3.js b/jstests/core/geo_polygon3.js index ac668f37c04..f1e819e1920 100644 --- a/jstests/core/geo_polygon3.js +++ b/jstests/core/geo_polygon3.js @@ -3,65 +3,63 @@ // (function() { - "use strict"; +"use strict"; - const numTests = 31; +const numTests = 31; - for (let n = 0; n < numTests; n++) { - let t = db.geo_polygon3; - t.drop(); +for (let n = 0; n < numTests; n++) { + let t = db.geo_polygon3; + t.drop(); - let num = 0; - for (let x = 1; x < 9; x++) { - for (let y = 1; y < 9; y++) { - let o = {_id: num++, loc: [x, y]}; - assert.writeOK(t.insert(o)); - } + let num = 0; + for (let x = 1; x < 9; x++) { + for (let y = 1; y < 9; y++) { + let o = {_id: num++, loc: [x, y]}; + assert.writeOK(t.insert(o)); } + } - assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n})); + assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n})); - const triangle = [[0, 0], [1, 1], [0, 2]]; + const triangle = [[0, 0], [1, 1], [0, 2]]; - // Look at only a small slice of the data within a triangle - assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).itcount(), "Triangle Test"); + // Look at only a small slice of the data within a triangle + assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).itcount(), "Triangle Test"); - let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; + let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; - assert.eq(num, - t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), - "Bounding Box Test"); + assert.eq( + num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), "Bounding Box Test"); - // Look in a box much bigger than the one we have data in - boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]]; - assert.eq(num, - t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), - "Big Bounding Box Test"); + // Look in a box much bigger than the one we have data in + boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]]; + assert.eq(num, + t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), + "Big Bounding Box Test"); - assert(t.drop()); + assert(t.drop()); - const pacman = [ - [0, 2], - [0, 4], - [2, 
6], - [4, 6], // Head - [6, 4], - [4, 3], - [6, 2], // Mouth - [4, 0], - [2, 0] // Bottom - ]; + const pacman = [ + [0, 2], + [0, 4], + [2, 6], + [4, 6], // Head + [6, 4], + [4, 3], + [6, 2], // Mouth + [4, 0], + [2, 0] // Bottom + ]; - assert.writeOK(t.insert({loc: [1, 3]})); // Add a point that's in - assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n})); + assert.writeOK(t.insert({loc: [1, 3]})); // Add a point that's in + assert.commandWorked(t.createIndex({loc: "2d"}, {bits: 2 + n})); - assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point"); + assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point"); - assert.writeOK( - t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening - assert.writeOK(t.insert({loc: [3, 7]})); // Add a point above the center of the head - assert.writeOK(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom + assert.writeOK(t.insert({loc: [5, 3]})); // Add a point that's out right in the mouth opening + assert.writeOK(t.insert({loc: [3, 7]})); // Add a point above the center of the head + assert.writeOK(t.insert({loc: [3, -1]})); // Add a point below the center of the bottom - assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point"); - } + assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point"); +} })(); diff --git a/jstests/core/geo_s2disjoint_holes.js b/jstests/core/geo_s2disjoint_holes.js index 0b088434b36..dd17dd29b1d 100644 --- a/jstests/core/geo_s2disjoint_holes.js +++ b/jstests/core/geo_s2disjoint_holes.js @@ -12,13 +12,14 @@ // http://geojson.org/geojson-spec.html#polygon // -var t = db.geo_s2disjoint_holes, coordinates = - [ - // One square. - [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]], - // Another disjoint square. - [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]] - ], +var t = db.geo_s2disjoint_holes, + coordinates = + [ + // One square. + [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]], + // Another disjoint square. + [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]] + ], poly = {type: 'Polygon', coordinates: coordinates}, multiPoly = { type: 'MultiPolygon', // Multi-polygon's coordinates are wrapped in one more array. @@ -33,12 +34,12 @@ jsTest.log("We're going to print some error messages, don't be alarmed."); // Can't query with a polygon or multi-polygon that has a non-contained hole. // print(assert.throws(function() { - t.findOne({geo: {$geoWithin: {$geometry: poly}}}); -}, [], "parsing a polygon with non-overlapping holes.")); + t.findOne({geo: {$geoWithin: {$geometry: poly}}}); + }, [], "parsing a polygon with non-overlapping holes.")); print(assert.throws(function() { - t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}}); -}, [], "parsing a multi-polygon with non-overlapping holes.")); + t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}}); + }, [], "parsing a multi-polygon with non-overlapping holes.")); // // Can't insert a bad polygon or a bad multi-polygon with a 2dsphere index. 
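The geo_s2disjoint_holes.js hunk above leans on the GeoJSON rule that every interior ring (hole) of a polygon must lie inside its exterior ring; the two disjoint squares it defines deliberately violate that rule. A minimal shell sketch of the rule, assuming a hypothetical scratch collection named geo_hole_sketch that is not part of this change:

// 'geo_hole_sketch' is a hypothetical scratch collection used only for illustration.
var sketch = db.geo_hole_sketch;
sketch.drop();
assert.commandWorked(sketch.createIndex({geo: "2dsphere"}));

// An outer ring spanning 0..10 with a hole at 4..6, strictly inside the ring: valid GeoJSON,
// so the insert succeeds against the 2dsphere index.
assert.writeOK(sketch.insert({
    geo: {
        type: 'Polygon',
        coordinates: [
            [[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]],
            [[4, 4], [4, 6], [6, 6], [6, 4], [4, 4]]
        ]
    }
}));

// A "hole" that lies entirely outside the outer ring, like the disjoint squares above,
// is malformed and is rejected on insert.
assert.writeError(sketch.insert({
    geo: {
        type: 'Polygon',
        coordinates: [
            [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
            [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]]
        ]
    }
}));

Only the contained hole indexes cleanly; the disjoint pair fails the same validation the test provokes through both queries and inserts.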
diff --git a/jstests/core/geo_s2dupe_points.js b/jstests/core/geo_s2dupe_points.js index 406a7b1ff4c..faa06cabb9b 100644 --- a/jstests/core/geo_s2dupe_points.js +++ b/jstests/core/geo_s2dupe_points.js @@ -57,16 +57,23 @@ var multiPolygonWithDupes = { coordinates: [ [[[102.0, 2.0], [103.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], [ - [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], - [ - [100.2, 0.2], - [100.8, 0.2], - [100.8, 0.8], - [100.8, 0.8], - [100.8, 0.8], - [100.2, 0.8], - [100.2, 0.2] - ] + [ + [100.0, 0.0], + [101.0, 0.0], + [101.0, 1.0], + [101.0, 1.0], + [100.0, 1.0], + [100.0, 0.0] + ], + [ + [100.2, 0.2], + [100.8, 0.2], + [100.8, 0.8], + [100.8, 0.8], + [100.8, 0.8], + [100.2, 0.8], + [100.2, 0.2] + ] ] ] } @@ -76,8 +83,8 @@ var multiPolygonWithoutDupes = { coordinates: [ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], [ - [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], - [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] + [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] ] ] }; diff --git a/jstests/core/geo_s2index.js b/jstests/core/geo_s2index.js index 99c3852aae9..0b1644e41da 100644 --- a/jstests/core/geo_s2index.js +++ b/jstests/core/geo_s2index.js @@ -101,22 +101,14 @@ assert.throws(function() { t.drop(); t.ensureIndex({loc: "2dsphere"}); res = t.insert({ - loc: { - type: 'Point', - coordinates: [40, 5], - crs: {type: 'name', properties: {name: 'EPSG:2000'}} - } + loc: {type: 'Point', coordinates: [40, 5], crs: {type: 'name', properties: {name: 'EPSG:2000'}}} }); assert.writeError(res); assert.eq(0, t.find().itcount()); res = t.insert({loc: {type: 'Point', coordinates: [40, 5]}}); assert.writeOK(res); res = t.insert({ - loc: { - type: 'Point', - coordinates: [40, 5], - crs: {type: 'name', properties: {name: 'EPSG:4326'}} - } + loc: {type: 'Point', coordinates: [40, 5], crs: {type: 'name', properties: {name: 'EPSG:4326'}}} }); assert.writeOK(res); res = t.insert({ diff --git a/jstests/core/geo_s2indexversion1.js b/jstests/core/geo_s2indexversion1.js index d9797a67505..7b17796f29f 100644 --- a/jstests/core/geo_s2indexversion1.js +++ b/jstests/core/geo_s2indexversion1.js @@ -136,11 +136,11 @@ var multiPolygonDoc = { type: "MultiPolygon", coordinates: [ [[ - [-73.958, 40.8003], - [-73.9498, 40.7968], - [-73.9737, 40.7648], - [-73.9814, 40.7681], - [-73.958, 40.8003] + [-73.958, 40.8003], + [-73.9498, 40.7968], + [-73.9737, 40.7648], + [-73.9814, 40.7681], + [-73.958, 40.8003] ]], [[[-73.958, 40.8003], [-73.9498, 40.7968], [-73.9737, 40.7648], [-73.958, 40.8003]]] ] @@ -151,22 +151,22 @@ var geometryCollectionDoc = { type: "GeometryCollection", geometries: [ { - type: "MultiPoint", - coordinates: [ - [-73.9580, 40.8003], - [-73.9498, 40.7968], - [-73.9737, 40.7648], - [-73.9814, 40.7681] - ] + type: "MultiPoint", + coordinates: [ + [-73.9580, 40.8003], + [-73.9498, 40.7968], + [-73.9737, 40.7648], + [-73.9814, 40.7681] + ] }, { - type: "MultiLineString", - coordinates: [ - [[-73.96943, 40.78519], [-73.96082, 40.78095]], - [[-73.96415, 40.79229], [-73.95544, 40.78854]], - [[-73.97162, 40.78205], [-73.96374, 40.77715]], - [[-73.97880, 40.77247], [-73.97036, 40.76811]] - ] + type: "MultiLineString", + coordinates: [ + [[-73.96943, 40.78519], [-73.96082, 40.78095]], + [[-73.96415, 40.79229], [-73.95544, 40.78854]], + [[-73.97162, 40.78205], [-73.96374, 
40.77715]], + [[-73.97880, 40.77247], [-73.97036, 40.76811]] + ] } ] } diff --git a/jstests/core/geo_s2meridian.js b/jstests/core/geo_s2meridian.js index 583b426845c..763067e8a34 100644 --- a/jstests/core/geo_s2meridian.js +++ b/jstests/core/geo_s2meridian.js @@ -45,8 +45,7 @@ t.insert(pointOnPositiveSideOfMeridian); meridianCrossingPoly = { type: "Polygon", - coordinates: - [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]] + coordinates: [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]] }; result = t.find({geo: {$geoWithin: {$geometry: meridianCrossingPoly}}}); diff --git a/jstests/core/geo_s2multi.js b/jstests/core/geo_s2multi.js index 8899c9d5561..d9a4032d070 100644 --- a/jstests/core/geo_s2multi.js +++ b/jstests/core/geo_s2multi.js @@ -21,8 +21,8 @@ multiPolygonA = { "coordinates": [ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], [ - [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], - [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] + [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] ] ] }; @@ -31,36 +31,33 @@ assert.writeOK(t.insert({geo: multiPolygonA})); assert.eq(3, t.find({ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}} }).itcount()); -assert.eq(3, - t.find({ - geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}} - }).itcount()); +assert.eq(3, t.find({ + geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}} + }).itcount()); // Inside the hole in multiPolygonA -assert.eq( - 0, t.find({ - geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}} - }).itcount()); +assert.eq(0, + t.find({ + geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}} + }).itcount()); // One point inside the hole, one out. assert.eq( - 3, - t.find({ - geo: { - $geoIntersects: - {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}} - } - }).itcount()); + 3, t.find({ + geo: { + $geoIntersects: + {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}} + } + }).itcount()); assert.eq( - 3, - t.find({ - geo: { - $geoIntersects: { - $geometry: - {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]} - } - } - }).itcount()); + 3, t.find({ + geo: { + $geoIntersects: { + $geometry: + {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]} + } + } + }).itcount()); // Polygon contains itself and the multipoint. assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount()); diff --git a/jstests/core/geo_s2near.js b/jstests/core/geo_s2near.js index d0a591d45e6..86373c4aa11 100644 --- a/jstests/core/geo_s2near.js +++ b/jstests/core/geo_s2near.js @@ -2,142 +2,153 @@ // Test 2dsphere near search, called via find and $geoNear. 
(function() { - t = db.geo_s2near; - t.drop(); +t = db.geo_s2near; +t.drop(); - // Make sure that geoNear gives us back loc - goldenPoint = {type: "Point", coordinates: [31.0, 41.0]}; - t.insert({geo: goldenPoint}); - t.ensureIndex({geo: "2dsphere"}); - resNear = - t.aggregate([ - {$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}}, - {$limit: 1} - ]).toArray(); - assert.eq(resNear.length, 1, tojson(resNear)); - assert.eq(resNear[0].loc, goldenPoint); +// Make sure that geoNear gives us back loc +goldenPoint = { + type: "Point", + coordinates: [31.0, 41.0] +}; +t.insert({geo: goldenPoint}); +t.ensureIndex({geo: "2dsphere"}); +resNear = + t.aggregate([ + {$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}}, + {$limit: 1} + ]).toArray(); +assert.eq(resNear.length, 1, tojson(resNear)); +assert.eq(resNear[0].loc, goldenPoint); - // FYI: - // One degree of long @ 0 is 111km or so. - // One degree of lat @ 0 is 110km or so. - lat = 0; - lng = 0; - points = 10; - for (var x = -points; x < points; x += 1) { - for (var y = -points; y < points; y += 1) { - t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}}); - } +// FYI: +// One degree of long @ 0 is 111km or so. +// One degree of lat @ 0 is 110km or so. +lat = 0; +lng = 0; +points = 10; +for (var x = -points; x < points; x += 1) { + for (var y = -points; y < points; y += 1) { + t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}}); } +} - origin = {"type": "Point", "coordinates": [lng, lat]}; +origin = { + "type": "Point", + "coordinates": [lng, lat] +}; - t.ensureIndex({geo: "2dsphere"}); +t.ensureIndex({geo: "2dsphere"}); - // Near only works when the query is a point. - someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]}; - somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}; - assert.throws(function() { - return t.find({"geo": {"$near": {"$geometry": someline}}}).count(); - }); - assert.throws(function() { - return t.find({"geo": {"$near": {"$geometry": somepoly}}}).count(); - }); - assert.throws(function() { - return t.aggregate({$geoNear: {near: someline, distanceField: "dis", spherical: true}}); - }); - assert.throws(function() { - return t.aggregate({$geoNear: {near: somepoly, distanceField: "dis", spherical: true}}); - }); +// Near only works when the query is a point. +someline = { + "type": "LineString", + "coordinates": [[40, 5], [41, 6]] +}; +somepoly = { + "type": "Polygon", + "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]] +}; +assert.throws(function() { + return t.find({"geo": {"$near": {"$geometry": someline}}}).count(); +}); +assert.throws(function() { + return t.find({"geo": {"$near": {"$geometry": somepoly}}}).count(); +}); +assert.throws(function() { + return t.aggregate({$geoNear: {near: someline, distanceField: "dis", spherical: true}}); +}); +assert.throws(function() { + return t.aggregate({$geoNear: {near: somepoly, distanceField: "dis", spherical: true}}); +}); - // Do some basic near searches. - res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10); - resNear = t.aggregate([ - {$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}}, - {$limit: 10}, - ]); - assert.eq(res.itcount(), resNear.itcount(), "10"); +// Do some basic near searches. 
+res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10); +resNear = t.aggregate([ + {$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}}, + {$limit: 10}, +]); +assert.eq(res.itcount(), resNear.itcount(), "10"); - res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10); - resNear = t.aggregate([ - {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, - {$limit: 10}, - ]); - assert.eq(res.itcount(), resNear.itcount(), "10"); +res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10); +resNear = t.aggregate([ + {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, + {$limit: 10}, +]); +assert.eq(res.itcount(), resNear.itcount(), "10"); - // Find all the points! - res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000); - resNear = t.aggregate([ - {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, - {$limit: 10000}, - ]); - assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points)).toString()); +// Find all the points! +res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000); +resNear = t.aggregate([ + {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, + {$limit: 10000}, +]); +assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points)).toString()); - // longitude goes -180 to 180 - // latitude goes -90 to 90 - // Let's put in some perverse (polar) data and make sure we get it back. - // Points go long, lat. - t.insert({geo: {"type": "Point", "coordinates": [-180, -90]}}); - t.insert({geo: {"type": "Point", "coordinates": [180, -90]}}); - t.insert({geo: {"type": "Point", "coordinates": [180, 90]}}); - t.insert({geo: {"type": "Point", "coordinates": [-180, 90]}}); - res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000); - resNear = t.aggregate([ - {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, - {$limit: 10000}, - ]); - assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).toString()); +// longitude goes -180 to 180 +// latitude goes -90 to 90 +// Let's put in some perverse (polar) data and make sure we get it back. +// Points go long, lat. +t.insert({geo: {"type": "Point", "coordinates": [-180, -90]}}); +t.insert({geo: {"type": "Point", "coordinates": [180, -90]}}); +t.insert({geo: {"type": "Point", "coordinates": [180, 90]}}); +t.insert({geo: {"type": "Point", "coordinates": [-180, 90]}}); +res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000); +resNear = t.aggregate([ + {$geoNear: {near: [0, 0], distanceField: "dis", spherical: true}}, + {$limit: 10000}, +]); +assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).toString()); - function testRadAndDegreesOK(distance) { - // Distance for old style points is radians. - resRadians = - t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}}); - // Distance for new style points is meters. - resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}}); - // And we should get the same # of results no matter what. - assert.eq(resRadians.itcount(), resMeters.itcount()); +function testRadAndDegreesOK(distance) { + // Distance for old style points is radians. + resRadians = t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}}); + // Distance for new style points is meters. 
+ resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}}); + // And we should get the same # of results no matter what. + assert.eq(resRadians.itcount(), resMeters.itcount()); - // Also, $geoNear should behave the same way. - resGNMeters = t.aggregate({ - $geoNear: { - near: origin, - distanceField: "dis", - maxDistance: distance, - spherical: true, - } - }).toArray(); - resGNRadians = t.aggregate({ - $geoNear: { - near: [0, 0], - distanceField: "dis", - maxDistance: (distance / (6378.1 * 1000)), - spherical: true, - } - }).toArray(); - const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` + - `$geoNear using radian distances returned ${tojson(resGNRadians)}`; - assert.eq(resGNRadians.length, resGNMeters.length, errmsg); - for (var i = 0; i < resGNRadians.length; ++i) { - // Radius of earth * radians = distance in meters. - assert.close(resGNRadians[i].dis * 6378.1 * 1000, resGNMeters[i].dis); - } + // Also, $geoNear should behave the same way. + resGNMeters = t.aggregate({ + $geoNear: { + near: origin, + distanceField: "dis", + maxDistance: distance, + spherical: true, + } + }).toArray(); + resGNRadians = t.aggregate({ + $geoNear: { + near: [0, 0], + distanceField: "dis", + maxDistance: (distance / (6378.1 * 1000)), + spherical: true, + } + }).toArray(); + const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` + + `$geoNear using radian distances returned ${tojson(resGNRadians)}`; + assert.eq(resGNRadians.length, resGNMeters.length, errmsg); + for (var i = 0; i < resGNRadians.length; ++i) { + // Radius of earth * radians = distance in meters. + assert.close(resGNRadians[i].dis * 6378.1 * 1000, resGNMeters[i].dis); } +} - testRadAndDegreesOK(1); - testRadAndDegreesOK(10); - testRadAndDegreesOK(50); - testRadAndDegreesOK(10000); +testRadAndDegreesOK(1); +testRadAndDegreesOK(10); +testRadAndDegreesOK(50); +testRadAndDegreesOK(10000); - // SERVER-13666 legacy coordinates must be in bounds for spherical near queries. - assert.commandFailedWithCode(db.runCommand({ - aggregate: t.getName(), - cursor: {}, - pipeline: [{ - $geoNear: { - near: [1210.466, 31.2051], - distanceField: "dis", - spherical: true, - } - }] - }), - 17444); +// SERVER-13666 legacy coordinates must be in bounds for spherical near queries. 
+assert.commandFailedWithCode(db.runCommand({ + aggregate: t.getName(), + cursor: {}, + pipeline: [{ + $geoNear: { + near: [1210.466, 31.2051], + distanceField: "dis", + spherical: true, + } + }] +}), + 17444); }()); diff --git a/jstests/core/geo_s2near_equator_opposite.js b/jstests/core/geo_s2near_equator_opposite.js index fb17310030b..485afc52fd4 100644 --- a/jstests/core/geo_s2near_equator_opposite.js +++ b/jstests/core/geo_s2near_equator_opposite.js @@ -2,55 +2,51 @@ // on the equator // First reported in SERVER-11830 as a regression in 2.5 (function() { - var t = db.geos2nearequatoropposite; +var t = db.geos2nearequatoropposite; - t.drop(); +t.drop(); - t.insert({loc: {type: 'Point', coordinates: [0, 0]}}); - t.insert({loc: {type: 'Point', coordinates: [-1, 0]}}); +t.insert({loc: {type: 'Point', coordinates: [0, 0]}}); +t.insert({loc: {type: 'Point', coordinates: [-1, 0]}}); - t.ensureIndex({loc: '2dsphere'}); +t.ensureIndex({loc: '2dsphere'}); - // upper bound for half of earth's circumference in meters - var dist = 40075000 / 2 + 1; +// upper bound for half of earth's circumference in meters +var dist = 40075000 / 2 + 1; - var nearSphereCount = - t.find({ - loc: { - $nearSphere: - {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist} - } - }).itcount(); - var nearCount = - t.find({ - loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}} - }).itcount(); - var geoNearResult = t.aggregate([ - { - $geoNear: { - near: {type: 'Point', coordinates: [180, 0]}, - spherical: true, - distanceField: "dist", - } - }, - { - $group: { - _id: null, - nResults: {$sum: 1}, - maxDistance: {$max: "$distanceField"}, - } +var nearSphereCount = + t.find({ + loc: {$nearSphere: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}} + }).itcount(); +var nearCount = + t.find({ + loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}} + }).itcount(); +var geoNearResult = t.aggregate([ + { + $geoNear: { + near: {type: 'Point', coordinates: [180, 0]}, + spherical: true, + distanceField: "dist", } - ]).toArray(); + }, + { + $group: { + _id: null, + nResults: {$sum: 1}, + maxDistance: {$max: "$distanceField"}, + } + } + ]).toArray(); - assert.eq(2, nearSphereCount, 'unexpected document count for nearSphere'); - assert.eq(2, nearCount, 'unexpected document count for near'); - assert.eq(1, geoNearResult.length, `unexpected $geoNear result: ${tojson(geoNearResult)}`); +assert.eq(2, nearSphereCount, 'unexpected document count for nearSphere'); +assert.eq(2, nearCount, 'unexpected document count for near'); +assert.eq(1, geoNearResult.length, `unexpected $geoNear result: ${tojson(geoNearResult)}`); - const geoNearStats = geoNearResult[0]; - assert.eq(2, - geoNearStats.nResults, - `unexpected document count for $geoNear: ${tojson(geoNearStats)}`); - assert.gt(dist, - geoNearStats.maxDistance, - `unexpected maximum distance in $geoNear results: ${tojson(geoNearStats)}`); +const geoNearStats = geoNearResult[0]; +assert.eq( + 2, geoNearStats.nResults, `unexpected document count for $geoNear: ${tojson(geoNearStats)}`); +assert.gt(dist, + geoNearStats.maxDistance, + `unexpected maximum distance in $geoNear results: ${tojson(geoNearStats)}`); }()); diff --git a/jstests/core/geo_s2nearwithin.js b/jstests/core/geo_s2nearwithin.js index 6df9a1940df..d8f15dcdb54 100644 --- a/jstests/core/geo_s2nearwithin.js +++ b/jstests/core/geo_s2nearwithin.js @@ -1,57 +1,57 @@ // Test $geoNear + $within. 
(function() { - t = db.geo_s2nearwithin; - t.drop(); - - points = 10; - for (var x = -points; x < points; x += 1) { - for (var y = -points; y < points; y += 1) { - assert.commandWorked(t.insert({geo: [x, y]})); - } - } +t = db.geo_s2nearwithin; +t.drop(); - origin = {"type": "Point", "coordinates": [0, 0]}; - - assert.commandWorked(t.ensureIndex({geo: "2dsphere"})); - // Near requires an index, and 2dsphere is an index. Spherical isn't - // specified so this doesn't work. - let res = assert.commandFailedWithCode(t.runCommand("aggregate", { - cursor: {}, - pipeline: [{ - $geoNear: { - near: [0, 0], - distanceField: "d", - query: {geo: {$within: {$center: [[0, 0], 1]}}} - } - }], - }), - ErrorCodes.BadValue); - assert(res.errmsg.includes("unable to find index for $geoNear query"), tojson(res)); - - // Spherical is specified so this does work. Old style points are weird - // because you can use them with both $center and $centerSphere. Points are - // the only things we will do this conversion for. - const runGeoNear = (within) => t.aggregate({ - $geoNear: { - near: [0, 0], - distanceField: "d", - spherical: true, - query: {geo: {$within: within}}, - } - }).toArray(); - - resNear = runGeoNear({$center: [[0, 0], 1]}); - assert.eq(resNear.length, 5); - - resNear = runGeoNear({$centerSphere: [[0, 0], Math.PI / 180.0]}); - assert.eq(resNear.length, 5); - - resNear = runGeoNear({$centerSphere: [[0, 0], 0]}); - assert.eq(resNear.length, 1); - - resNear = runGeoNear({$centerSphere: [[1, 0], 0.5 * Math.PI / 180.0]}); - assert.eq(resNear.length, 1); - - resNear = runGeoNear({$center: [[1, 0], 1.5]}); - assert.eq(resNear.length, 9); +points = 10; +for (var x = -points; x < points; x += 1) { + for (var y = -points; y < points; y += 1) { + assert.commandWorked(t.insert({geo: [x, y]})); + } +} + +origin = { + "type": "Point", + "coordinates": [0, 0] +}; + +assert.commandWorked(t.ensureIndex({geo: "2dsphere"})); +// Near requires an index, and 2dsphere is an index. Spherical isn't +// specified so this doesn't work. +let res = assert.commandFailedWithCode(t.runCommand("aggregate", { + cursor: {}, + pipeline: [{ + $geoNear: + {near: [0, 0], distanceField: "d", query: {geo: {$within: {$center: [[0, 0], 1]}}}} + }], +}), + ErrorCodes.BadValue); +assert(res.errmsg.includes("unable to find index for $geoNear query"), tojson(res)); + +// Spherical is specified so this does work. Old style points are weird +// because you can use them with both $center and $centerSphere. Points are +// the only things we will do this conversion for. +const runGeoNear = (within) => t.aggregate({ + $geoNear: { + near: [0, 0], + distanceField: "d", + spherical: true, + query: {geo: {$within: within}}, + } + }).toArray(); + +resNear = runGeoNear({$center: [[0, 0], 1]}); +assert.eq(resNear.length, 5); + +resNear = runGeoNear({$centerSphere: [[0, 0], Math.PI / 180.0]}); +assert.eq(resNear.length, 5); + +resNear = runGeoNear({$centerSphere: [[0, 0], 0]}); +assert.eq(resNear.length, 1); + +resNear = runGeoNear({$centerSphere: [[1, 0], 0.5 * Math.PI / 180.0]}); +assert.eq(resNear.length, 1); + +resNear = runGeoNear({$center: [[1, 0], 1.5]}); +assert.eq(resNear.length, 9); }()); diff --git a/jstests/core/geo_s2ordering.js b/jstests/core/geo_s2ordering.js index b20189e49b8..dc9f660ae6c 100644 --- a/jstests/core/geo_s2ordering.js +++ b/jstests/core/geo_s2ordering.js @@ -4,56 +4,56 @@ // for 2dsphere. 
// @tags: [assumes_balancer_off, operations_longer_than_stepdown_interval_in_txns] (function() { - "use strict"; - - const coll = db.geo_s2ordering; - coll.drop(); - - const needle = "hari"; - - // We insert lots of points in a region and look for a non-geo key which is rare. - function makepoints(needle) { - const lat = 0; - const lng = 0; - const points = 50.0; - - const bulk = coll.initializeUnorderedBulkOp(); - for (let x = -points; x < points; x += 1) { - for (let y = -points; y < points; y += 1) { - bulk.insert({ - nongeo: x.toString() + "," + y.toString(), - geo: {type: "Point", coordinates: [lng + x / points, lat + y / points]} - }); - } +"use strict"; + +const coll = db.geo_s2ordering; +coll.drop(); + +const needle = "hari"; + +// We insert lots of points in a region and look for a non-geo key which is rare. +function makepoints(needle) { + const lat = 0; + const lng = 0; + const points = 50.0; + + const bulk = coll.initializeUnorderedBulkOp(); + for (let x = -points; x < points; x += 1) { + for (let y = -points; y < points; y += 1) { + bulk.insert({ + nongeo: x.toString() + "," + y.toString(), + geo: {type: "Point", coordinates: [lng + x / points, lat + y / points]} + }); } - bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}}); - assert.writeOK(bulk.execute()); } - - function runTest(index) { - assert.commandWorked(coll.ensureIndex(index)); - const cursor = - coll.find({nongeo: needle, geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}}); - const stats = cursor.explain("executionStats").executionStats; - assert.commandWorked(coll.dropIndex(index)); - return stats; - } - - makepoints(needle); - - // Indexing non-geo first should be quicker. - const fast = runTest({nongeo: 1, geo: "2dsphere"}); - const slow = runTest({geo: "2dsphere", nongeo: 1}); - - // The nReturned should be the same. - assert.eq(fast.nReturned, 1); - assert.eq(slow.nReturned, 1); - - // Only one document is examined, since we use the index. - assert.eq(fast.totalDocsExamined, 1); - assert.eq(slow.totalDocsExamined, 1); - - // The ordering actually matters for lookup speed. - // totalKeysExamined is a direct measure of its speed. - assert.lt(fast.totalKeysExamined, slow.totalKeysExamined); + bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}}); + assert.writeOK(bulk.execute()); +} + +function runTest(index) { + assert.commandWorked(coll.ensureIndex(index)); + const cursor = + coll.find({nongeo: needle, geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}}); + const stats = cursor.explain("executionStats").executionStats; + assert.commandWorked(coll.dropIndex(index)); + return stats; +} + +makepoints(needle); + +// Indexing non-geo first should be quicker. +const fast = runTest({nongeo: 1, geo: "2dsphere"}); +const slow = runTest({geo: "2dsphere", nongeo: 1}); + +// The nReturned should be the same. +assert.eq(fast.nReturned, 1); +assert.eq(slow.nReturned, 1); + +// Only one document is examined, since we use the index. +assert.eq(fast.totalDocsExamined, 1); +assert.eq(slow.totalDocsExamined, 1); + +// The ordering actually matters for lookup speed. +// totalKeysExamined is a direct measure of its speed. 
+assert.lt(fast.totalKeysExamined, slow.totalKeysExamined); }()); diff --git a/jstests/core/geo_s2polywithholes.js b/jstests/core/geo_s2polywithholes.js index 80f7b0556c4..020ba350e85 100644 --- a/jstests/core/geo_s2polywithholes.js +++ b/jstests/core/geo_s2polywithholes.js @@ -73,9 +73,7 @@ assert.writeError(t.insert({geo: polyWithBiggerHole})); // Test 6: Holes cannot share more than one vertex with exterior loop var polySharedVertices = { "type": "Polygon", - "coordinates": [ - [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], - [[0, 0], [0.1, 0.9], [1, 1], [0.9, 0.1], [0, 0]] - ] + "coordinates": + [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], [[0, 0], [0.1, 0.9], [1, 1], [0.9, 0.1], [0, 0]]] }; assert.writeError(t.insert({geo: polySharedVertices})); diff --git a/jstests/core/geo_s2sparse.js b/jstests/core/geo_s2sparse.js index 57f4f73fa3a..2fb93200c44 100644 --- a/jstests/core/geo_s2sparse.js +++ b/jstests/core/geo_s2sparse.js @@ -2,131 +2,130 @@ // All V2 2dsphere indices are sparse in the geo fields. (function() { - "use strict"; - - var coll = db.geo_s2sparse; - var point = {type: "Point", coordinates: [5, 5]}; - var indexSpec = {geo: "2dsphere", nonGeo: 1}; - var indexName = 'geo_2dsphere_nonGeo_1'; - - // - // V2 indices are "geo sparse" always. - // - - // Clean up. - coll.drop(); - coll.ensureIndex(indexSpec); - - var bulkInsertDocs = function(coll, numDocs, makeDocFn) { - print("Bulk inserting " + numDocs + " documents"); - - var bulk = coll.initializeUnorderedBulkOp(); - for (var i = 0; i < numDocs; ++i) { - bulk.insert(makeDocFn(i)); - } - - assert.writeOK(bulk.execute()); - - print("Bulk inserting " + numDocs + " documents completed"); - }; - - // Insert N documents with the geo field. - var N = 1000; - bulkInsertDocs(coll, N, function(i) { - return {geo: point, nonGeo: "point_" + i}; - }); - - // Expect N keys. - assert.eq(N, coll.validate().keysPerIndex[indexName]); - - // Insert N documents without the geo field. - bulkInsertDocs(coll, N, function(i) { - return {wrongGeo: point, nonGeo: i}; - }); - - // Still expect N keys as we didn't insert any geo stuff. - assert.eq(N, coll.validate().keysPerIndex[indexName]); - - // Insert N documents with just the geo field. - bulkInsertDocs(coll, N, function(i) { - return {geo: point}; - }); - - // Expect 2N keys. - assert.eq(N + N, coll.validate().keysPerIndex[indexName]); - - // Add some "not geo" stuff. - bulkInsertDocs(coll, N, function(i) { - return {geo: null}; - }); - bulkInsertDocs(coll, N, function(i) { - return {geo: []}; - }); - bulkInsertDocs(coll, N, function(i) { - return {geo: undefined}; - }); - bulkInsertDocs(coll, N, function(i) { - return {geo: {}}; - }); - - // Still expect 2N keys. - assert.eq(N + N, coll.validate().keysPerIndex[indexName]); - - // - // V1 indices are never sparse - // - - coll.drop(); - coll.ensureIndex(indexSpec, {"2dsphereIndexVersion": 1}); - - // Insert N documents with the geo field. - bulkInsertDocs(coll, N, function(i) { - return {geo: point, nonGeo: "point_" + i}; - }); - - // Expect N keys. - assert.eq(N, coll.validate().keysPerIndex[indexName]); - - // Insert N documents without the geo field. - bulkInsertDocs(coll, N, function(i) { - return {wrongGeo: point, nonGeo: i}; - }); - - // Expect N keys as it's a V1 index. - assert.eq(N + N, coll.validate().keysPerIndex[indexName]); - - // - // V2 indices with several 2dsphere-indexed fields are only sparse if all are missing. - // - - // Clean up. 
- coll.drop(); - coll.ensureIndex({geo: "2dsphere", otherGeo: "2dsphere"}); - - indexName = 'geo_2dsphere_otherGeo_2dsphere'; - - // Insert N documents with the first geo field. - bulkInsertDocs(coll, N, function(i) { - return {geo: point}; - }); - - // Expect N keys. - assert.eq(N, coll.validate().keysPerIndex[indexName]); - - // Insert N documents with the second geo field. - bulkInsertDocs(coll, N, function(i) { - return {otherGeo: point}; - }); - - // They get inserted too. - assert.eq(N + N, coll.validate().keysPerIndex[indexName]); - - // Insert N documents with neither geo field. - bulkInsertDocs(coll, N, function(i) { - return {nonGeo: i}; - }); - - // Still expect 2N keys as the neither geo docs were omitted from the index. - assert.eq(N + N, coll.validate().keysPerIndex[indexName]); +"use strict"; + +var coll = db.geo_s2sparse; +var point = {type: "Point", coordinates: [5, 5]}; +var indexSpec = {geo: "2dsphere", nonGeo: 1}; +var indexName = 'geo_2dsphere_nonGeo_1'; + +// +// V2 indices are "geo sparse" always. +// +// Clean up. +coll.drop(); +coll.ensureIndex(indexSpec); + +var bulkInsertDocs = function(coll, numDocs, makeDocFn) { + print("Bulk inserting " + numDocs + " documents"); + + var bulk = coll.initializeUnorderedBulkOp(); + for (var i = 0; i < numDocs; ++i) { + bulk.insert(makeDocFn(i)); + } + + assert.writeOK(bulk.execute()); + + print("Bulk inserting " + numDocs + " documents completed"); +}; + +// Insert N documents with the geo field. +var N = 1000; +bulkInsertDocs(coll, N, function(i) { + return {geo: point, nonGeo: "point_" + i}; +}); + +// Expect N keys. +assert.eq(N, coll.validate().keysPerIndex[indexName]); + +// Insert N documents without the geo field. +bulkInsertDocs(coll, N, function(i) { + return {wrongGeo: point, nonGeo: i}; +}); + +// Still expect N keys as we didn't insert any geo stuff. +assert.eq(N, coll.validate().keysPerIndex[indexName]); + +// Insert N documents with just the geo field. +bulkInsertDocs(coll, N, function(i) { + return {geo: point}; +}); + +// Expect 2N keys. +assert.eq(N + N, coll.validate().keysPerIndex[indexName]); + +// Add some "not geo" stuff. +bulkInsertDocs(coll, N, function(i) { + return {geo: null}; +}); +bulkInsertDocs(coll, N, function(i) { + return {geo: []}; +}); +bulkInsertDocs(coll, N, function(i) { + return {geo: undefined}; +}); +bulkInsertDocs(coll, N, function(i) { + return {geo: {}}; +}); + +// Still expect 2N keys. +assert.eq(N + N, coll.validate().keysPerIndex[indexName]); + +// +// V1 indices are never sparse +// + +coll.drop(); +coll.ensureIndex(indexSpec, {"2dsphereIndexVersion": 1}); + +// Insert N documents with the geo field. +bulkInsertDocs(coll, N, function(i) { + return {geo: point, nonGeo: "point_" + i}; +}); + +// Expect N keys. +assert.eq(N, coll.validate().keysPerIndex[indexName]); + +// Insert N documents without the geo field. +bulkInsertDocs(coll, N, function(i) { + return {wrongGeo: point, nonGeo: i}; +}); + +// Expect N keys as it's a V1 index. +assert.eq(N + N, coll.validate().keysPerIndex[indexName]); + +// +// V2 indices with several 2dsphere-indexed fields are only sparse if all are missing. +// + +// Clean up. +coll.drop(); +coll.ensureIndex({geo: "2dsphere", otherGeo: "2dsphere"}); + +indexName = 'geo_2dsphere_otherGeo_2dsphere'; + +// Insert N documents with the first geo field. +bulkInsertDocs(coll, N, function(i) { + return {geo: point}; +}); + +// Expect N keys. +assert.eq(N, coll.validate().keysPerIndex[indexName]); + +// Insert N documents with the second geo field. 
+bulkInsertDocs(coll, N, function(i) { + return {otherGeo: point}; +}); + +// They get inserted too. +assert.eq(N + N, coll.validate().keysPerIndex[indexName]); + +// Insert N documents with neither geo field. +bulkInsertDocs(coll, N, function(i) { + return {nonGeo: i}; +}); + +// Still expect 2N keys as the neither geo docs were omitted from the index. +assert.eq(N + N, coll.validate().keysPerIndex[indexName]); })(); diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js index c52b5d1d265..c50ca3c46b5 100644 --- a/jstests/core/geo_s2twofields.js +++ b/jstests/core/geo_s2twofields.js @@ -4,85 +4,83 @@ // @tags: [requires_fastcount, operations_longer_than_stepdown_interval_in_txns] (function() { - var t = db.geo_s2twofields; - t.drop(); +var t = db.geo_s2twofields; +t.drop(); - Random.setRandomSeed(); - var random = Random.rand; - var PI = Math.PI; +Random.setRandomSeed(); +var random = Random.rand; +var PI = Math.PI; - function randomCoord(center, minDistDeg, maxDistDeg) { - var dx = random() * (maxDistDeg - minDistDeg) + minDistDeg; - var dy = random() * (maxDistDeg - minDistDeg) + minDistDeg; - return [center[0] + dx, center[1] + dy]; - } +function randomCoord(center, minDistDeg, maxDistDeg) { + var dx = random() * (maxDistDeg - minDistDeg) + minDistDeg; + var dy = random() * (maxDistDeg - minDistDeg) + minDistDeg; + return [center[0] + dx, center[1] + dy]; +} - var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]}; - var miami = {type: "Point", coordinates: [-80.1303, 25.7903]}; - var maxPoints = 10000; - var degrees = 5; +var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]}; +var miami = {type: "Point", coordinates: [-80.1303, 25.7903]}; +var maxPoints = 10000; +var degrees = 5; - var arr = []; - for (var i = 0; i < maxPoints; ++i) { - var fromCoord = randomCoord(nyc.coordinates, 0, degrees); - var toCoord = randomCoord(miami.coordinates, 0, degrees); +var arr = []; +for (var i = 0; i < maxPoints; ++i) { + var fromCoord = randomCoord(nyc.coordinates, 0, degrees); + var toCoord = randomCoord(miami.coordinates, 0, degrees); - arr.push({ - from: {type: "Point", coordinates: fromCoord}, - to: {type: "Point", coordinates: toCoord} - }); - } - res = t.insert(arr); - assert.writeOK(res); - assert.eq(t.count(), maxPoints); + arr.push( + {from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}}); +} +res = t.insert(arr); +assert.writeOK(res); +assert.eq(t.count(), maxPoints); - function semiRigorousTime(func) { - var lowestTime = func(); - var iter = 2; - for (var i = 0; i < iter; ++i) { - var run = func(); - if (run < lowestTime) { - lowestTime = run; - } +function semiRigorousTime(func) { + var lowestTime = func(); + var iter = 2; + for (var i = 0; i < iter; ++i) { + var run = func(); + if (run < lowestTime) { + lowestTime = run; } - return lowestTime; - } - - function timeWithoutAndWithAnIndex(index, query) { - t.dropIndex(index); - var withoutTime = semiRigorousTime(function() { - return t.find(query).explain("executionStats").executionStats.executionTimeMillis; - }); - t.ensureIndex(index); - var withTime = semiRigorousTime(function() { - return t.find(query).explain("executionStats").executionStats.executionTimeMillis; - }); - t.dropIndex(index); - return [withoutTime, withTime]; } + return lowestTime; +} - var maxQueryRad = 0.5 * PI / 180.0; - // When we're not looking at ALL the data, anything indexed should beat not-indexed. 
- var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"}, { - from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, - to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} +function timeWithoutAndWithAnIndex(index, query) { + t.dropIndex(index); + var withoutTime = semiRigorousTime(function() { + return t.find(query).explain("executionStats").executionStats.executionTimeMillis; }); - print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); - // assert(smallQuery[0] > smallQuery[1]); - - // Let's just index one field. - var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"}, { - from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, - to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} + t.ensureIndex(index); + var withTime = semiRigorousTime(function() { + return t.find(query).explain("executionStats").executionStats.executionTimeMillis; }); - print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); - // assert(smallQuery[0] > smallQuery[1]); + t.dropIndex(index); + return [withoutTime, withTime]; +} - // And the other one. - var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"}, { - from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, - to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} - }); - print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); - // assert(smallQuery[0] > smallQuery[1]); +var maxQueryRad = 0.5 * PI / 180.0; +// When we're not looking at ALL the data, anything indexed should beat not-indexed. +var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"}, { + from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, + to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} +}); +print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); +// assert(smallQuery[0] > smallQuery[1]); + +// Let's just index one field. +var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"}, { + from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, + to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} +}); +print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); +// assert(smallQuery[0] > smallQuery[1]); + +// And the other one. +var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"}, { + from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, + to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}} +}); +print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]); +// assert(smallQuery[0] > smallQuery[1]); }()); diff --git a/jstests/core/geo_s2within_line_polygon_sphere.js b/jstests/core/geo_s2within_line_polygon_sphere.js index cbd95f7a717..17b89d25f9e 100644 --- a/jstests/core/geo_s2within_line_polygon_sphere.js +++ b/jstests/core/geo_s2within_line_polygon_sphere.js @@ -1,249 +1,240 @@ // Tests for $geowithin $centerSphere operator with LineString and Polygon. (function() { - function testGeoWithinCenterSphereLinePolygon(coll) { - coll.drop(); - - // Convenient test function for $geowithin $centerSphere. - function testGeoWithinCenterSphere(centerSphere, expected) { - let result = coll.find({geoField: {$geoWithin: {$centerSphere: centerSphere}}}, - {"name": 1, "_id": 0}) - .sort({"name": 1}) - .toArray(); - assert.eq(result, expected); - } +function testGeoWithinCenterSphereLinePolygon(coll) { + coll.drop(); + + // Convenient test function for $geowithin $centerSphere. 
+ function testGeoWithinCenterSphere(centerSphere, expected) { + let result = coll.find({geoField: {$geoWithin: {$centerSphere: centerSphere}}}, + {"name": 1, "_id": 0}) + .sort({"name": 1}) + .toArray(); + assert.eq(result, expected); + } - // Basic tests. - assert.writeOK( - coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}})); - assert.writeOK(coll.insert( - {name: "LineString1", geoField: {type: "LineString", coordinates: [[1, 1], [2, 2]]}})); - assert.writeOK(coll.insert({ - name: "Polygon1", - geoField: {type: "Polygon", coordinates: [[[1, 1], [2, 2], [2, 1], [1, 1]]]} - })); - - // The second parameter of $centerSphere is in radian and the angle between [1, 1] and [2,2] - // is about 0.0246 radian, much less than 1. - testGeoWithinCenterSphere([[1, 1], 1], - [{name: 'LineString1'}, {name: 'Point1'}, {name: 'Polygon1'}]); - - let geoDoc = { - "name": "LineString2", - "geoField": { - "type": "LineString", - "coordinates": [ - [151.0997772216797, -33.86157820443923], - [151.21719360351562, -33.8952122494965] - ] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a LineString within a geowithin sphere. - testGeoWithinCenterSphere([[151.16789425018004, -33.8508357122312], 0.0011167360027064348], - [{name: "LineString2"}]); - - // Test for a LineString intersecting with geowithin sphere (should not return a match). - testGeoWithinCenterSphere([[151.09822404831158, -33.85109290503663], 0.0013568277575574095], - []); - - geoDoc = { - "name": "LineString3", - "geoField": { - "type": "LineString", - "coordinates": [ - [174.72896575927734, -36.86698689106876], - [174.72965240478516, -36.90707799098374], - [174.7808074951172, -36.9062544131224], - [174.77840423583982, -36.88154294352893], - [174.72827911376953, -36.88373984256185] - ] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a LineString forming a closed loop rectangle within a geowithin sphere. - testGeoWithinCenterSphere([[174.75211152791763, -36.88962755605813], 0.000550933650273084], - [{name: "LineString3"}]); - - // Test for a LineString intersecting with geowithin sphere (should not return a match). - testGeoWithinCenterSphere([[174.75689891704758, -36.8998373317427], 0.0005315628331256537], - []); - - // Test for a LineString outside of geowithin sphere (should not return a match). - testGeoWithinCenterSphere([[174.8099591465865, -36.89409450096385], 0.00027296698925637807], - []); - - // Test for a Polygon within a geowithin sphere. - geoDoc = { - "name": "Polygon2", - "city": "Wellington", - "geoField": { - "type": "Polygon", - "coordinates": [[ - [174.72930908203125, -41.281676559981676], - [174.76261138916013, -41.34820622928743], - [174.84329223632812, -41.32861539747227], - [174.8312759399414, -41.280902559820895], - [174.72930908203125, -41.281676559981676] + // Basic tests. + assert.writeOK(coll.insert({name: "Point1", geoField: {type: "Point", coordinates: [1, 1]}})); + assert.writeOK(coll.insert( + {name: "LineString1", geoField: {type: "LineString", coordinates: [[1, 1], [2, 2]]}})); + assert.writeOK(coll.insert({ + name: "Polygon1", + geoField: {type: "Polygon", coordinates: [[[1, 1], [2, 2], [2, 1], [1, 1]]]} + })); + + // The second parameter of $centerSphere is in radian and the angle between [1, 1] and [2,2] + // is about 0.0246 radian, much less than 1. 
+ testGeoWithinCenterSphere([[1, 1], 1], + [{name: 'LineString1'}, {name: 'Point1'}, {name: 'Polygon1'}]); + + let geoDoc = { + "name": "LineString2", + "geoField": { + "type": "LineString", + "coordinates": + [[151.0997772216797, -33.86157820443923], [151.21719360351562, -33.8952122494965]] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a LineString within a geowithin sphere. + testGeoWithinCenterSphere([[151.16789425018004, -33.8508357122312], 0.0011167360027064348], + [{name: "LineString2"}]); + + // Test for a LineString intersecting with geowithin sphere (should not return a match). + testGeoWithinCenterSphere([[151.09822404831158, -33.85109290503663], 0.0013568277575574095], + []); + + geoDoc = { + "name": "LineString3", + "geoField": { + "type": "LineString", + "coordinates": [ + [174.72896575927734, -36.86698689106876], + [174.72965240478516, -36.90707799098374], + [174.7808074951172, -36.9062544131224], + [174.77840423583982, -36.88154294352893], + [174.72827911376953, -36.88373984256185] + ] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a LineString forming a closed loop rectangle within a geowithin sphere. + testGeoWithinCenterSphere([[174.75211152791763, -36.88962755605813], 0.000550933650273084], + [{name: "LineString3"}]); + + // Test for a LineString intersecting with geowithin sphere (should not return a match). + testGeoWithinCenterSphere([[174.75689891704758, -36.8998373317427], 0.0005315628331256537], []); + + // Test for a LineString outside of geowithin sphere (should not return a match). + testGeoWithinCenterSphere([[174.8099591465865, -36.89409450096385], 0.00027296698925637807], + []); + + // Test for a Polygon within a geowithin sphere. + geoDoc = { + "name": "Polygon2", + "city": "Wellington", + "geoField": { + "type": "Polygon", + "coordinates": [[ + [174.72930908203125, -41.281676559981676], + [174.76261138916013, -41.34820622928743], + [174.84329223632812, -41.32861539747227], + [174.8312759399414, -41.280902559820895], + [174.72930908203125, -41.281676559981676] + ]] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a Polygon within a geowithin sphere. + testGeoWithinCenterSphere([[174.78536621904806, -41.30510816038769], 0.0009483659386360411], + [{name: "Polygon2"}]); + + // Test for an empty query cap (radius 0) inside of a polygon that covers the centerSphere + // (should not return a match). + testGeoWithinCenterSphere([[174.79144274337722, -41.307682001033385], 0], []); + + // Test for a Polygon intersecting with geowithin sphere (should not return a match). + testGeoWithinCenterSphere([[174.7599527533759, -41.27137819591382], 0.0011247013153526434], []); + + // Test for a Polygon outside of geowithin sphere (should not return a match). + testGeoWithinCenterSphere([[174.80008799649448, -41.201484845543426], 0.0007748581633291528], + []); + + geoDoc = { + "name": "MultiPolygon1", + "city": "Sydney", + "geoField": { + "type": "MultiPolygon", + "coordinates": [ + [[ + [151.21032714843747, -33.85074408022877], + [151.23367309570312, -33.84333046657819], + [151.20929718017578, -33.81680727566872], + [151.1876678466797, -33.829927301798676], + [151.21032714843747, -33.85074408022877] + ]], + [[ + [151.20140075683594, -33.856446422184305], + [151.17565155029297, -33.88979749364442], + [151.2044906616211, -33.9151583833889], + [151.23058319091797, -33.87041555094182], + [151.20140075683594, -33.856446422184305] ]] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a Polygon within a geowithin sphere. 
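// The LineString cases above rely on the containment rule of $geoWithin: only geometries that lie
// entirely inside the query cap match, so a line that merely crosses the cap is skipped. A small
// sketch of the same rule, assuming a hypothetical db.shapes collection:
db.shapes.insert({name: "inside", geoField: {type: "LineString", coordinates: [[0.1, 0.1], [0.2, 0.2]]}});
db.shapes.insert({name: "crossing", geoField: {type: "LineString", coordinates: [[-5, -5], [5, 5]]}});
// A cap of about one degree (0.0175 rad) around [0.15, 0.15] fully contains "inside" but only a
// fragment of "crossing", so only "inside" is returned:
db.shapes.find({geoField: {$geoWithin: {$centerSphere: [[0.15, 0.15], 0.0175]}}});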
- testGeoWithinCenterSphere([[174.78536621904806, -41.30510816038769], 0.0009483659386360411], - [{name: "Polygon2"}]); - - // Test for an empty query cap (radius 0) inside of a polygon that covers the centerSphere - // (should not return a match). - testGeoWithinCenterSphere([[174.79144274337722, -41.307682001033385], 0], []); - - // Test for a Polygon intersecting with geowithin sphere (should not return a match). - testGeoWithinCenterSphere([[174.7599527533759, -41.27137819591382], 0.0011247013153526434], - []); - - // Test for a Polygon outside of geowithin sphere (should not return a match). - testGeoWithinCenterSphere( - [[174.80008799649448, -41.201484845543426], 0.0007748581633291528], []); - - geoDoc = { - "name": "MultiPolygon1", - "city": "Sydney", - "geoField": { - "type": "MultiPolygon", - "coordinates": [ - [[ - [151.21032714843747, -33.85074408022877], - [151.23367309570312, -33.84333046657819], - [151.20929718017578, -33.81680727566872], - [151.1876678466797, -33.829927301798676], - [151.21032714843747, -33.85074408022877] - ]], - [[ - [151.20140075683594, -33.856446422184305], - [151.17565155029297, -33.88979749364442], - [151.2044906616211, -33.9151583833889], - [151.23058319091797, -33.87041555094182], - [151.20140075683594, -33.856446422184305] - ]] + ] + } + }; + + assert.writeOK(coll.insert(geoDoc)); + + // Test for a MultiPolygon (two seperate polygons) within a geowithin sphere. + testGeoWithinCenterSphere([[151.20821632978107, -33.865139891361636], 0.000981007241416606], + [{name: "MultiPolygon1"}]); + + // Verify that only one of the polygons of a MultiPolygon in the $centerSphere does not + // match + testGeoWithinCenterSphere([[151.20438542915883, -33.89006380099829], 0.0006390286437185907], + []); + + geoDoc = { + "name": "MultiPolygon2", + "city": "Sydney", + "geoField": { + "type": "MultiPolygon", + "coordinates": [[ + [ + [151.203031539917, -33.87116383262648], + [151.20401859283447, -33.88270791866475], + [151.21891021728516, -33.88256540860479], + [151.2138032913208, -33.86817066653049], + [151.203031539917, -33.87116383262648] + ], + [ + [151.21041297912598, -33.86980979429744], + [151.20938301086426, -33.8767579211837], + [151.2121295928955, -33.87722110953139], + [151.21315956115723, -33.86995232565932], + [151.21041297912598, -33.86980979429744] ] - } - }; - - assert.writeOK(coll.insert(geoDoc)); - - // Test for a MultiPolygon (two seperate polygons) within a geowithin sphere. - testGeoWithinCenterSphere([[151.20821632978107, -33.865139891361636], 0.000981007241416606], - [{name: "MultiPolygon1"}]); - - // Verify that only one of the polygons of a MultiPolygon in the $centerSphere does not - // match - testGeoWithinCenterSphere([[151.20438542915883, -33.89006380099829], 0.0006390286437185907], - []); - - geoDoc = { - "name": "MultiPolygon2", - "city": "Sydney", - "geoField": { - "type": "MultiPolygon", - "coordinates": [[ - [ - [151.203031539917, -33.87116383262648], - [151.20401859283447, -33.88270791866475], - [151.21891021728516, -33.88256540860479], - [151.2138032913208, -33.86817066653049], - [151.203031539917, -33.87116383262648] - ], - [ - [151.21041297912598, -33.86980979429744], - [151.20938301086426, -33.8767579211837], - [151.2121295928955, -33.87722110953139], - [151.21315956115723, -33.86995232565932], - [151.21041297912598, -33.86980979429744] - ] - ]] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a MultiPolygon (with a hole) within a geowithin sphere. 
- testGeoWithinCenterSphere( - [[151.20936119647115, -33.875266834633265], 0.00020277354002627845], - [{name: "MultiPolygon2"}]); - - // Test for centerSphere as big as earth radius (should return all). - testGeoWithinCenterSphere( - [[151.20936119647115, -33.875266834633265], 3.14159265358979323846], [ - {name: "LineString1"}, - {name: "LineString2"}, - {name: "LineString3"}, - {name: "MultiPolygon1"}, - {name: "MultiPolygon2"}, - {name: "Point1"}, - {name: "Polygon1"}, - {name: "Polygon2"} - ]); - - // Test for a MultiPolygon with holes intersecting with geowithin sphere (should not return - // a match). - testGeoWithinCenterSphere( - [[151.21028000820485, -33.87067923462358], 0.00013138775245714733], []); - - // Test for a MultiPolygon with holes with geowithin sphere inside the hole (should not - // return a match). - testGeoWithinCenterSphere( - [[151.21093787887645, -33.87533330567804], 0.000016565456776516003], []); - - coll.drop(); - - // Test for a large query cap containing both of line vertices but not the line itself. - // (should not return a match). - geoDoc = { - "name": "HorizontalLongLine", - "geoField": { - "type": "LineString", - "coordinates": [[96.328125, 5.61598581915534], [153.984375, -6.315298538330033]] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a large query cap containing both of line vertices but not the line itself. - // (should not return a match). - testGeoWithinCenterSphere([[-59.80246852929814, -2.3633072488322853], 2.768403272464979], - []); - - coll.drop(); - - // Test for a large query cap containing all polygon vertices but not the whole polygon. - // (should not return a match). - geoDoc = { - "name": "LargeRegion", - "geoField": { - "type": "Polygon", - "coordinates": [[ - [98.96484375, -11.350796722383672], - [135.35156249999997, -11.350796722383672], - [135.35156249999997, 0.8788717828324276], - [98.96484375, 0.8788717828324276], - [98.96484375, -11.350796722383672] - ]] - } - }; - assert.writeOK(coll.insert(geoDoc)); - - // Test for a large query cap containing both of line vertices but not the line itself. - // (should not return a match). - testGeoWithinCenterSphere([[-61.52266094410311, 17.79937981451866], 2.9592242752161573], - []); - } + ]] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a MultiPolygon (with a hole) within a geowithin sphere. + testGeoWithinCenterSphere([[151.20936119647115, -33.875266834633265], 0.00020277354002627845], + [{name: "MultiPolygon2"}]); + + // Test for centerSphere as big as earth radius (should return all). + testGeoWithinCenterSphere([[151.20936119647115, -33.875266834633265], 3.14159265358979323846], [ + {name: "LineString1"}, + {name: "LineString2"}, + {name: "LineString3"}, + {name: "MultiPolygon1"}, + {name: "MultiPolygon2"}, + {name: "Point1"}, + {name: "Polygon1"}, + {name: "Polygon2"} + ]); + + // Test for a MultiPolygon with holes intersecting with geowithin sphere (should not return + // a match). + testGeoWithinCenterSphere([[151.21028000820485, -33.87067923462358], 0.00013138775245714733], + []); + + // Test for a MultiPolygon with holes with geowithin sphere inside the hole (should not + // return a match). + testGeoWithinCenterSphere([[151.21093787887645, -33.87533330567804], 0.000016565456776516003], + []); + + coll.drop(); + + // Test for a large query cap containing both of line vertices but not the line itself. + // (should not return a match). 
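// As the "centerSphere as big as earth radius" case suggests, a cap whose radius reaches pi
// radians spans the whole sphere, so every document whose geo field holds valid geometry matches
// regardless of where the center is placed (sketch, hypothetical db.shapes collection):
db.shapes.find({geoField: {$geoWithin: {$centerSphere: [[0, 0], Math.PI]}}});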
+ geoDoc = { + "name": "HorizontalLongLine", + "geoField": { + "type": "LineString", + "coordinates": [[96.328125, 5.61598581915534], [153.984375, -6.315298538330033]] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a large query cap containing both of line vertices but not the line itself. + // (should not return a match). + testGeoWithinCenterSphere([[-59.80246852929814, -2.3633072488322853], 2.768403272464979], []); + + coll.drop(); + + // Test for a large query cap containing all polygon vertices but not the whole polygon. + // (should not return a match). + geoDoc = { + "name": "LargeRegion", + "geoField": { + "type": "Polygon", + "coordinates": [[ + [98.96484375, -11.350796722383672], + [135.35156249999997, -11.350796722383672], + [135.35156249999997, 0.8788717828324276], + [98.96484375, 0.8788717828324276], + [98.96484375, -11.350796722383672] + ]] + } + }; + assert.writeOK(coll.insert(geoDoc)); + + // Test for a large query cap containing both of line vertices but not the line itself. + // (should not return a match). + testGeoWithinCenterSphere([[-61.52266094410311, 17.79937981451866], 2.9592242752161573], []); +} - // Test $geowithin $centerSphere for LineString and Polygon without index. - let coll = db.geo_s2within_line_polygon_sphere; - testGeoWithinCenterSphereLinePolygon(coll); +// Test $geowithin $centerSphere for LineString and Polygon without index. +let coll = db.geo_s2within_line_polygon_sphere; +testGeoWithinCenterSphereLinePolygon(coll); - // Test $geowithin $centerSphere for LineString and Polygon with 2dsphere index. - assert.commandWorked(coll.createIndex({geoField: "2dsphere"})); - testGeoWithinCenterSphereLinePolygon(coll); +// Test $geowithin $centerSphere for LineString and Polygon with 2dsphere index. +assert.commandWorked(coll.createIndex({geoField: "2dsphere"})); +testGeoWithinCenterSphereLinePolygon(coll); })();
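// The file above runs the same assertions twice, once without and once with a 2dsphere index,
// because $geoWithin works on unindexed data but can use a geo index when one exists. A rough
// way to see which path the planner chose (assuming db.shapes with geoField as sketched earlier):
var plan = db.shapes.find({geoField: {$geoWithin: {$centerSphere: [[0, 0], 0.1]}}})
               .explain()
               .queryPlanner.winningPlan;
printjson(plan);  // typically FETCH + IXSCAN over the 2dsphere index if present, COLLSCAN otherwise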
\ No newline at end of file diff --git a/jstests/core/geo_update_btree.js b/jstests/core/geo_update_btree.js index 476921b5c2f..981f0c629c3 100644 --- a/jstests/core/geo_update_btree.js +++ b/jstests/core/geo_update_btree.js @@ -20,14 +20,15 @@ if (testingReplication) { Random.setRandomSeed(); var parallelInsert = startParallelShell( - "Random.setRandomSeed();" + "for ( var i = 0; i < 1000; i++ ) {" + + "Random.setRandomSeed();" + + "for ( var i = 0; i < 1000; i++ ) {" + " var doc = { loc: [ Random.rand() * 180, Random.rand() * 180 ], v: '' };" + - " db.jstests_geo_update_btree.insert(doc);" + "}"); + " db.jstests_geo_update_btree.insert(doc);" + + "}"); for (i = 0; i < 1000; i++) { coll.update({ - loc: - {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}} + loc: {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}} }, {$set: {v: big}}, false, diff --git a/jstests/core/geob.js b/jstests/core/geob.js index 2664d6c5921..d1f01bf7b9c 100644 --- a/jstests/core/geob.js +++ b/jstests/core/geob.js @@ -1,38 +1,38 @@ (function() { - "use strict"; - var t = db.geob; - t.drop(); - - var a = {p: [0, 0]}; - var b = {p: [1, 0]}; - var c = {p: [3, 4]}; - var d = {p: [0, 6]}; - - t.save(a); - t.save(b); - t.save(c); - t.save(d); - t.ensureIndex({p: "2d"}); - - let res = t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis"}}).toArray(); - - assert.close(0, res[0].dis, "B1"); - assert.eq(a._id, res[0]._id, "B2"); - - assert.close(1, res[1].dis, "C1"); - assert.eq(b._id, res[1]._id, "C2"); - - assert.close(5, res[2].dis, "D1"); - assert.eq(c._id, res[2]._id, "D2"); - - assert.close(6, res[3].dis, "E1"); - assert.eq(d._id, res[3]._id, "E2"); - - res = t.aggregate({ - $geoNear: {near: [0, 0], distanceField: "dis", distanceMultiplier: 2.0} - }).toArray(); - assert.close(0, res[0].dis, "G"); - assert.close(2, res[1].dis, "H"); - assert.close(10, res[2].dis, "I"); - assert.close(12, res[3].dis, "J"); +"use strict"; +var t = db.geob; +t.drop(); + +var a = {p: [0, 0]}; +var b = {p: [1, 0]}; +var c = {p: [3, 4]}; +var d = {p: [0, 6]}; + +t.save(a); +t.save(b); +t.save(c); +t.save(d); +t.ensureIndex({p: "2d"}); + +let res = t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis"}}).toArray(); + +assert.close(0, res[0].dis, "B1"); +assert.eq(a._id, res[0]._id, "B2"); + +assert.close(1, res[1].dis, "C1"); +assert.eq(b._id, res[1]._id, "C2"); + +assert.close(5, res[2].dis, "D1"); +assert.eq(c._id, res[2]._id, "D2"); + +assert.close(6, res[3].dis, "E1"); +assert.eq(d._id, res[3]._id, "E2"); + +res = t.aggregate({ + $geoNear: {near: [0, 0], distanceField: "dis", distanceMultiplier: 2.0} + }).toArray(); +assert.close(0, res[0].dis, "G"); +assert.close(2, res[1].dis, "H"); +assert.close(10, res[2].dis, "I"); +assert.close(12, res[3].dis, "J"); }()); diff --git a/jstests/core/geonear_cmd_input_validation.js b/jstests/core/geonear_cmd_input_validation.js index 9cc82cb6f25..611e29b01d1 100644 --- a/jstests/core/geonear_cmd_input_validation.js +++ b/jstests/core/geonear_cmd_input_validation.js @@ -69,7 +69,6 @@ indexTypes.forEach(function(indexType) { // Try several bad values for min/maxDistance. badNumbers.concat(outOfRangeDistances).forEach(function(badDistance) { - var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription + " and " + indexType + " index should've failed with " + optionName + " " + badDistance); @@ -80,7 +79,6 @@ indexTypes.forEach(function(indexType) { // Bad values for limit / num. 
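// The geob.js hunk above checks that distanceMultiplier simply scales every distance reported by
// $geoNear. One common use (sketch, hypothetical db.places with a single 2dsphere index on loc):
// with a GeoJSON near point the raw distances are in meters, so a multiplier of 0.001 reports
// them in kilometers instead.
db.places.aggregate([{
    $geoNear: {
        near: {type: "Point", coordinates: [151.2, -33.85]},
        distanceField: "distKm",
        spherical: true,
        distanceMultiplier: 0.001
    }
}]);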
['num', 'limit'].forEach(function(limitOptionName) { [-1, 'foo'].forEach(function(badLimit) { - var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription + " and " + indexType + " index should've failed with '" + @@ -94,7 +92,6 @@ indexTypes.forEach(function(indexType) { // Bad values for distanceMultiplier. badNumbers.forEach(function(badNumber) { - var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription + " and " + indexType + " index should've failed with distanceMultiplier " + badNumber); diff --git a/jstests/core/geonear_key.js b/jstests/core/geonear_key.js index 41fcfb0a5da..0238e012577 100644 --- a/jstests/core/geonear_key.js +++ b/jstests/core/geonear_key.js @@ -2,101 +2,99 @@ * Tests for the 'key' field accepted by the $geoNear aggregation stage. */ (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.jstests_geonear_key; - coll.drop(); - - assert.writeOK(coll.insert({_id: 0, a: [1, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [1, 2]})); - assert.writeOK(coll.insert({_id: 2, b: {c: [1, 1]}})); - assert.writeOK(coll.insert({_id: 3, b: {c: [1, 2]}})); - assert.writeOK(coll.insert({_id: 4, b: {d: [1, 1]}})); - assert.writeOK(coll.insert({_id: 5, b: {d: [1, 2]}})); - - /** - * Runs an aggregation consisting of a single $geoNear stage described by 'nearParams', and - * returns the raw command result object. 'nearParams' consists of the parameters to the - * $geoNear stage, but is expected to omit 'distanceField'. - */ - function runNearAgg(nearParams) { - let nearAggParams = Object.extend({distanceField: "dist"}, nearParams); - let nearAggStage = {$geoNear: nearAggParams}; - let aggCmd = {aggregate: coll.getName(), pipeline: [nearAggStage], cursor: {}}; - return db.runCommand(aggCmd); - } +"use strict"; - /** - * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the - * operation fails with 'code'. - */ - function assertGeoNearFails(nearParams, code) { - assert.commandFailedWithCode(runNearAgg(nearParams), code); - } +load("jstests/libs/analyze_plan.js"); - /** - * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the - * operation returns the _id values in 'expectedIds', in order. - */ - function assertGeoNearSucceedsAndReturnsIds(nearParams, expectedIds) { - let aggResult = assert.commandWorked(runNearAgg(nearParams)); - let res = aggResult.cursor.firstBatch; - let errfn = () => `expected ids ${tojson(expectedIds)}, but these documents were ` + - `returned: ${tojson(res)}`; - - assert.eq(expectedIds.length, res.length, errfn); - for (let i = 0; i < expectedIds.length; i++) { - assert.eq(expectedIds[i], aggResult.cursor.firstBatch[i]._id, errfn); - } - } +const coll = db.jstests_geonear_key; +coll.drop(); + +assert.writeOK(coll.insert({_id: 0, a: [1, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [1, 2]})); +assert.writeOK(coll.insert({_id: 2, b: {c: [1, 1]}})); +assert.writeOK(coll.insert({_id: 3, b: {c: [1, 2]}})); +assert.writeOK(coll.insert({_id: 4, b: {d: [1, 1]}})); +assert.writeOK(coll.insert({_id: 5, b: {d: [1, 2]}})); + +/** + * Runs an aggregation consisting of a single $geoNear stage described by 'nearParams', and + * returns the raw command result object. 'nearParams' consists of the parameters to the + * $geoNear stage, but is expected to omit 'distanceField'. 
+ */ +function runNearAgg(nearParams) { + let nearAggParams = Object.extend({distanceField: "dist"}, nearParams); + let nearAggStage = {$geoNear: nearAggParams}; + let aggCmd = {aggregate: coll.getName(), pipeline: [nearAggStage], cursor: {}}; + return db.runCommand(aggCmd); +} - // Verify that $geoNear fails when the key field is not a string. - assertGeoNearFails({near: [0, 0], key: 1}, ErrorCodes.TypeMismatch); - - // Verify that $geoNear fails when the key field the empty string. - assertGeoNearFails({near: [0, 0], key: ""}, ErrorCodes.BadValue); - - // Verify that $geoNear fails when there are no eligible indexes. - assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound); - - // Verify that the query system raises an error when an index is specified that doesn't exist. - assertGeoNearFails({near: [0, 0], key: "a"}, ErrorCodes.BadValue); - - // Create a number of 2d and 2dsphere indexes. - assert.commandWorked(coll.createIndex({a: "2d"})); - assert.commandWorked(coll.createIndex({a: "2dsphere"})); - assert.commandWorked(coll.createIndex({"b.c": "2d"})); - assert.commandWorked(coll.createIndex({"b.d": "2dsphere"})); - - // Verify that $geoNear fails when the index to use is ambiguous because of the absence of the - // key field. - assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound); - - // Verify that the key field can correctly identify the index to use, when there is only a - // single geo index on the relevant path. - assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "b.c"}, [2, 3]); - assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "b.d"}, - [4, 5]); - - // Verify that when the key path has both a 2d or 2dsphere index, the command still succeeds. - assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "a"}, [0, 1]); - assertGeoNearSucceedsAndReturnsIds({near: [0, 0], spherical: true, key: "a"}, [0, 1]); - assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "a"}, - [0, 1]); - assertGeoNearSucceedsAndReturnsIds( - {near: {type: "Point", coordinates: [0, 0]}, spherical: true, key: "a"}, [0, 1]); - - // Verify that $geoNear fails when a GeoJSON point is used with a 'key' path that only has a 2d - // index. GeoJSON points can only be used for spherical geometry. - assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"}, - ErrorCodes.BadValue); - - // Verify that $geoNear fails when: - // -- The only index available over the 'key' path is 2dsphere. - // -- spherical=false. - // -- The search point is a legacy coordinate pair. - assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.BadValue); - assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.BadValue); +/** + * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the + * operation fails with 'code'. + */ +function assertGeoNearFails(nearParams, code) { + assert.commandFailedWithCode(runNearAgg(nearParams), code); +} + +/** + * Runs the near described by 'nearParams' as a $geoNear aggregation and verifies that the + * operation returns the _id values in 'expectedIds', in order. 
+ */ +function assertGeoNearSucceedsAndReturnsIds(nearParams, expectedIds) { + let aggResult = assert.commandWorked(runNearAgg(nearParams)); + let res = aggResult.cursor.firstBatch; + let errfn = () => `expected ids ${tojson(expectedIds)}, but these documents were ` + + `returned: ${tojson(res)}`; + + assert.eq(expectedIds.length, res.length, errfn); + for (let i = 0; i < expectedIds.length; i++) { + assert.eq(expectedIds[i], aggResult.cursor.firstBatch[i]._id, errfn); + } +} + +// Verify that $geoNear fails when the key field is not a string. +assertGeoNearFails({near: [0, 0], key: 1}, ErrorCodes.TypeMismatch); + +// Verify that $geoNear fails when the key field the empty string. +assertGeoNearFails({near: [0, 0], key: ""}, ErrorCodes.BadValue); + +// Verify that $geoNear fails when there are no eligible indexes. +assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound); + +// Verify that the query system raises an error when an index is specified that doesn't exist. +assertGeoNearFails({near: [0, 0], key: "a"}, ErrorCodes.BadValue); + +// Create a number of 2d and 2dsphere indexes. +assert.commandWorked(coll.createIndex({a: "2d"})); +assert.commandWorked(coll.createIndex({a: "2dsphere"})); +assert.commandWorked(coll.createIndex({"b.c": "2d"})); +assert.commandWorked(coll.createIndex({"b.d": "2dsphere"})); + +// Verify that $geoNear fails when the index to use is ambiguous because of the absence of the +// key field. +assertGeoNearFails({near: [0, 0]}, ErrorCodes.IndexNotFound); + +// Verify that the key field can correctly identify the index to use, when there is only a +// single geo index on the relevant path. +assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "b.c"}, [2, 3]); +assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "b.d"}, + [4, 5]); + +// Verify that when the key path has both a 2d or 2dsphere index, the command still succeeds. +assertGeoNearSucceedsAndReturnsIds({near: [0, 0], key: "a"}, [0, 1]); +assertGeoNearSucceedsAndReturnsIds({near: [0, 0], spherical: true, key: "a"}, [0, 1]); +assertGeoNearSucceedsAndReturnsIds({near: {type: "Point", coordinates: [0, 0]}, key: "a"}, [0, 1]); +assertGeoNearSucceedsAndReturnsIds( + {near: {type: "Point", coordinates: [0, 0]}, spherical: true, key: "a"}, [0, 1]); + +// Verify that $geoNear fails when a GeoJSON point is used with a 'key' path that only has a 2d +// index. GeoJSON points can only be used for spherical geometry. +assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"}, ErrorCodes.BadValue); + +// Verify that $geoNear fails when: +// -- The only index available over the 'key' path is 2dsphere. +// -- spherical=false. +// -- The search point is a legacy coordinate pair. +assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.BadValue); +assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.BadValue); }()); diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js index 4d9eebe9374..4d62966edfa 100644 --- a/jstests/core/getlog2.js +++ b/jstests/core/getlog2.js @@ -9,74 +9,74 @@ // ] (function() { - 'use strict'; +'use strict'; - // We turn off gossiping the mongo shell's clusterTime because it causes the slow command log - // messages to get truncated since they'll exceed 512 characters. The truncated log messages - // will fail to match the find and update patterns defined later on in this test. 
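// The geonear_key.js hunk above exercises the 'key' option of $geoNear, which names the indexed
// path to search when a collection carries more than one geo index. A rough usage sketch
// (hypothetical db.places with two 2dsphere indexes):
db.places.createIndex({loc: "2dsphere"});
db.places.createIndex({"address.loc": "2dsphere"});
db.places.aggregate([{
    $geoNear: {
        near: {type: "Point", coordinates: [0, 0]},
        distanceField: "dist",
        key: "loc"  // without 'key' the index choice is ambiguous and the stage fails, as tested above
    }
}]);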
- TestData.skipGossipingClusterTime = true; +// We turn off gossiping the mongo shell's clusterTime because it causes the slow command log +// messages to get truncated since they'll exceed 512 characters. The truncated log messages +// will fail to match the find and update patterns defined later on in this test. +TestData.skipGossipingClusterTime = true; - const glcol = db.getLogTest2; - glcol.drop(); +const glcol = db.getLogTest2; +glcol.drop(); - function contains(arr, func) { - let i = arr.length; - while (i--) { - if (func(arr[i])) { - return true; - } +function contains(arr, func) { + let i = arr.length; + while (i--) { + if (func(arr[i])) { + return true; } - return false; } + return false; +} - // test doesn't work when talking to mongos - if (db.isMaster().msg === "isdbgrid") { - return; - } +// test doesn't work when talking to mongos +if (db.isMaster().msg === "isdbgrid") { + return; +} - // 1. Run a slow query - glcol.save({"SENTINEL": 1}); - glcol.findOne({ - "SENTINEL": 1, - "$where": function() { - sleep(1000); - return true; - } - }); +// 1. Run a slow query +glcol.save({"SENTINEL": 1}); +glcol.findOne({ + "SENTINEL": 1, + "$where": function() { + sleep(1000); + return true; + } +}); - const query = assert.commandWorked(db.adminCommand({getLog: "global"})); - assert(query.log, "no log field"); - assert.gt(query.log.length, 0, "no log lines"); +const query = assert.commandWorked(db.adminCommand({getLog: "global"})); +assert(query.log, "no log field"); +assert.gt(query.log.length, 0, "no log lines"); - // Ensure that slow query is logged in detail. - assert(contains(query.log, function(v) { - print(v); - const opString = db.getMongo().useReadCommands() ? " find " : " query "; - const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:"; - return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 && - v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 && - v.indexOf("SENTINEL") != -1; - })); +// Ensure that slow query is logged in detail. +assert(contains(query.log, function(v) { + print(v); + const opString = db.getMongo().useReadCommands() ? " find " : " query "; + const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:"; + return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 && + v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 && + v.indexOf("SENTINEL") != -1; +})); - // 2. Run a slow update - glcol.update({ - "SENTINEL": 1, - "$where": function() { - sleep(1000); - return true; - } - }, - {"x": "x"}); +// 2. Run a slow update +glcol.update({ + "SENTINEL": 1, + "$where": function() { + sleep(1000); + return true; + } +}, + {"x": "x"}); - const update = assert.commandWorked(db.adminCommand({getLog: "global"})); - assert(update.log, "no log field"); - assert.gt(update.log.length, 0, "no log lines"); +const update = assert.commandWorked(db.adminCommand({getLog: "global"})); +assert(update.log, "no log field"); +assert.gt(update.log.length, 0, "no log lines"); - // Ensure that slow update is logged in deail. - assert(contains(update.log, function(v) { - print(v); - return v.indexOf(" update ") != -1 && v.indexOf("command") != -1 && - v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 && - v.indexOf("SENTINEL") != -1; - })); +// Ensure that slow update is logged in deail. 
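// getlog2.js depends on the server logging any operation slower than the slowms threshold
// (100 ms by default) and on getLog: "global" returning the most recent log lines. A minimal
// sketch of that mechanism outside the test, assuming a hypothetical db.foo collection:
db.setProfilingLevel(0, 100);  // keep the profiler off, but log operations slower than 100 ms
db.foo.save({SENTINEL: 1});
db.foo.findOne({SENTINEL: 1, $where: "sleep(200); return true;"});
var lines = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
lines.filter(function(l) { return l.indexOf("SENTINEL") !== -1; }).forEach(print);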
+assert(contains(update.log, function(v) { + print(v); + return v.indexOf(" update ") != -1 && v.indexOf("command") != -1 && + v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 && + v.indexOf("SENTINEL") != -1; +})); })(); diff --git a/jstests/core/getmore_cmd_maxtimems.js b/jstests/core/getmore_cmd_maxtimems.js index 7b13f858bc1..1b8e20ba962 100644 --- a/jstests/core/getmore_cmd_maxtimems.js +++ b/jstests/core/getmore_cmd_maxtimems.js @@ -4,46 +4,46 @@ // Test attaching maxTimeMS to a getMore command. (function() { - 'use strict'; - - var cmdRes; - var collName = 'getmore_cmd_maxtimems'; - var coll = db[collName]; - coll.drop(); - - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({a: i})); - } - - // Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a non-capped - // collection. - cmdRes = db.runCommand({find: collName, batchSize: 2}); - assert.commandWorked(cmdRes); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); - assert.commandFailed(cmdRes); - - coll.drop(); - assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({a: i})); - } - - // Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a capped - // collection. - cmdRes = db.runCommand({find: collName, batchSize: 2}); - assert.commandWorked(cmdRes); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); - assert.commandFailed(cmdRes); - - // Can't attach maxTimeMS to a getMore command for a non-awaitData tailable cursor. - cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true}); - assert.commandWorked(cmdRes); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); - assert.commandFailed(cmdRes); - - // Can attach maxTimeMS to a getMore command for an awaitData cursor. - cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true, awaitData: true}); - assert.commandWorked(cmdRes); - cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); - assert.commandWorked(cmdRes); +'use strict'; + +var cmdRes; +var collName = 'getmore_cmd_maxtimems'; +var coll = db[collName]; +coll.drop(); + +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({a: i})); +} + +// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a non-capped +// collection. +cmdRes = db.runCommand({find: collName, batchSize: 2}); +assert.commandWorked(cmdRes); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); +assert.commandFailed(cmdRes); + +coll.drop(); +assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({a: i})); +} + +// Can't attach maxTimeMS to a getMore command for a non-tailable cursor over a capped +// collection. +cmdRes = db.runCommand({find: collName, batchSize: 2}); +assert.commandWorked(cmdRes); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); +assert.commandFailed(cmdRes); + +// Can't attach maxTimeMS to a getMore command for a non-awaitData tailable cursor. 
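// When the cursor is tailable with awaitData, maxTimeMS on getMore acts as an await time: the
// server waits up to that long for new documents before returning an empty batch and keeping the
// cursor open. Sketch, assuming a capped collection db.log that already holds a document:
var res = assert.commandWorked(
    db.runCommand({find: "log", batchSize: 1, tailable: true, awaitData: true}));
res = assert.commandWorked(
    db.runCommand({getMore: res.cursor.id, collection: "log", maxTimeMS: 500}));
// res.cursor.nextBatch holds whatever documents (possibly none) arrived within roughly 500 ms.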
+cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true}); +assert.commandWorked(cmdRes); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); +assert.commandFailed(cmdRes); + +// Can attach maxTimeMS to a getMore command for an awaitData cursor. +cmdRes = db.runCommand({find: collName, batchSize: 2, tailable: true, awaitData: true}); +assert.commandWorked(cmdRes); +cmdRes = db.runCommand({getMore: cmdRes.cursor.id, collection: collName, maxTimeMS: 60000}); +assert.commandWorked(cmdRes); })(); diff --git a/jstests/core/getmore_invalidated_cursors.js b/jstests/core/getmore_invalidated_cursors.js index c244b071716..43f27ed5e49 100644 --- a/jstests/core/getmore_invalidated_cursors.js +++ b/jstests/core/getmore_invalidated_cursors.js @@ -4,117 +4,116 @@ // Tests that running a getMore on a cursor that has been invalidated by something like a collection // drop will return an appropriate error message. (function() { - 'use strict'; +'use strict'; - load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. +load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. - const testDB = db.getSiblingDB("getmore_invalidated_cursors"); - const coll = testDB.test; +const testDB = db.getSiblingDB("getmore_invalidated_cursors"); +const coll = testDB.test; - const nDocs = 100; +const nDocs = 100; - function setupCollection() { - coll.drop(); - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < nDocs; ++i) { - bulk.insert({_id: i, x: i}); - } - assert.writeOK(bulk.execute()); - assert.commandWorked(coll.createIndex({x: 1})); +function setupCollection() { + coll.drop(); + const bulk = coll.initializeUnorderedBulkOp(); + for (let i = 0; i < nDocs; ++i) { + bulk.insert({_id: i, x: i}); } + assert.writeOK(bulk.execute()); + assert.commandWorked(coll.createIndex({x: 1})); +} - // Test that dropping the database between a find and a getMore will return an appropriate error - // code and message. - setupCollection(); - - // Make sure the batch size is small enough to ensure a getMore will need to be sent to at least - // one shard. - const batchSize = (nDocs / FixtureHelpers.numberOfShardsForCollection(coll)) - 1; - - const isShardedCollection = coll.stats().sharded; - const shellReadMode = testDB.getMongo().readMode(); +// Test that dropping the database between a find and a getMore will return an appropriate error +// code and message. +setupCollection(); - let cursor = coll.find().batchSize(batchSize); - cursor.next(); // Send the query to the server. +// Make sure the batch size is small enough to ensure a getMore will need to be sent to at least +// one shard. +const batchSize = (nDocs / FixtureHelpers.numberOfShardsForCollection(coll)) - 1; - assert.commandWorked(testDB.dropDatabase()); +const isShardedCollection = coll.stats().sharded; +const shellReadMode = testDB.getMongo().readMode(); - let error = assert.throws(() => cursor.itcount()); +let cursor = coll.find().batchSize(batchSize); +cursor.next(); // Send the query to the server. - if (testDB.runCommand({isdbgrid: 1}).isdbgrid && shellReadMode == 'legacy') { - // The cursor will be invalidated on mongos, and we won't be able to find it. 
- assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message); - } else { - assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); - assert.neq(-1, error.message.indexOf('collection dropped'), error.message); - } +assert.commandWorked(testDB.dropDatabase()); - // Test that dropping the collection between a find and a getMore will return an appropriate - // error code and message. - setupCollection(); - cursor = coll.find().batchSize(batchSize); - cursor.next(); // Send the query to the server. +let error = assert.throws(() => cursor.itcount()); - coll.drop(); - error = assert.throws(() => cursor.itcount()); +if (testDB.runCommand({isdbgrid: 1}).isdbgrid && shellReadMode == 'legacy') { + // The cursor will be invalidated on mongos, and we won't be able to find it. + assert.neq(-1, error.message.indexOf('didn\'t exist on server'), error.message); +} else { assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); - // In replica sets, collection drops are done in two phases, first renaming the collection to a - // "drop pending" namespace, and then later reaping the collection. Therefore, we expect to - // either see an error message related to a collection drop, or one related to a collection - // rename. - const droppedMsg = 'collection dropped'; - const renamedMsg = 'collection renamed'; - assert(-1 !== error.message.indexOf(droppedMsg) || -1 !== error.message.indexOf(renamedMsg), - error.message); - - // Test that dropping an index between a find and a getMore has no effect on the query if the - // query is not using the index. + assert.neq(-1, error.message.indexOf('collection dropped'), error.message); +} + +// Test that dropping the collection between a find and a getMore will return an appropriate +// error code and message. +setupCollection(); +cursor = coll.find().batchSize(batchSize); +cursor.next(); // Send the query to the server. + +coll.drop(); +error = assert.throws(() => cursor.itcount()); +assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); +// In replica sets, collection drops are done in two phases, first renaming the collection to a +// "drop pending" namespace, and then later reaping the collection. Therefore, we expect to +// either see an error message related to a collection drop, or one related to a collection +// rename. +const droppedMsg = 'collection dropped'; +const renamedMsg = 'collection renamed'; +assert(-1 !== error.message.indexOf(droppedMsg) || -1 !== error.message.indexOf(renamedMsg), + error.message); + +// Test that dropping an index between a find and a getMore has no effect on the query if the +// query is not using the index. +setupCollection(); +cursor = coll.find().batchSize(batchSize); +cursor.next(); // Send the query to the server. +assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}})); +assert.eq(cursor.itcount(), nDocs - 1); + +// Test that dropping the index being scanned by a cursor between a find and a getMore kills the +// query with the appropriate code and message. +setupCollection(); +cursor = coll.find().hint({x: 1}).batchSize(batchSize); +cursor.next(); // Send the query to the server. 
+assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}})); +error = assert.throws(() => cursor.itcount()); +assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); +assert.neq(-1, error.message.indexOf('index \'x_1\' dropped'), error.message); + +// Test that killing a cursor between a find and a getMore will return an appropriate error +// code and message. + +setupCollection(); +// Use the find command so that we can extract the cursor id to pass to the killCursors command. +let cursorId = + assert + .commandWorked(testDB.runCommand({find: coll.getName(), filter: {}, batchSize: batchSize})) + .cursor.id; +assert.commandWorked(testDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]})); +assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: coll.getName()}), + ErrorCodes.CursorNotFound); + +// Test that all cursors on collections to be renamed get invalidated. Note that we can't do +// renames on sharded collections. +if (!isShardedCollection) { setupCollection(); + const collRenamed = testDB.test_rename; + collRenamed.drop(); cursor = coll.find().batchSize(batchSize); - cursor.next(); // Send the query to the server. - assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}})); - assert.eq(cursor.itcount(), nDocs - 1); + assert(cursor.hasNext(), "Expected more data from find call on " + coll.getName()); + assert.commandWorked(testDB.adminCommand({ + renameCollection: testDB.getName() + "." + coll.getName(), + to: testDB.getName() + "." + collRenamed.getName() + })); - // Test that dropping the index being scanned by a cursor between a find and a getMore kills the - // query with the appropriate code and message. - setupCollection(); - cursor = coll.find().hint({x: 1}).batchSize(batchSize); - cursor.next(); // Send the query to the server. - assert.commandWorked(testDB.runCommand({dropIndexes: coll.getName(), index: {x: 1}})); + // Ensure getMore fails with an appropriate error code and message. error = assert.throws(() => cursor.itcount()); assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); - assert.neq(-1, error.message.indexOf('index \'x_1\' dropped'), error.message); - - // Test that killing a cursor between a find and a getMore will return an appropriate error - // code and message. - - setupCollection(); - // Use the find command so that we can extract the cursor id to pass to the killCursors command. - let cursorId = assert - .commandWorked(testDB.runCommand( - {find: coll.getName(), filter: {}, batchSize: batchSize})) - .cursor.id; - assert.commandWorked(testDB.runCommand({killCursors: coll.getName(), cursors: [cursorId]})); - assert.commandFailedWithCode(testDB.runCommand({getMore: cursorId, collection: coll.getName()}), - ErrorCodes.CursorNotFound); - - // Test that all cursors on collections to be renamed get invalidated. Note that we can't do - // renames on sharded collections. - if (!isShardedCollection) { - setupCollection(); - const collRenamed = testDB.test_rename; - collRenamed.drop(); - cursor = coll.find().batchSize(batchSize); - assert(cursor.hasNext(), "Expected more data from find call on " + coll.getName()); - assert.commandWorked(testDB.adminCommand({ - renameCollection: testDB.getName() + "." + coll.getName(), - to: testDB.getName() + "." + collRenamed.getName() - })); - - // Ensure getMore fails with an appropriate error code and message. 
- error = assert.throws(() => cursor.itcount()); - assert.eq(error.code, ErrorCodes.QueryPlanKilled, tojson(error)); - assert.neq(-1, error.message.indexOf('collection renamed'), error.message); - } - + assert.neq(-1, error.message.indexOf('collection renamed'), error.message); +} }()); diff --git a/jstests/core/getmore_invalidated_documents.js b/jstests/core/getmore_invalidated_documents.js index 7d00748bbce..378fde3b02a 100644 --- a/jstests/core/getmore_invalidated_documents.js +++ b/jstests/core/getmore_invalidated_documents.js @@ -6,231 +6,230 @@ // Tests for invalidation during a getmore. This behavior is storage-engine dependent. // See SERVER-16675. (function() { - "use strict"; - - var t = db.getmore_invalidated_documents; - - var count; - var cursor; - var nextDoc; - var x; - var y; - - // Case #1: Text search with deletion invalidation. - t.drop(); - assert.commandWorked(t.ensureIndex({a: "text"})); - assert.writeOK(t.insert({_id: 1, a: "bar"})); - assert.writeOK(t.insert({_id: 2, a: "bar"})); - assert.writeOK(t.insert({_id: 3, a: "bar"})); - - cursor = t.find({$text: {$search: "bar"}}).batchSize(2); - cursor.next(); - cursor.next(); - - assert.writeOK(t.remove({_id: 3})); - - // We should get back the document or not (depending on the storage engine / concurrency model). - // Either is fine as long as we don't crash. - count = cursor.itcount(); - assert(count === 0 || count === 1); - - // Case #2: Text search with mutation invalidation. - t.drop(); - assert.commandWorked(t.ensureIndex({a: "text"})); - assert.writeOK(t.insert({_id: 1, a: "bar"})); - assert.writeOK(t.insert({_id: 2, a: "bar"})); - assert.writeOK(t.insert({_id: 3, a: "bar"})); - - cursor = t.find({$text: {$search: "bar"}}).batchSize(2); - cursor.next(); - cursor.next(); - - // Update the next matching doc so that it no longer matches. - assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}})); - - // Either the cursor should skip the result that no longer matches, or we should get back the - // old - // version of the doc. - assert(!cursor.hasNext() || cursor.next()["a"] === "bar"); - - // Case #3: Merge sort with deletion invalidation. - t.drop(); - assert.commandWorked(t.ensureIndex({a: 1, b: 1})); - assert.writeOK(t.insert({a: 1, b: 1})); - assert.writeOK(t.insert({a: 1, b: 2})); - assert.writeOK(t.insert({a: 2, b: 3})); - assert.writeOK(t.insert({a: 2, b: 4})); - - cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2); - cursor.next(); - cursor.next(); - - assert.writeOK(t.remove({a: 2, b: 3})); - - count = cursor.itcount(); - assert(count === 1 || count === 2); - - // Case #4: Merge sort with mutation invalidation. - t.drop(); - assert.commandWorked(t.ensureIndex({a: 1, b: 1})); - assert.writeOK(t.insert({a: 1, b: 1})); - assert.writeOK(t.insert({a: 1, b: 2})); - assert.writeOK(t.insert({a: 2, b: 3})); - assert.writeOK(t.insert({a: 2, b: 4})); - - cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2); - cursor.next(); - cursor.next(); - - assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}})); - - // Either the cursor should skip the result that no longer matches, or we should get back the - // old - // version of the doc. - assert(cursor.hasNext()); +"use strict"; + +var t = db.getmore_invalidated_documents; + +var count; +var cursor; +var nextDoc; +var x; +var y; + +// Case #1: Text search with deletion invalidation. 
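// The pattern used by every case in this file: a batchSize of 2 means the first two results come
// back with the initial find, so the invalidating write lands while the cursor is idle between
// batches, and the next read forces a getMore. Sketch with a hypothetical db.foo collection:
var cur = db.foo.find().batchSize(2);
cur.next();
cur.next();               // first batch exhausted
db.foo.remove({_id: 3});  // invalidating write happens between batches
cur.hasNext();            // issues the getMore; what it sees here is storage-engine dependent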
+t.drop(); +assert.commandWorked(t.ensureIndex({a: "text"})); +assert.writeOK(t.insert({_id: 1, a: "bar"})); +assert.writeOK(t.insert({_id: 2, a: "bar"})); +assert.writeOK(t.insert({_id: 3, a: "bar"})); + +cursor = t.find({$text: {$search: "bar"}}).batchSize(2); +cursor.next(); +cursor.next(); + +assert.writeOK(t.remove({_id: 3})); + +// We should get back the document or not (depending on the storage engine / concurrency model). +// Either is fine as long as we don't crash. +count = cursor.itcount(); +assert(count === 0 || count === 1); + +// Case #2: Text search with mutation invalidation. +t.drop(); +assert.commandWorked(t.ensureIndex({a: "text"})); +assert.writeOK(t.insert({_id: 1, a: "bar"})); +assert.writeOK(t.insert({_id: 2, a: "bar"})); +assert.writeOK(t.insert({_id: 3, a: "bar"})); + +cursor = t.find({$text: {$search: "bar"}}).batchSize(2); +cursor.next(); +cursor.next(); + +// Update the next matching doc so that it no longer matches. +assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}})); + +// Either the cursor should skip the result that no longer matches, or we should get back the +// old +// version of the doc. +assert(!cursor.hasNext() || cursor.next()["a"] === "bar"); + +// Case #3: Merge sort with deletion invalidation. +t.drop(); +assert.commandWorked(t.ensureIndex({a: 1, b: 1})); +assert.writeOK(t.insert({a: 1, b: 1})); +assert.writeOK(t.insert({a: 1, b: 2})); +assert.writeOK(t.insert({a: 2, b: 3})); +assert.writeOK(t.insert({a: 2, b: 4})); + +cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2); +cursor.next(); +cursor.next(); + +assert.writeOK(t.remove({a: 2, b: 3})); + +count = cursor.itcount(); +assert(count === 1 || count === 2); + +// Case #4: Merge sort with mutation invalidation. +t.drop(); +assert.commandWorked(t.ensureIndex({a: 1, b: 1})); +assert.writeOK(t.insert({a: 1, b: 1})); +assert.writeOK(t.insert({a: 1, b: 2})); +assert.writeOK(t.insert({a: 2, b: 3})); +assert.writeOK(t.insert({a: 2, b: 4})); + +cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2); +cursor.next(); +cursor.next(); + +assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}})); + +// Either the cursor should skip the result that no longer matches, or we should get back the +// old +// version of the doc. +assert(cursor.hasNext()); +assert(cursor.next()["a"] === 2); +if (cursor.hasNext()) { assert(cursor.next()["a"] === 2); - if (cursor.hasNext()) { - assert(cursor.next()["a"] === 2); - } - assert(!cursor.hasNext()); - - // Case #5: 2d near with deletion invalidation. - t.drop(); - t.ensureIndex({geo: "2d"}); - for (x = -1; x < 1; x++) { - for (y = -1; y < 1; y++) { - assert.writeOK(t.insert({geo: [x, y]})); - } +} +assert(!cursor.hasNext()); + +// Case #5: 2d near with deletion invalidation. +t.drop(); +t.ensureIndex({geo: "2d"}); +for (x = -1; x < 1; x++) { + for (y = -1; y < 1; y++) { + assert.writeOK(t.insert({geo: [x, y]})); } +} - cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2); - cursor.next(); - cursor.next(); +cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2); +cursor.next(); +cursor.next(); - // Drop all documents in the collection. - assert.writeOK(t.remove({})); +// Drop all documents in the collection. +assert.writeOK(t.remove({})); - // Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered - // because it is the same distance from the center point as a doc already returned). 
- assert(cursor.hasNext()); +// Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered +// because it is the same distance from the center point as a doc already returned). +assert(cursor.hasNext()); - // Case #6: 2dsphere near with deletion invalidation. - t.drop(); - t.ensureIndex({geo: "2dsphere"}); - for (x = -1; x < 1; x++) { - for (y = -1; y < 1; y++) { - assert.writeOK(t.insert({geo: [x, y]})); - } +// Case #6: 2dsphere near with deletion invalidation. +t.drop(); +t.ensureIndex({geo: "2dsphere"}); +for (x = -1; x < 1; x++) { + for (y = -1; y < 1; y++) { + assert.writeOK(t.insert({geo: [x, y]})); } - - cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2); - cursor.next(); - cursor.next(); - - // Drop all documents in the collection. - assert.writeOK(t.remove({})); - - // Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered - // because it is the same distance from the center point as a doc already returned). - assert(cursor.hasNext()); - - // Case #7: 2dsphere near with deletion invalidation (again). - t.drop(); - t.ensureIndex({geo: "2dsphere"}); - for (x = 0; x < 6; x++) { - assert.writeOK(t.insert({geo: [x, x]})); +} + +cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2); +cursor.next(); +cursor.next(); + +// Drop all documents in the collection. +assert.writeOK(t.remove({})); + +// Both MMAP v1 and doc-locking storage engines should force fetch the doc (it will be buffered +// because it is the same distance from the center point as a doc already returned). +assert(cursor.hasNext()); + +// Case #7: 2dsphere near with deletion invalidation (again). +t.drop(); +t.ensureIndex({geo: "2dsphere"}); +for (x = 0; x < 6; x++) { + assert.writeOK(t.insert({geo: [x, x]})); +} + +cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 10}}).batchSize(2); +cursor.next(); +cursor.next(); + +// Drop all documents in the collection. +assert.writeOK(t.remove({})); + +// We might force-fetch or we might skip over the deleted documents, depending on the internals +// of the geo near search. Just make sure that we can exhaust the cursor without crashing. +assert.gte(cursor.itcount(), 0); + +// Case #8: 2d near with mutation invalidation. +t.drop(); +t.ensureIndex({geo: "2d"}); +for (x = -1; x < 1; x++) { + for (y = -1; y < 1; y++) { + assert.writeOK(t.insert({geo: [x, y]})); } - - cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 10}}).batchSize(2); - cursor.next(); - cursor.next(); - - // Drop all documents in the collection. - assert.writeOK(t.remove({})); - - // We might force-fetch or we might skip over the deleted documents, depending on the internals - // of the geo near search. Just make sure that we can exhaust the cursor without crashing. - assert.gte(cursor.itcount(), 0); - - // Case #8: 2d near with mutation invalidation. - t.drop(); - t.ensureIndex({geo: "2d"}); - for (x = -1; x < 1; x++) { - for (y = -1; y < 1; y++) { - assert.writeOK(t.insert({geo: [x, y]})); - } - } - - cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2); - cursor.next(); - cursor.next(); - - // Update all documents in the collection to have position [15, 15]. - assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true)); - - // The old version of the document should be returned (the update should not be reflected in the - // results of the near search). 
- nextDoc = cursor.next(); - printjson(nextDoc); - assert.neq([15, 15], nextDoc.geo); - assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0); - - // Case #9: 2dsphere near with mutation invalidation. - t.drop(); - t.ensureIndex({geo: "2dsphere"}); - for (x = -1; x < 1; x++) { - for (y = -1; y < 1; y++) { - assert.writeOK(t.insert({geo: [x, y]})); - } +} + +cursor = t.find({geo: {$near: [0, 0], $maxDistance: 5}}).batchSize(2); +cursor.next(); +cursor.next(); + +// Update all documents in the collection to have position [15, 15]. +assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true)); + +// The old version of the document should be returned (the update should not be reflected in the +// results of the near search). +nextDoc = cursor.next(); +printjson(nextDoc); +assert.neq([15, 15], nextDoc.geo); +assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0); + +// Case #9: 2dsphere near with mutation invalidation. +t.drop(); +t.ensureIndex({geo: "2dsphere"}); +for (x = -1; x < 1; x++) { + for (y = -1; y < 1; y++) { + assert.writeOK(t.insert({geo: [x, y]})); } - - cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2); - cursor.next(); - cursor.next(); - - // Update all documents in the collection to have position [15, 15]. - assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true)); - - // The old version of the document should be returned (the update should not be reflected in the - // results of the near search). - nextDoc = cursor.next(); - printjson(nextDoc); - assert.neq([15, 15], nextDoc.geo); - assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0); - - // Case #10: sort with deletion invalidation. - t.drop(); - t.ensureIndex({a: 1}); - t.insert({a: 1, b: 2}); - t.insert({a: 3, b: 3}); - t.insert({a: 2, b: 1}); - - cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2); - cursor.next(); - cursor.next(); - - assert.writeOK(t.remove({a: 2})); - - if (cursor.hasNext()) { - assert.eq(cursor.next().b, 3); - } - - // Case #11: sort with mutation invalidation. - t.drop(); - t.ensureIndex({a: 1}); - t.insert({a: 1, b: 2}); - t.insert({a: 3, b: 3}); - t.insert({a: 2, b: 1}); - - cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2); - cursor.next(); - cursor.next(); - - assert.writeOK(t.update({a: 2}, {$set: {a: 4}})); - - count = cursor.itcount(); - if (cursor.hasNext()) { - assert.eq(cursor.next().b, 3); - } - +} + +cursor = t.find({geo: {$nearSphere: [0, 0], $maxDistance: 5}}).batchSize(2); +cursor.next(); +cursor.next(); + +// Update all documents in the collection to have position [15, 15]. +assert.writeOK(t.update({}, {$set: {geo: [15, 15]}}, false, true)); + +// The old version of the document should be returned (the update should not be reflected in the +// results of the near search). +nextDoc = cursor.next(); +printjson(nextDoc); +assert.neq([15, 15], nextDoc.geo); +assert(nextDoc.geo[0] === 0 || nextDoc.geo[1] === 0); + +// Case #10: sort with deletion invalidation. +t.drop(); +t.ensureIndex({a: 1}); +t.insert({a: 1, b: 2}); +t.insert({a: 3, b: 3}); +t.insert({a: 2, b: 1}); + +cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2); +cursor.next(); +cursor.next(); + +assert.writeOK(t.remove({a: 2})); + +if (cursor.hasNext()) { + assert.eq(cursor.next().b, 3); +} + +// Case #11: sort with mutation invalidation. 
+t.drop(); +t.ensureIndex({a: 1}); +t.insert({a: 1, b: 2}); +t.insert({a: 3, b: 3}); +t.insert({a: 2, b: 1}); + +cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2); +cursor.next(); +cursor.next(); + +assert.writeOK(t.update({a: 2}, {$set: {a: 4}})); + +count = cursor.itcount(); +if (cursor.hasNext()) { + assert.eq(cursor.next().b, 3); +} })(); diff --git a/jstests/core/hash.js b/jstests/core/hash.js index 2e8ad576159..4f7cebcdb5a 100644 --- a/jstests/core/hash.js +++ b/jstests/core/hash.js @@ -6,58 +6,58 @@ * architectures. */ (function() { - 'use strict'; +'use strict'; - const hashOfMaxNumberLong = NumberLong("1136124329541638701"); - const hashOfLowestNumberLong = NumberLong("5744114172487291558"); - const hashOfZeroNumberLong = NumberLong("5574369198691456941"); +const hashOfMaxNumberLong = NumberLong("1136124329541638701"); +const hashOfLowestNumberLong = NumberLong("5744114172487291558"); +const hashOfZeroNumberLong = NumberLong("5574369198691456941"); - const hashTests = [ - // Hash value of a string. - {key: "hashthis", expected: NumberLong("6271151123721111923")}, +const hashTests = [ + // Hash value of a string. + {key: "hashthis", expected: NumberLong("6271151123721111923")}, - // The smallest positive double that overflows a 64-bit signed int. This is a special case, - // as described in SERVER-37183. - {key: Math.pow(2, 63), expected: hashOfLowestNumberLong}, + // The smallest positive double that overflows a 64-bit signed int. This is a special case, + // as described in SERVER-37183. + {key: Math.pow(2, 63), expected: hashOfLowestNumberLong}, - // The next biggest number. Large doubles get clamped to the max 64-bit signed value before - // being hashed. - {key: Math.pow(2, 63) + Math.pow(2, 11), expected: hashOfMaxNumberLong}, + // The next biggest number. Large doubles get clamped to the max 64-bit signed value before + // being hashed. + {key: Math.pow(2, 63) + Math.pow(2, 11), expected: hashOfMaxNumberLong}, - // Really large numbers and positive infinity also get clamped to the same value. - {key: Math.pow(2, 500), expected: hashOfMaxNumberLong}, - {key: Infinity, expected: hashOfMaxNumberLong}, + // Really large numbers and positive infinity also get clamped to the same value. + {key: Math.pow(2, 500), expected: hashOfMaxNumberLong}, + {key: Infinity, expected: hashOfMaxNumberLong}, - // Just under the largest double that overflows a 64-bit signed int. This value gets - // converted to a signed 64-bit int and then hashed. - {key: Math.pow(2, 63) - Math.pow(2, 10), expected: NumberLong("-3954856262017896439")}, + // Just under the largest double that overflows a 64-bit signed int. This value gets + // converted to a signed 64-bit int and then hashed. + {key: Math.pow(2, 63) - Math.pow(2, 10), expected: NumberLong("-3954856262017896439")}, - // Lowest negative double that does not overflow a 64-bit signed int. - {key: -Math.pow(2, 63), expected: hashOfLowestNumberLong}, + // Lowest negative double that does not overflow a 64-bit signed int. + {key: -Math.pow(2, 63), expected: hashOfLowestNumberLong}, - // Just above the lowest negative double that does not overflow a 64-bit signed int. - {key: -(Math.pow(2, 63) - Math.pow(2, 10)), expected: NumberLong("-1762411739488908479")}, + // Just above the lowest negative double that does not overflow a 64-bit signed int. + {key: -(Math.pow(2, 63) - Math.pow(2, 10)), expected: NumberLong("-1762411739488908479")}, - // A negative overflowing double gets clamped to -2^63 before being hashed. 
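// A consequence of the clamping described above: doubles that overflow the 64-bit range collapse
// onto just two hash values (one for positive overflow, one for negative), which matters for
// hashed indexes and hashed shard keys. Quick check with the same internal test command (sketch):
var h = function(v) {
    return assert.commandWorked(db.runCommand({_hashBSONElement: v, seed: 1})).out;
};
assert.eq(h(Infinity), h(Math.pow(2, 500)));                       // both clamp to the max int64
assert.eq(h(-Infinity), h(-(Math.pow(2, 63) + Math.pow(2, 11))));  // both clamp to -2^63
assert.eq(h(NaN), h(0));                                           // NaN hashes like zero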
- {key: -(Math.pow(2, 63) + Math.pow(2, 11)), expected: hashOfLowestNumberLong}, - {key: -Infinity, expected: hashOfLowestNumberLong}, + // A negative overflowing double gets clamped to -2^63 before being hashed. + {key: -(Math.pow(2, 63) + Math.pow(2, 11)), expected: hashOfLowestNumberLong}, + {key: -Infinity, expected: hashOfLowestNumberLong}, - // NaN values get converted to 0 and then hashed. - {key: 0, expected: hashOfZeroNumberLong}, - {key: NumberLong("0"), expected: hashOfZeroNumberLong}, - {key: NaN, expected: hashOfZeroNumberLong}, - {key: -NaN, expected: hashOfZeroNumberLong}, + // NaN values get converted to 0 and then hashed. + {key: 0, expected: hashOfZeroNumberLong}, + {key: NumberLong("0"), expected: hashOfZeroNumberLong}, + {key: NaN, expected: hashOfZeroNumberLong}, + {key: -NaN, expected: hashOfZeroNumberLong}, - // Hash an object. - {key: {a: 1, b: 2}, expected: NumberLong("-7076810813311352857")}, + // Hash an object. + {key: {a: 1, b: 2}, expected: NumberLong("-7076810813311352857")}, - // Hash an object with some corner-case values. - {key: {a: Math.pow(2, 63), b: NaN}, expected: NumberLong("1223292051903137684")}, - ]; + // Hash an object with some corner-case values. + {key: {a: Math.pow(2, 63), b: NaN}, expected: NumberLong("1223292051903137684")}, +]; - hashTests.forEach(test => { - const hashResult = db.runCommand({_hashBSONElement: test.key, seed: 1}); - assert.commandWorked(hashResult); - assert.eq(test.expected, hashResult.out, tojson(test.key)); - }); +hashTests.forEach(test => { + const hashResult = db.runCommand({_hashBSONElement: test.key, seed: 1}); + assert.commandWorked(hashResult); + assert.eq(test.expected, hashResult.out, tojson(test.key)); +}); })(); diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js index a2fb8f39640..880716ed206 100644 --- a/jstests/core/idhack.js +++ b/jstests/core/idhack.js @@ -1,103 +1,104 @@ // @tags: [requires_non_retryable_writes, assumes_balancer_off] (function() { - "use strict"; - - const t = db.idhack; - t.drop(); - - // Include helpers for analyzing explain output. - load("jstests/libs/analyze_plan.js"); - - assert.writeOK(t.insert({_id: {x: 1}, z: 1})); - assert.writeOK(t.insert({_id: {x: 2}, z: 2})); - assert.writeOK(t.insert({_id: {x: 3}, z: 3})); - assert.writeOK(t.insert({_id: 1, z: 4})); - assert.writeOK(t.insert({_id: 2, z: 5})); - assert.writeOK(t.insert({_id: 3, z: 6})); - - assert.eq(2, t.findOne({_id: {x: 2}}).z); - assert.eq(2, t.find({_id: {$gte: 2}}).count()); - assert.eq(2, t.find({_id: {$gte: 2}}).itcount()); - - t.update({_id: {x: 2}}, {$set: {z: 7}}); - assert.eq(7, t.findOne({_id: {x: 2}}).z); - - t.update({_id: {$gte: 2}}, {$set: {z: 8}}, false, true); - assert.eq(4, t.findOne({_id: 1}).z); - assert.eq(8, t.findOne({_id: 2}).z); - assert.eq(8, t.findOne({_id: 3}).z); - - // explain output should show that the ID hack was applied. - const query = {_id: {x: 2}}; - let explain = t.find(query).explain(true); - assert.eq(1, explain.executionStats.nReturned); - assert.eq(1, explain.executionStats.totalKeysExamined); - assert(isIdhack(db, explain.queryPlanner.winningPlan)); - - // ID hack cannot be used with hint(). - t.ensureIndex({_id: 1, a: 1}); - explain = t.find(query).hint({_id: 1, a: 1}).explain(); - assert(!isIdhack(db, explain.queryPlanner.winningPlan)); - - // ID hack cannot be used with skip(). - explain = t.find(query).skip(1).explain(); - assert(!isIdhack(db, explain.queryPlanner.winningPlan)); - - // ID hack cannot be used with a regex predicate. 
- assert.writeOK(t.insert({_id: "abc"})); - explain = t.find({_id: /abc/}).explain(); - assert.eq({_id: "abc"}, t.findOne({_id: /abc/})); - assert(!isIdhack(db, explain.queryPlanner.winningPlan)); - - // Covered query returning _id field only can be handled by ID hack. - explain = t.find(query, {_id: 1}).explain(); - assert(isIdhack(db, explain.queryPlanner.winningPlan)); - // Check doc from covered ID hack query. - assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1})); - - // - // Non-covered projection for idhack. - // - - t.drop(); - assert.writeOK(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]})); - assert.writeOK(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]})); - - // Simple inclusion. - assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next()); - assert.eq({a: 1}, t.find({_id: 1}, {_id: 0, a: 1}).next()); - assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next()); - - // Non-simple: exclusion. - assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next()); - assert.eq({_id: 0}, t.find({_id: 0}, {a: 0, b: 0}).next()); - - // Non-simple: dotted fields. - assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next()); - assert.eq({_id: 1}, t.find({_id: 1}, {"foo.bar": 1}).next()); - - // Non-simple: elemMatch projection. - assert.eq({_id: 1, b: [{c: 4}]}, t.find({_id: 1}, {b: {$elemMatch: {c: 4}}}).next()); - - // Non-simple: .returnKey(). - assert.eq({_id: 1}, t.find({_id: 1}).returnKey().next()); - - // Non-simple: .returnKey() overrides other projections. - assert.eq({_id: 1}, t.find({_id: 1}, {a: 1}).returnKey().next()); - - // Test that equality queries on _id with min() or max() require hint(). - let err = assert.throws(() => t.find({_id: 2}).min({_id: 1}).itcount()); - assert.commandFailedWithCode(err, 51173); - err = assert.throws(() => t.find({_id: 2}).max({_id: 3}).itcount()); - assert.commandFailedWithCode(err, 51173); - - // Test that equality queries on _id respect min() and max(). - assert.eq({_id: 1}, t.find({_id: 1}).hint({_id: 1}).min({_id: 0}).returnKey().next()); - assert.eq({_id: 1}, - t.find({_id: 1}).hint({_id: 1}).min({_id: 0}).max({_id: 2}).returnKey().next()); - assert.eq(0, t.find({_id: 1}).hint({_id: 1}).max({_id: 0}).itcount()); - assert.eq(0, t.find({_id: 1}).hint({_id: 1}).min({_id: 2}).itcount()); - - explain = t.find({_id: 2}).hint({_id: 1}).min({_id: 1}).max({_id: 3}).explain(); - assert(!isIdhack(db, explain.queryPlanner.winningPlan)); +"use strict"; + +const t = db.idhack; +t.drop(); + +// Include helpers for analyzing explain output. +load("jstests/libs/analyze_plan.js"); + +assert.writeOK(t.insert({_id: {x: 1}, z: 1})); +assert.writeOK(t.insert({_id: {x: 2}, z: 2})); +assert.writeOK(t.insert({_id: {x: 3}, z: 3})); +assert.writeOK(t.insert({_id: 1, z: 4})); +assert.writeOK(t.insert({_id: 2, z: 5})); +assert.writeOK(t.insert({_id: 3, z: 6})); + +assert.eq(2, t.findOne({_id: {x: 2}}).z); +assert.eq(2, t.find({_id: {$gte: 2}}).count()); +assert.eq(2, t.find({_id: {$gte: 2}}).itcount()); + +t.update({_id: {x: 2}}, {$set: {z: 7}}); +assert.eq(7, t.findOne({_id: {x: 2}}).z); + +t.update({_id: {$gte: 2}}, {$set: {z: 8}}, false, true); +assert.eq(4, t.findOne({_id: 1}).z); +assert.eq(8, t.findOne({_id: 2}).z); +assert.eq(8, t.findOne({_id: 3}).z); + +// explain output should show that the ID hack was applied. 
+const query = { + _id: {x: 2} +}; +let explain = t.find(query).explain(true); +assert.eq(1, explain.executionStats.nReturned); +assert.eq(1, explain.executionStats.totalKeysExamined); +assert(isIdhack(db, explain.queryPlanner.winningPlan)); + +// ID hack cannot be used with hint(). +t.ensureIndex({_id: 1, a: 1}); +explain = t.find(query).hint({_id: 1, a: 1}).explain(); +assert(!isIdhack(db, explain.queryPlanner.winningPlan)); + +// ID hack cannot be used with skip(). +explain = t.find(query).skip(1).explain(); +assert(!isIdhack(db, explain.queryPlanner.winningPlan)); + +// ID hack cannot be used with a regex predicate. +assert.writeOK(t.insert({_id: "abc"})); +explain = t.find({_id: /abc/}).explain(); +assert.eq({_id: "abc"}, t.findOne({_id: /abc/})); +assert(!isIdhack(db, explain.queryPlanner.winningPlan)); + +// Covered query returning _id field only can be handled by ID hack. +explain = t.find(query, {_id: 1}).explain(); +assert(isIdhack(db, explain.queryPlanner.winningPlan)); +// Check doc from covered ID hack query. +assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1})); + +// +// Non-covered projection for idhack. +// + +t.drop(); +assert.writeOK(t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]})); +assert.writeOK(t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]})); + +// Simple inclusion. +assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next()); +assert.eq({a: 1}, t.find({_id: 1}, {_id: 0, a: 1}).next()); +assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next()); + +// Non-simple: exclusion. +assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next()); +assert.eq({_id: 0}, t.find({_id: 0}, {a: 0, b: 0}).next()); + +// Non-simple: dotted fields. +assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next()); +assert.eq({_id: 1}, t.find({_id: 1}, {"foo.bar": 1}).next()); + +// Non-simple: elemMatch projection. +assert.eq({_id: 1, b: [{c: 4}]}, t.find({_id: 1}, {b: {$elemMatch: {c: 4}}}).next()); + +// Non-simple: .returnKey(). +assert.eq({_id: 1}, t.find({_id: 1}).returnKey().next()); + +// Non-simple: .returnKey() overrides other projections. +assert.eq({_id: 1}, t.find({_id: 1}, {a: 1}).returnKey().next()); + +// Test that equality queries on _id with min() or max() require hint(). +let err = assert.throws(() => t.find({_id: 2}).min({_id: 1}).itcount()); +assert.commandFailedWithCode(err, 51173); +err = assert.throws(() => t.find({_id: 2}).max({_id: 3}).itcount()); +assert.commandFailedWithCode(err, 51173); + +// Test that equality queries on _id respect min() and max(). 
+assert.eq({_id: 1}, t.find({_id: 1}).hint({_id: 1}).min({_id: 0}).returnKey().next()); +assert.eq({_id: 1}, t.find({_id: 1}).hint({_id: 1}).min({_id: 0}).max({_id: 2}).returnKey().next()); +assert.eq(0, t.find({_id: 1}).hint({_id: 1}).max({_id: 0}).itcount()); +assert.eq(0, t.find({_id: 1}).hint({_id: 1}).min({_id: 2}).itcount()); + +explain = t.find({_id: 2}).hint({_id: 1}).min({_id: 1}).max({_id: 3}).explain(); +assert(!isIdhack(db, explain.queryPlanner.winningPlan)); })(); diff --git a/jstests/core/index_bigkeys.js b/jstests/core/index_bigkeys.js index 3b598333102..be8ae1f8d65 100644 --- a/jstests/core/index_bigkeys.js +++ b/jstests/core/index_bigkeys.js @@ -7,11 +7,11 @@ * @tags: [assumes_no_implicit_index_creation, requires_non_retryable_writes] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/index_bigkeys.js"); +load("jstests/libs/index_bigkeys.js"); - const collName = "index_bigkeys_foreground_test"; +const collName = "index_bigkeys_foreground_test"; - testAllInteractionsWithBigIndexKeys(db, collName, false); +testAllInteractionsWithBigIndexKeys(db, collName, false); }()); diff --git a/jstests/core/index_bigkeys_background.js b/jstests/core/index_bigkeys_background.js index 88cfb7c1222..b7963f3235b 100644 --- a/jstests/core/index_bigkeys_background.js +++ b/jstests/core/index_bigkeys_background.js @@ -14,11 +14,11 @@ * ] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/index_bigkeys.js"); +load("jstests/libs/index_bigkeys.js"); - const collName = "index_bigkeys_background_test"; +const collName = "index_bigkeys_background_test"; - testAllInteractionsWithBigIndexKeys(db, collName, true); +testAllInteractionsWithBigIndexKeys(db, collName, true); }()); diff --git a/jstests/core/index_bounds_code.js b/jstests/core/index_bounds_code.js index 5070c3fe0d0..cd1fa58b306 100644 --- a/jstests/core/index_bounds_code.js +++ b/jstests/core/index_bounds_code.js @@ -1,55 +1,50 @@ // Index bounds generation tests for Code/CodeWSCope values. // @tags: [requires_non_retryable_writes, assumes_unsharded_collection] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. +load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. - const coll = db.index_bounds_code; - coll.drop(); +const coll = db.index_bounds_code; +coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - const insertedFunc = function() { - return 1; - }; - assert.writeOK(coll.insert({a: insertedFunc})); +assert.commandWorked(coll.createIndex({a: 1})); +const insertedFunc = function() { + return 1; +}; +assert.writeOK(coll.insert({a: insertedFunc})); - // Test that queries involving comparison operators with values of type Code are covered. - const proj = {a: 1, _id: 0}; - const func = function() { - return 2; - }; - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: func}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: func}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: func}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: func}}, project: proj, count: 1}); +// Test that queries involving comparison operators with values of type Code are covered. 
+const proj = { + a: 1, + _id: 0 +}; +const func = function() { + return 2; +}; +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 1}); - // Test for equality against the original inserted function. - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: insertedFunc}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: insertedFunc}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: insertedFunc}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: insertedFunc}}, project: proj, count: 1}); +// Test for equality against the original inserted function. +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$gt: insertedFunc}}, project: proj, count: 0}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$gte: insertedFunc}}, project: proj, count: 1}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$lt: insertedFunc}}, project: proj, count: 0}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$lte: insertedFunc}}, project: proj, count: 1}); - // Test that documents that lie outside of the generated index bounds are not returned. - coll.remove({}); - assert.writeOK(coll.insert({a: "string"})); - assert.writeOK(coll.insert({a: {b: 1}})); - assert.writeOK(coll.insert({a: MaxKey})); +// Test that documents that lie outside of the generated index bounds are not returned. +coll.remove({}); +assert.writeOK(coll.insert({a: "string"})); +assert.writeOK(coll.insert({a: {b: 1}})); +assert.writeOK(coll.insert({a: MaxKey})); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: func}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: func}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: func}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0}); })(); diff --git a/jstests/core/index_bounds_maxkey.js b/jstests/core/index_bounds_maxkey.js index b22af082b13..f7cd1eb2e66 100644 --- a/jstests/core/index_bounds_maxkey.js +++ b/jstests/core/index_bounds_maxkey.js @@ -1,39 +1,34 @@ // Index bounds generation tests for MaxKey values. // @tags: [requires_non_retryable_writes, assumes_unsharded_collection] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. +load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. 
- const coll = db.index_bounds_maxkey; - coll.drop(); +const coll = db.index_bounds_maxkey; +coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.insert({a: MaxKey})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.insert({a: MaxKey})); - // Test that queries involving comparison operators with MaxKey are covered. - const proj = {a: 1, _id: 0}; - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 1}); +// Test that queries involving comparison operators with MaxKey are covered. +const proj = { + a: 1, + _id: 0 +}; +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 1}); - // Test that all documents are considered less than MaxKey, regardless of the presence of - // the queried field 'a'. - coll.remove({}); - assert.writeOK(coll.insert({a: "string"})); - assert.writeOK(coll.insert({a: {b: 1}})); - assert.writeOK(coll.insert({})); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3}); +// Test that all documents are considered less than MaxKey, regardless of the presence of +// the queried field 'a'. +coll.remove({}); +assert.writeOK(coll.insert({a: "string"})); +assert.writeOK(coll.insert({a: {b: 1}})); +assert.writeOK(coll.insert({})); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3}); })(); diff --git a/jstests/core/index_bounds_minkey.js b/jstests/core/index_bounds_minkey.js index 6fa9d4f0d1e..31d38a2115e 100644 --- a/jstests/core/index_bounds_minkey.js +++ b/jstests/core/index_bounds_minkey.js @@ -1,39 +1,34 @@ // Index bounds generation tests for MinKey values. // @tags: [requires_non_retryable_writes, assumes_unsharded_collection] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. +load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. 
- const coll = db.index_bounds_minkey; - coll.drop(); +const coll = db.index_bounds_minkey; +coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.insert({a: MinKey})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.insert({a: MinKey})); - // Test that queries involving comparison operators with MinKey are covered. - const proj = {a: 1, _id: 0}; - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 1}); +// Test that queries involving comparison operators with MinKey are covered. +const proj = { + a: 1, + _id: 0 +}; +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 1}); - // Test that all documents are considered greater than MinKey, regardless of the presence of - // the queried field 'a'. - coll.remove({}); - assert.writeOK(coll.insert({a: "string"})); - assert.writeOK(coll.insert({a: {b: 1}})); - assert.writeOK(coll.insert({})); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0}); +// Test that all documents are considered greater than MinKey, regardless of the presence of +// the queried field 'a'. +coll.remove({}); +assert.writeOK(coll.insert({a: "string"})); +assert.writeOK(coll.insert({a: {b: 1}})); +assert.writeOK(coll.insert({})); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0}); })(); diff --git a/jstests/core/index_bounds_object.js b/jstests/core/index_bounds_object.js index 22a7f433efd..b1bdb2e9591 100644 --- a/jstests/core/index_bounds_object.js +++ b/jstests/core/index_bounds_object.js @@ -1,61 +1,59 @@ // Index bounds generation tests for Object values. // @tags: [requires_non_retryable_writes, assumes_unsharded_collection] (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. - - const coll = db.index_bounds_object; - coll.drop(); - - assert.commandWorked(coll.createIndex({a: 1})); - assert.writeOK(coll.insert({a: {b: 1}})); - - // Test that queries involving comparison operators with objects are covered. 
- const proj = {a: 1, _id: 0}; - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: {b: 0}}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: {b: 2}}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: {b: 1}}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: {b: 1, c: 2}}}, project: proj, count: 0}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: {b: 2}}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lte: {b: 1}}}, project: proj, count: 1}); - - // Test that queries involving comparisons with an empty object are covered. - assert.writeOK(coll.insert({a: {}})); - assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 1}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 2}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 1}); - - // Test that queries involving comparisons with a range of objects are covered. - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gt: {}, $lt: {b: 2}}}, project: proj, count: 1}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$gte: {}, $lt: {b: 2}}}, project: proj, count: 2}); - assertCoveredQueryAndCount( - {collection: coll, query: {a: {$lt: {}, $gte: {}}}, project: proj, count: 0}); - - // Test that documents that lie outside of the generated index bounds are not returned. Cannot - // test empty array upper bounds since that would force the index to be multi-key. - coll.remove({}); - assert.writeOK(coll.insert({a: "string"})); - assert.writeOK(coll.insert({a: true})); - assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 0}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 0}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0}); - assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 0}); - - // Adding a document containing an array makes the index multi-key which can never be used for a - // covered query. - assert.writeOK(coll.insert({a: []})); - assert(!isIndexOnly(db, coll.find({a: {$gt: {}}}, proj).explain().queryPlanner.winningPlan)); - assert(!isIndexOnly(db, coll.find({a: {$gte: {}}}, proj).explain().queryPlanner.winningPlan)); - assert(!isIndexOnly(db, coll.find({a: {$lt: {}}}, proj).explain().queryPlanner.winningPlan)); - assert(!isIndexOnly(db, coll.find({a: {$lte: {}}}, proj).explain().queryPlanner.winningPlan)); +"use strict"; + +load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. + +const coll = db.index_bounds_object; +coll.drop(); + +assert.commandWorked(coll.createIndex({a: 1})); +assert.writeOK(coll.insert({a: {b: 1}})); + +// Test that queries involving comparison operators with objects are covered. 
+const proj = { + a: 1, + _id: 0 +}; +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {b: 0}}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {b: 2}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {b: 1}}}, project: proj, count: 1}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$gte: {b: 1, c: 2}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {b: 2}}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {b: 1}}}, project: proj, count: 1}); + +// Test that queries involving comparisons with an empty object are covered. +assert.writeOK(coll.insert({a: {}})); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 1}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 2}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 1}); + +// Test that queries involving comparisons with a range of objects are covered. +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$gt: {}, $lt: {b: 2}}}, project: proj, count: 1}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$gte: {}, $lt: {b: 2}}}, project: proj, count: 2}); +assertCoveredQueryAndCount( + {collection: coll, query: {a: {$lt: {}, $gte: {}}}, project: proj, count: 0}); + +// Test that documents that lie outside of the generated index bounds are not returned. Cannot +// test empty array upper bounds since that would force the index to be multi-key. +coll.remove({}); +assert.writeOK(coll.insert({a: "string"})); +assert.writeOK(coll.insert({a: true})); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: {}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: {}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: {}}}, project: proj, count: 0}); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: {}}}, project: proj, count: 0}); + +// Adding a document containing an array makes the index multi-key which can never be used for a +// covered query. +assert.writeOK(coll.insert({a: []})); +assert(!isIndexOnly(db, coll.find({a: {$gt: {}}}, proj).explain().queryPlanner.winningPlan)); +assert(!isIndexOnly(db, coll.find({a: {$gte: {}}}, proj).explain().queryPlanner.winningPlan)); +assert(!isIndexOnly(db, coll.find({a: {$lt: {}}}, proj).explain().queryPlanner.winningPlan)); +assert(!isIndexOnly(db, coll.find({a: {$lte: {}}}, proj).explain().queryPlanner.winningPlan)); })(); diff --git a/jstests/core/index_bounds_pipe.js b/jstests/core/index_bounds_pipe.js index ee6cbd5b5f7..e0ef8cf915a 100644 --- a/jstests/core/index_bounds_pipe.js +++ b/jstests/core/index_bounds_pipe.js @@ -3,115 +3,112 @@ * non-escaped pipe '|' characters. 
*/ (function() { - 'use strict'; - - load('jstests/libs/analyze_plan.js'); - - const collName = 'index_bounds_pipe'; - const coll = db.getCollection(collName); - coll.drop(); - - assert.writeOK(coll.insert({_id: ''})); - assert.writeOK(coll.insert({_id: '\\|'})); - assert.writeOK(coll.insert({_id: 'a'})); - assert.writeOK(coll.insert({_id: 'a|b'})); - assert.writeOK(coll.insert({_id: 'b'})); - assert.writeOK(coll.insert({_id: '|'})); - - /** - * Asserts that a query on a field using 'params.regex' uses index bounds 'params.bounds' and - * returns results identical to 'params.results'. - * - * Also tests that a query using 'params.regex' will return documents with a field of type regex - * with an identical regular expression value. - */ - function assertIndexBoundsAndResult(params) { - const query = {_id: params.regex}; - const command = {find: collName, filter: query, projection: {_id: 1}, sort: {_id: 1}}; - const explain = db.runCommand({explain: command}); - assert.commandWorked(explain); - - // Check that the query uses correct index bounds. When run against a sharded cluster, there - // may be multiple index scan stages, but each should have the same index bounds. - const ixscans = getPlanStages(explain.queryPlanner.winningPlan, 'IXSCAN'); - assert.gt(ixscans.length, 0, 'Plan unexpectedly missing IXSCAN stage: ' + tojson(explain)); - for (let i = 0; i < ixscans.length; i++) { - const ixscan = ixscans[i]; - assert.eq(ixscan.indexBounds._id, - params.bounds, - `Expected bounds of ${tojson(params.bounds)} but got ${ - tojson(ixscan.indexBounds._id)}. i=${i}, all output: ${tojson(explain)}`); - } - - // Check that the query regex matches expected strings. - const results = db.runCommand(command); - assert.commandWorked(results); - assert.eq(results.cursor.firstBatch, - params.results, - 'Regex query ' + tojson(query) + ' returned incorrect results'); - - // Check that the query regex will exactly match identical regular expression objects. - const collRegexValue = db.getCollection(collName + params.regex); - collRegexValue.drop(); - assert.commandWorked(collRegexValue.createIndex({x: 1})); - - const doc = {_id: 0, x: params.regex}; - assert.writeOK(collRegexValue.insert(doc)); - - const regexQuery = {x: params.regex}; - assert.eq(collRegexValue.findOne(regexQuery), - doc, - 'Regex query ' + tojson(regexQuery) + - ' did not match document with identical regex value'); +'use strict'; + +load('jstests/libs/analyze_plan.js'); + +const collName = 'index_bounds_pipe'; +const coll = db.getCollection(collName); +coll.drop(); + +assert.writeOK(coll.insert({_id: ''})); +assert.writeOK(coll.insert({_id: '\\|'})); +assert.writeOK(coll.insert({_id: 'a'})); +assert.writeOK(coll.insert({_id: 'a|b'})); +assert.writeOK(coll.insert({_id: 'b'})); +assert.writeOK(coll.insert({_id: '|'})); + +/** + * Asserts that a query on a field using 'params.regex' uses index bounds 'params.bounds' and + * returns results identical to 'params.results'. + * + * Also tests that a query using 'params.regex' will return documents with a field of type regex + * with an identical regular expression value. + */ +function assertIndexBoundsAndResult(params) { + const query = {_id: params.regex}; + const command = {find: collName, filter: query, projection: {_id: 1}, sort: {_id: 1}}; + const explain = db.runCommand({explain: command}); + assert.commandWorked(explain); + + // Check that the query uses correct index bounds. 
When run against a sharded cluster, there + // may be multiple index scan stages, but each should have the same index bounds. + const ixscans = getPlanStages(explain.queryPlanner.winningPlan, 'IXSCAN'); + assert.gt(ixscans.length, 0, 'Plan unexpectedly missing IXSCAN stage: ' + tojson(explain)); + for (let i = 0; i < ixscans.length; i++) { + const ixscan = ixscans[i]; + assert.eq(ixscan.indexBounds._id, + params.bounds, + `Expected bounds of ${tojson(params.bounds)} but got ${ + tojson(ixscan.indexBounds._id)}. i=${i}, all output: ${tojson(explain)}`); } - // An anchored regex that uses no special operators can use tight index bounds. - assertIndexBoundsAndResult( - {regex: /^a/, bounds: ['["a", "b")', '[/^a/, /^a/]'], results: [{_id: 'a'}, {_id: 'a|b'}]}); - assertIndexBoundsAndResult( - {regex: /^\\/, bounds: ['["\\", "]")', '[/^\\\\/, /^\\\\/]'], results: [{_id: '\\|'}]}); - - // An anchored regex using the alternation operator cannot use tight index bounds. - assertIndexBoundsAndResult({ - regex: /^a|b/, - bounds: ['["", {})', '[/^a|b/, /^a|b/]'], - results: [{_id: 'a'}, {_id: 'a|b'}, {_id: 'b'}] - }); - - // An anchored regex that uses an escaped pipe character can use tight index bounds. - assertIndexBoundsAndResult( - {regex: /^a\|/, bounds: ['["a|", "a}")', '[/^a\\|/, /^a\\|/]'], results: [{_id: 'a|b'}]}); - assertIndexBoundsAndResult( - {regex: /^\|/, bounds: ['["|", "}")', '[/^\\|/, /^\\|/]'], results: [{_id: '|'}]}); - - // A pipe character that is preceded by an escaped backslash is correctly interpreted as the - // alternation operator and cannot use tight index bounds. - assertIndexBoundsAndResult({ - regex: /^\\|b/, - bounds: ['["", {})', '[/^\\\\|b/, /^\\\\|b/]'], - results: [{_id: '\\|'}, {_id: 'a|b'}, {_id: 'b'}] - }); - assertIndexBoundsAndResult({ - regex: /^\\|^b/, - bounds: ['["", {})', '[/^\\\\|^b/, /^\\\\|^b/]'], - results: [{_id: '\\|'}, {_id: 'b'}] - }); - - // An escaped backslash immediately followed by an escaped pipe does not use tight index bounds. - assertIndexBoundsAndResult({ - regex: /^\\\|/, - bounds: ['["", {})', '[/^\\\\\\|/, /^\\\\\\|/]'], - results: [{_id: '\\|'}] - }); - - // A pipe escaped with the \Q...\E escape sequence does not use tight index bounds. - assertIndexBoundsAndResult( - {regex: /^\Q|\E/, bounds: ['["", {})', '[/^\\Q|\\E/, /^\\Q|\\E/]'], results: [{_id: '|'}]}); - - // An escaped pipe within \Q...\E can use tight index bounds. - assertIndexBoundsAndResult({ - regex: /^\Q\|\E/, - bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'], - results: [{_id: '\\|'}] - }); + // Check that the query regex matches expected strings. + const results = db.runCommand(command); + assert.commandWorked(results); + assert.eq(results.cursor.firstBatch, + params.results, + 'Regex query ' + tojson(query) + ' returned incorrect results'); + + // Check that the query regex will exactly match identical regular expression objects. + const collRegexValue = db.getCollection(collName + params.regex); + collRegexValue.drop(); + assert.commandWorked(collRegexValue.createIndex({x: 1})); + + const doc = {_id: 0, x: params.regex}; + assert.writeOK(collRegexValue.insert(doc)); + + const regexQuery = {x: params.regex}; + assert.eq( + collRegexValue.findOne(regexQuery), + doc, + 'Regex query ' + tojson(regexQuery) + ' did not match document with identical regex value'); +} + +// An anchored regex that uses no special operators can use tight index bounds. 
+assertIndexBoundsAndResult( + {regex: /^a/, bounds: ['["a", "b")', '[/^a/, /^a/]'], results: [{_id: 'a'}, {_id: 'a|b'}]}); +assertIndexBoundsAndResult( + {regex: /^\\/, bounds: ['["\\", "]")', '[/^\\\\/, /^\\\\/]'], results: [{_id: '\\|'}]}); + +// An anchored regex using the alternation operator cannot use tight index bounds. +assertIndexBoundsAndResult({ + regex: /^a|b/, + bounds: ['["", {})', '[/^a|b/, /^a|b/]'], + results: [{_id: 'a'}, {_id: 'a|b'}, {_id: 'b'}] +}); + +// An anchored regex that uses an escaped pipe character can use tight index bounds. +assertIndexBoundsAndResult( + {regex: /^a\|/, bounds: ['["a|", "a}")', '[/^a\\|/, /^a\\|/]'], results: [{_id: 'a|b'}]}); +assertIndexBoundsAndResult( + {regex: /^\|/, bounds: ['["|", "}")', '[/^\\|/, /^\\|/]'], results: [{_id: '|'}]}); + +// A pipe character that is preceded by an escaped backslash is correctly interpreted as the +// alternation operator and cannot use tight index bounds. +assertIndexBoundsAndResult({ + regex: /^\\|b/, + bounds: ['["", {})', '[/^\\\\|b/, /^\\\\|b/]'], + results: [{_id: '\\|'}, {_id: 'a|b'}, {_id: 'b'}] +}); +assertIndexBoundsAndResult({ + regex: /^\\|^b/, + bounds: ['["", {})', '[/^\\\\|^b/, /^\\\\|^b/]'], + results: [{_id: '\\|'}, {_id: 'b'}] +}); + +// An escaped backslash immediately followed by an escaped pipe does not use tight index bounds. +assertIndexBoundsAndResult( + {regex: /^\\\|/, bounds: ['["", {})', '[/^\\\\\\|/, /^\\\\\\|/]'], results: [{_id: '\\|'}]}); + +// A pipe escaped with the \Q...\E escape sequence does not use tight index bounds. +assertIndexBoundsAndResult( + {regex: /^\Q|\E/, bounds: ['["", {})', '[/^\\Q|\\E/, /^\\Q|\\E/]'], results: [{_id: '|'}]}); + +// An escaped pipe within \Q...\E can use tight index bounds. +assertIndexBoundsAndResult({ + regex: /^\Q\|\E/, + bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'], + results: [{_id: '\\|'}] +}); }()); diff --git a/jstests/core/index_bounds_timestamp.js b/jstests/core/index_bounds_timestamp.js index 1f7cc261c30..fe0acf12936 100644 --- a/jstests/core/index_bounds_timestamp.js +++ b/jstests/core/index_bounds_timestamp.js @@ -3,147 +3,136 @@ // inclusiveness and exactness. (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - // Setup the test collection. - let coll = db.index_bounds_timestamp; - coll.drop(); - - // Create an index on the ts and _id fields. - assert.commandWorked(coll.createIndex({ts: 1, _id: 1})); - - // Insert some test documents. - // NOTE: Inserting Timestamp() or Timestamp(0, 0) into a collection creates a Timestamp for the - // current time. Max Timestamp value is Timestamp(2^32 - 1, 2^32 - 1). - const documents = [ - {_id: 0, ts: new Timestamp(0, 1)}, - {_id: 1, ts: new Timestamp(0, Math.pow(2, 31))}, - {_id: 2, ts: new Timestamp(0, Math.pow(2, 32) - 1)}, - {_id: 3, ts: new Timestamp(1, 0)}, - {_id: 4, ts: new Timestamp(Math.pow(2, 32) - 1, Math.pow(2, 32) - 1)} - ]; - assert.writeOK(coll.insert(documents)); - - // Sanity check the timestamp bounds generation plan. - let plan; - - // Check that count over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query. - plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}).count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 5}); - - // Check that find over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a - // FETCH stage when the query is covered by an index. 
- plan = - coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt find with project should be a covered query"); - - // Check that count over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query. - plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}).count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 5}); - - // Check that find over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a - // FETCH stage when the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$gte: Timestamp(0, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte find with project should be a covered query"); - - // Check that count over [Timestamp(0, 0), Timestamp(1, 0)) is a covered query. - plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}).count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $lt count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 3}); - - // Check that find over [Timestamp(0, 0), Timestamp(1, 0)) does not require a FETCH stage when - // the query is covered by an index. - plan = - coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $lt find with project should be a covered query"); - - // Check that count over [Timestamp(0, 0), Timestamp(1, 0)] is a covered query. - plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}).count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $lte count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 4}); - - // Check that find over [Timestamp(0, 0), Timestamp(1, 0)] does not require a FETCH stage when - // the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $lte find with project should be a covered query"); - - // Check that count over (Timestamp(0, 1), Timestamp(1, 0)) is a covered query. - plan = coll.explain("executionStats") - .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}) - .count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt, $lt count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 2}); - - // Check that find over (Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when - // the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt, $lt find with project should be a covered query"); - - // Check that count over (Timestamp(0, 1), Timestamp(1, 0)] is a covered query. 
- plan = coll.explain("executionStats") - .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}) - .count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt, $lte count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 3}); - - // Check that find over (Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when - // the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gt, $lte find with project should be a covered query"); - - // Check that count over [Timestamp(0, 1), Timestamp(1, 0)) is a covered query. - plan = coll.explain("executionStats") - .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}) - .count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte, $lt count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 3}); - - // Check that find over [Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when - // the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte, $lt find with project should be a covered query"); - - // Check that count over [Timestamp(0, 1), Timestamp(1, 0)] is a covered query. - plan = coll.explain("executionStats") - .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}) - .count(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte, $lte count should be a covered query"); - assertExplainCount({explainResults: plan, expectedCount: 4}); - - // Check that find over [Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when - // the query is covered by an index. - plan = coll.explain("executionStats") - .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) - .finish(); - assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte, $lte find with project should be a covered query"); +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +// Setup the test collection. +let coll = db.index_bounds_timestamp; +coll.drop(); + +// Create an index on the ts and _id fields. +assert.commandWorked(coll.createIndex({ts: 1, _id: 1})); + +// Insert some test documents. +// NOTE: Inserting Timestamp() or Timestamp(0, 0) into a collection creates a Timestamp for the +// current time. Max Timestamp value is Timestamp(2^32 - 1, 2^32 - 1). +const documents = [ + {_id: 0, ts: new Timestamp(0, 1)}, + {_id: 1, ts: new Timestamp(0, Math.pow(2, 31))}, + {_id: 2, ts: new Timestamp(0, Math.pow(2, 32) - 1)}, + {_id: 3, ts: new Timestamp(1, 0)}, + {_id: 4, ts: new Timestamp(Math.pow(2, 32) - 1, Math.pow(2, 32) - 1)} +]; +assert.writeOK(coll.insert(documents)); + +// Sanity check the timestamp bounds generation plan. +let plan; + +// Check that count over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query. +plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}).count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $gt count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 5}); + +// Check that find over (Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a +// FETCH stage when the query is covered by an index. 
+plan = coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gt find with project should be a covered query"); + +// Check that count over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] is a covered query. +plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}).count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $gte count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 5}); + +// Check that find over [Timestamp(0, 0), Timestamp(2^32 - 1, 2^32 - 1)] does not require a +// FETCH stage when the query is covered by an index. +plan = coll.explain("executionStats").find({ts: {$gte: Timestamp(0, 0)}}, {ts: 1, _id: 0}).finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gte find with project should be a covered query"); + +// Check that count over [Timestamp(0, 0), Timestamp(1, 0)) is a covered query. +plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}).count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $lt count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 3}); + +// Check that find over [Timestamp(0, 0), Timestamp(1, 0)) does not require a FETCH stage when +// the query is covered by an index. +plan = coll.explain("executionStats").find({ts: {$lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $lt find with project should be a covered query"); + +// Check that count over [Timestamp(0, 0), Timestamp(1, 0)] is a covered query. +plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}).count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), "ts $lte count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 4}); + +// Check that find over [Timestamp(0, 0), Timestamp(1, 0)] does not require a FETCH stage when +// the query is covered by an index. +plan = coll.explain("executionStats").find({ts: {$lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}).finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $lte find with project should be a covered query"); + +// Check that count over (Timestamp(0, 1), Timestamp(1, 0)) is a covered query. +plan = + coll.explain("executionStats").find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}).count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gt, $lt count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 2}); + +// Check that find over (Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when +// the query is covered by an index. +plan = coll.explain("executionStats") + .find({ts: {$gt: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}) + .finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gt, $lt find with project should be a covered query"); + +// Check that count over (Timestamp(0, 1), Timestamp(1, 0)] is a covered query. +plan = coll.explain("executionStats") + .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}) + .count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gt, $lte count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 3}); + +// Check that find over (Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when +// the query is covered by an index. 
+plan = coll.explain("executionStats") + .find({ts: {$gt: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) + .finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gt, $lte find with project should be a covered query"); + +// Check that count over [Timestamp(0, 1), Timestamp(1, 0)) is a covered query. +plan = coll.explain("executionStats") + .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}) + .count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gte, $lt count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 3}); + +// Check that find over [Timestamp(0, 1), Timestamp(1, 0)) does not require a FETCH stage when +// the query is covered by an index. +plan = coll.explain("executionStats") + .find({ts: {$gte: Timestamp(0, 1), $lt: Timestamp(1, 0)}}, {ts: 1, _id: 0}) + .finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gte, $lt find with project should be a covered query"); + +// Check that count over [Timestamp(0, 1), Timestamp(1, 0)] is a covered query. +plan = coll.explain("executionStats") + .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}) + .count(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gte, $lte count should be a covered query"); +assertExplainCount({explainResults: plan, expectedCount: 4}); + +// Check that find over [Timestamp(0, 1), Timestamp(1, 0)] does not require a FETCH stage when +// the query is covered by an index. +plan = coll.explain("executionStats") + .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) + .finish(); +assert(isIndexOnly(db, plan.queryPlanner.winningPlan), + "ts $gte, $lte find with project should be a covered query"); })(); diff --git a/jstests/core/index_check6.js b/jstests/core/index_check6.js index fd4a7177ffb..6e5ccdb7a4c 100644 --- a/jstests/core/index_check6.js +++ b/jstests/core/index_check6.js @@ -38,7 +38,8 @@ assert.eq( "D"); // SERVER-371 assert.eq.automsg("2", - "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" + ".explain('executionStats')" + + "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" + + ".explain('executionStats')" + ".executionStats.totalKeysExamined"); t.drop(); diff --git a/jstests/core/index_create_with_nul_in_name.js b/jstests/core/index_create_with_nul_in_name.js index c128dcc5880..c4894830fc8 100644 --- a/jstests/core/index_create_with_nul_in_name.js +++ b/jstests/core/index_create_with_nul_in_name.js @@ -1,14 +1,14 @@ // SERVER-16672 disallow creating indexes with NUL bytes in the name (function() { - 'use strict'; +'use strict'; - var coll = db.create_index_with_nul_in_name; - coll.drop(); +var coll = db.create_index_with_nul_in_name; +coll.drop(); - var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()}; +var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()}; - var res = coll.runCommand('createIndexes', {indexes: [idx]}); - assert.commandFailed(res, tojson(res)); - assert.eq(res.code, 67); // CannotCreateIndex +var res = coll.runCommand('createIndexes', {indexes: [idx]}); +assert.commandFailed(res, tojson(res)); +assert.eq(res.code, 67); // CannotCreateIndex }()); diff --git a/jstests/core/index_decimal.js b/jstests/core/index_decimal.js index 1fbb62332a2..9736d8f0903 100644 --- a/jstests/core/index_decimal.js +++ b/jstests/core/index_decimal.js @@ -5,53 +5,52 @@ // Test indexing of decimal numbers (function() { - 'use strict'; - - // Include helpers for analyzing explain output. 
- load('jstests/libs/analyze_plan.js'); - - var t = db.decimal_indexing; - t.drop(); - - // Create doubles and NumberDecimals. The double 0.1 is actually 0.10000000000000000555 - // and the double 0.3 is actually 0.2999999999999999888, so we can check ordering. - assert.writeOK(t.insert({x: 0.1, y: NumberDecimal('0.3000')})); - assert.writeOK(t.insert({x: 0.1})); - assert.writeOK(t.insert({y: 0.3})); - - // Create an index on existing numbers. - assert.commandWorked(t.createIndex({x: 1})); - assert.commandWorked(t.createIndex({y: -1})); - - // Insert some more items after index creation. Use _id for decimal. - assert.writeOK(t.insert({x: NumberDecimal('0.10')})); - assert.writeOK(t.insert({_id: NumberDecimal('0E3')})); - assert.writeError(t.insert({_id: -0.0})); - - // Check that we return exactly the right document, use an index to do so, and that the - // result of the covered query has the right number of trailing zeros. - var qres = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).toArray(); - var qplan = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).explain(); - assert.neq(tojson(NumberDecimal('0.1')), - tojson(NumberDecimal('0.10')), - 'trailing zeros are significant for exact equality'); - assert.eq(qres, - [{x: NumberDecimal('0.10')}], - 'query for x equal to decimal 0.10 returns wrong value'); - assert(isIndexOnly(db, qplan.queryPlanner.winningPlan), - 'query on decimal should be covered: ' + tojson(qplan)); - - // Check that queries for exact floating point numbers don't return nearby decimals. - assert.eq(t.find({x: 0.1}, {_id: 0}).sort({x: 1, y: 1}).toArray(), - [{x: 0.1}, {x: 0.1, y: NumberDecimal('0.3000')}], - 'wrong result for querying {x: 0.1}'); - assert.eq(t.find({x: {$lt: 0.1}}, {_id: 0}).toArray(), - [{x: NumberDecimal('0.10')}], - 'querying for decimal less than double 0.1 should return decimal 0.10'); - assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(), - [{y: 0.3}], - 'querying for double less than decimal 0.3 should return double 0.3'); - assert.eq(t.find({_id: 0}, {_id: 1}).toArray(), - [{_id: NumberDecimal('0E3')}], - 'querying for zero does not return the correct decimal'); +'use strict'; + +// Include helpers for analyzing explain output. +load('jstests/libs/analyze_plan.js'); + +var t = db.decimal_indexing; +t.drop(); + +// Create doubles and NumberDecimals. The double 0.1 is actually 0.10000000000000000555 +// and the double 0.3 is actually 0.2999999999999999888, so we can check ordering. +assert.writeOK(t.insert({x: 0.1, y: NumberDecimal('0.3000')})); +assert.writeOK(t.insert({x: 0.1})); +assert.writeOK(t.insert({y: 0.3})); + +// Create an index on existing numbers. +assert.commandWorked(t.createIndex({x: 1})); +assert.commandWorked(t.createIndex({y: -1})); + +// Insert some more items after index creation. Use _id for decimal. +assert.writeOK(t.insert({x: NumberDecimal('0.10')})); +assert.writeOK(t.insert({_id: NumberDecimal('0E3')})); +assert.writeError(t.insert({_id: -0.0})); + +// Check that we return exactly the right document, use an index to do so, and that the +// result of the covered query has the right number of trailing zeros. 
+var qres = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).toArray(); +var qplan = t.find({x: NumberDecimal('0.1')}, {_id: 0, x: 1}).explain(); +assert.neq(tojson(NumberDecimal('0.1')), + tojson(NumberDecimal('0.10')), + 'trailing zeros are significant for exact equality'); +assert.eq( + qres, [{x: NumberDecimal('0.10')}], 'query for x equal to decimal 0.10 returns wrong value'); +assert(isIndexOnly(db, qplan.queryPlanner.winningPlan), + 'query on decimal should be covered: ' + tojson(qplan)); + +// Check that queries for exact floating point numbers don't return nearby decimals. +assert.eq(t.find({x: 0.1}, {_id: 0}).sort({x: 1, y: 1}).toArray(), + [{x: 0.1}, {x: 0.1, y: NumberDecimal('0.3000')}], + 'wrong result for querying {x: 0.1}'); +assert.eq(t.find({x: {$lt: 0.1}}, {_id: 0}).toArray(), + [{x: NumberDecimal('0.10')}], + 'querying for decimal less than double 0.1 should return decimal 0.10'); +assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(), + [{y: 0.3}], + 'querying for double less than decimal 0.3 should return double 0.3'); +assert.eq(t.find({_id: 0}, {_id: 1}).toArray(), + [{_id: NumberDecimal('0E3')}], + 'querying for zero does not return the correct decimal'); })(); diff --git a/jstests/core/index_elemmatch1.js b/jstests/core/index_elemmatch1.js index 7b37c55d37d..3957e9d185c 100644 --- a/jstests/core/index_elemmatch1.js +++ b/jstests/core/index_elemmatch1.js @@ -3,30 +3,34 @@ * @tags: [assumes_balancer_off] */ (function() { - "use strict"; +"use strict"; - const coll = db.index_elemmatch1; - coll.drop(); +const coll = db.index_elemmatch1; +coll.drop(); - let x = 0; - let y = 0; - const bulk = coll.initializeUnorderedBulkOp(); - for (let a = 0; a < 10; a++) { - for (let b = 0; b < 10; b++) { - bulk.insert({a: a, b: b % 10, arr: [{x: x++ % 10, y: y++ % 10}]}); - } +let x = 0; +let y = 0; +const bulk = coll.initializeUnorderedBulkOp(); +for (let a = 0; a < 10; a++) { + for (let b = 0; b < 10; b++) { + bulk.insert({a: a, b: b % 10, arr: [{x: x++ % 10, y: y++ % 10}]}); } - assert.commandWorked(bulk.execute()); +} +assert.commandWorked(bulk.execute()); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.commandWorked(coll.createIndex({"arr.x": 1, a: 1})); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.commandWorked(coll.createIndex({"arr.x": 1, a: 1})); - const query = {a: 5, b: {$in: [1, 3, 5]}, arr: {$elemMatch: {x: 5, y: 5}}}; +const query = { + a: 5, + b: {$in: [1, 3, 5]}, + arr: {$elemMatch: {x: 5, y: 5}} +}; - const count = coll.find(query).itcount(); - assert.eq(count, 1); +const count = coll.find(query).itcount(); +assert.eq(count, 1); - const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats"); - assert.commandWorked(explain); - assert.eq(count, explain.executionStats.totalKeysExamined, explain); +const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats"); +assert.commandWorked(explain); +assert.eq(count, explain.executionStats.totalKeysExamined, explain); })(); diff --git a/jstests/core/index_elemmatch2.js b/jstests/core/index_elemmatch2.js index d87b26e7642..d2ff872dc49 100644 --- a/jstests/core/index_elemmatch2.js +++ b/jstests/core/index_elemmatch2.js @@ -3,61 +3,62 @@ * compatible with the index. 
*/ (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.elemMatch_index; - coll.drop(); - - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: [{}]})); - assert.writeOK(coll.insert({a: [1, null]})); - assert.writeOK(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]})); - - assert.commandWorked(coll.createIndex({a: 1}, {sparse: true})); - - function assertIndexResults(coll, query, useIndex, nReturned) { - const explainPlan = coll.find(query).explain("executionStats"); - assert.eq(isIxscan(db, explainPlan.queryPlanner.winningPlan), useIndex); - assert.eq(explainPlan.executionStats.nReturned, nReturned); - } - - assertIndexResults(coll, {a: {$elemMatch: {$exists: false}}}, false, 0); - - // An $elemMatch predicate is treated as nested, and the index should be used for $exists:true. - assertIndexResults(coll, {a: {$elemMatch: {$exists: true}}}, true, 3); - - // $not within $elemMatch should not attempt to use a sparse index for $exists:false. - assertIndexResults(coll, {a: {$elemMatch: {$not: {$exists: false}}}}, false, 3); - assertIndexResults(coll, {a: {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, false, 1); - - // $geo within $elemMatch should not attempt to use a non-geo index. - assertIndexResults( - coll, - { - a: { - $elemMatch: { - $geoWithin: { - $geometry: - {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 0], [0, 0]]]} - } - } - } - }, - false, - 1); - - // $in with a null value within $elemMatch should use a sparse index. - assertIndexResults(coll, {a: {$elemMatch: {$in: [null]}}}, true, 1); - - // $eq with a null value within $elemMatch should use a sparse index. - assertIndexResults(coll, {a: {$elemMatch: {$eq: null}}}, true, 1); - - // A negated regex within $elemMatch should not use an index, sparse or not. - assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3); - - coll.dropIndexes(); - assert.commandWorked(coll.createIndex({a: 1})); - assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3); +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const coll = db.elemMatch_index; +coll.drop(); + +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: [{}]})); +assert.writeOK(coll.insert({a: [1, null]})); +assert.writeOK(coll.insert({a: [{type: "Point", coordinates: [0, 0]}]})); + +assert.commandWorked(coll.createIndex({a: 1}, {sparse: true})); + +function assertIndexResults(coll, query, useIndex, nReturned) { + const explainPlan = coll.find(query).explain("executionStats"); + assert.eq(isIxscan(db, explainPlan.queryPlanner.winningPlan), useIndex); + assert.eq(explainPlan.executionStats.nReturned, nReturned); +} + +assertIndexResults(coll, {a: {$elemMatch: {$exists: false}}}, false, 0); + +// An $elemMatch predicate is treated as nested, and the index should be used for $exists:true. +assertIndexResults(coll, {a: {$elemMatch: {$exists: true}}}, true, 3); + +// $not within $elemMatch should not attempt to use a sparse index for $exists:false. +assertIndexResults(coll, {a: {$elemMatch: {$not: {$exists: false}}}}, false, 3); +assertIndexResults(coll, {a: {$elemMatch: {$gt: 0, $not: {$exists: false}}}}, false, 1); + +// $geo within $elemMatch should not attempt to use a non-geo index. +assertIndexResults(coll, + { + a: { + $elemMatch: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[0, 0], [0, 1], [1, 0], [0, 0]]] + } + } + } + } + }, + false, + 1); + +// $in with a null value within $elemMatch should use a sparse index. 
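The reason this is safe is worth spelling out: a sparse index omits documents that lack the indexed field entirely, so a top-level {a: null} predicate (which also matches documents with no 'a' at all) cannot be answered from it, whereas an $elemMatch predicate only matches documents where 'a' exists as an array, all of which have index entries. A standalone sketch of that distinction, using an illustrative scratch collection that is not part of this test:

var sparseNullDemo = db.sparse_null_demo;
sparseNullDemo.drop();
assert.commandWorked(sparseNullDemo.createIndex({a: 1}, {sparse: true}));
assert.writeOK(sparseNullDemo.insert({_id: 0}));                // no 'a': skipped by the sparse index
assert.writeOK(sparseNullDemo.insert({_id: 1, a: [1, null]}));  // 'a' present: indexed
// {a: null} matches both documents, including the one the sparse index skipped.
assert.eq(2, sparseNullDemo.find({a: null}).itcount());
// The $elemMatch form only matches documents whose array contains null; those
// all have index entries, so a sparse index scan can answer it.
assert.eq(1, sparseNullDemo.find({a: {$elemMatch: {$eq: null}}}).itcount());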
+assertIndexResults(coll, {a: {$elemMatch: {$in: [null]}}}, true, 1); + +// $eq with a null value within $elemMatch should use a sparse index. +assertIndexResults(coll, {a: {$elemMatch: {$eq: null}}}, true, 1); + +// A negated regex within $elemMatch should not use an index, sparse or not. +assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3); + +coll.dropIndexes(); +assert.commandWorked(coll.createIndex({a: 1})); +assertIndexResults(coll, {a: {$elemMatch: {$not: {$in: [/^a/]}}}}, false, 3); })(); diff --git a/jstests/core/index_filter_catalog_independent.js b/jstests/core/index_filter_catalog_independent.js index f3ea81a6627..32c7c1669a6 100644 --- a/jstests/core/index_filter_catalog_independent.js +++ b/jstests/core/index_filter_catalog_independent.js @@ -11,79 +11,79 @@ * ] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. - const collName = "index_filter_catalog_independent"; - const coll = db[collName]; - coll.drop(); +const collName = "index_filter_catalog_independent"; +const coll = db[collName]; +coll.drop(); - /* - * Check that there's one index filter on the given query which allows only 'indexes'. - */ - function assertOneIndexFilter(query, indexes) { - let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); - assert.eq(res.filters.length, 1); - assert.eq(res.filters[0].query, query); - assert.eq(res.filters[0].indexes, indexes); - } +/* + * Check that there's one index filter on the given query which allows only 'indexes'. + */ +function assertOneIndexFilter(query, indexes) { + let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); + assert.eq(res.filters.length, 1); + assert.eq(res.filters[0].query, query); + assert.eq(res.filters[0].indexes, indexes); +} - function assertIsIxScanOnIndex(winningPlan, keyPattern) { - const ixScans = getPlanStages(winningPlan, "IXSCAN"); - assert.gt(ixScans.length, 0); - ixScans.every((ixScan) => assert.eq(ixScan.keyPattern, keyPattern)); +function assertIsIxScanOnIndex(winningPlan, keyPattern) { + const ixScans = getPlanStages(winningPlan, "IXSCAN"); + assert.gt(ixScans.length, 0); + ixScans.every((ixScan) => assert.eq(ixScan.keyPattern, keyPattern)); - const collScans = getPlanStages(winningPlan, "COLLSCAN"); - assert.eq(collScans.length, 0); - } + const collScans = getPlanStages(winningPlan, "COLLSCAN"); + assert.eq(collScans.length, 0); +} - function checkIndexFilterSet(explain, shouldBeSet) { - if (explain.queryPlanner.winningPlan.shards) { - for (let shard of explain.queryPlanner.winningPlan.shards) { - assert.eq(shard.indexFilterSet, shouldBeSet); - } - } else { - assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet); +function checkIndexFilterSet(explain, shouldBeSet) { + if (explain.queryPlanner.winningPlan.shards) { + for (let shard of explain.queryPlanner.winningPlan.shards) { + assert.eq(shard.indexFilterSet, shouldBeSet); } + } else { + assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet); } +} - assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); - assert.commandWorked( - db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); - assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); +assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); +assert.commandWorked( + db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); +assertOneIndexFilter({x: 
3}, [{x: 1, y: 1}]); - let explain = assert.commandWorked(coll.find({x: 3}).explain()); - checkIndexFilterSet(explain, true); - assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); +let explain = assert.commandWorked(coll.find({x: 3}).explain()); +checkIndexFilterSet(explain, true); +assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); - // Drop an index. The filter should not change. - assert.commandWorked(coll.dropIndex({x: 1, y: 1})); - assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); +// Drop an index. The filter should not change. +assert.commandWorked(coll.dropIndex({x: 1, y: 1})); +assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); - // The {x: 1} index _could_ be used, but should not be considered because of the filter. - // Since we dropped the {x: 1, y: 1} index, a COLLSCAN must be used. - explain = coll.find({x: 3}).explain(); - checkIndexFilterSet(explain, true); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); +// The {x: 1} index _could_ be used, but should not be considered because of the filter. +// Since we dropped the {x: 1, y: 1} index, a COLLSCAN must be used. +explain = coll.find({x: 3}).explain(); +checkIndexFilterSet(explain, true); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); - // Create another index. This should not change whether the index filter is applied. - assert.commandWorked(coll.createIndex({x: 1, z: 1})); - explain = assert.commandWorked(coll.find({x: 3}).explain()); - checkIndexFilterSet(explain, true); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); +// Create another index. This should not change whether the index filter is applied. +assert.commandWorked(coll.createIndex({x: 1, z: 1})); +explain = assert.commandWorked(coll.find({x: 3}).explain()); +checkIndexFilterSet(explain, true); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); - // Changing the catalog and then setting an index filter should not result in duplicate entries. - assert.commandWorked(coll.createIndex({x: 1, a: 1})); - assert.commandWorked( - db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); - assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); +// Changing the catalog and then setting an index filter should not result in duplicate entries. +assert.commandWorked(coll.createIndex({x: 1, a: 1})); +assert.commandWorked( + db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); +assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); - // Recreate the {x: 1, y: 1} index and be sure that it's still used. - assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); - assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); +// Recreate the {x: 1, y: 1} index and be sure that it's still used. 
+assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); +assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); - explain = assert.commandWorked(coll.find({x: 3}).explain()); - checkIndexFilterSet(explain, true); - assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); +explain = assert.commandWorked(coll.find({x: 3}).explain()); +checkIndexFilterSet(explain, true); +assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); })(); diff --git a/jstests/core/index_filter_collation.js b/jstests/core/index_filter_collation.js index 92bbe005ce4..d6fa0daaa73 100644 --- a/jstests/core/index_filter_collation.js +++ b/jstests/core/index_filter_collation.js @@ -11,73 +11,76 @@ * ] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. - const collName = "index_filter_collation"; - const coll = db[collName]; +const collName = "index_filter_collation"; +const coll = db[collName]; - const caseInsensitive = {locale: "fr", strength: 2}; - coll.drop(); - assert.commandWorked(db.createCollection(collName, {collation: caseInsensitive})); +const caseInsensitive = { + locale: "fr", + strength: 2 +}; +coll.drop(); +assert.commandWorked(db.createCollection(collName, {collation: caseInsensitive})); - function checkIndexFilterSet(explain, shouldBeSet) { - if (explain.queryPlanner.winningPlan.shards) { - for (let shard of explain.queryPlanner.winningPlan.shards) { - assert.eq(shard.indexFilterSet, shouldBeSet); - } - } else { - assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet); +function checkIndexFilterSet(explain, shouldBeSet) { + if (explain.queryPlanner.winningPlan.shards) { + for (let shard of explain.queryPlanner.winningPlan.shards) { + assert.eq(shard.indexFilterSet, shouldBeSet); } + } else { + assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet); } +} - // Now create an index filter on a query with no collation specified. - assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); - assert.commandWorked( - db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); +// Now create an index filter on a query with no collation specified. +assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}])); +assert.commandWorked( + db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]})); - const listFilters = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); - assert.eq(listFilters.filters.length, 1); - assert.eq(listFilters.filters[0].query, {x: 3}); - assert.eq(listFilters.filters[0].indexes, [{x: 1, y: 1}]); +const listFilters = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); +assert.eq(listFilters.filters.length, 1); +assert.eq(listFilters.filters[0].query, {x: 3}); +assert.eq(listFilters.filters[0].indexes, [{x: 1, y: 1}]); - // Create an index filter on a query with the default collation specified. - assert.commandWorked(db.runCommand({ - planCacheSetFilter: collName, - query: {"x": 3}, - collation: caseInsensitive, - indexes: [{x: 1}] - })); +// Create an index filter on a query with the default collation specified. +assert.commandWorked(db.runCommand({ + planCacheSetFilter: collName, + query: {"x": 3}, + collation: caseInsensitive, + indexes: [{x: 1}] +})); - // Although these two queries would run with the same collation, they have different "shapes" - // so we expect there to be two index filters present. 
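For readers unfamiliar with the term, an index filter's query "shape" is the combination of query predicate, sort, projection, and collation, so the same {x: 3} predicate with and without an explicit collation is tracked as two distinct filters. A rough standalone sketch of that idea (separate scratch collection, not part of this test):

var shapeDemo = db.index_filter_shape_demo;
shapeDemo.drop();
assert.commandWorked(shapeDemo.createIndexes([{x: 1}, {x: 1, y: 1}]));
// Same predicate, three different shapes: plain, with a sort, and with a collation.
assert.commandWorked(db.runCommand(
    {planCacheSetFilter: shapeDemo.getName(), query: {x: 3}, indexes: [{x: 1, y: 1}]}));
assert.commandWorked(db.runCommand(
    {planCacheSetFilter: shapeDemo.getName(), query: {x: 3}, sort: {y: 1}, indexes: [{x: 1, y: 1}]}));
assert.commandWorked(db.runCommand({
    planCacheSetFilter: shapeDemo.getName(),
    query: {x: 3},
    collation: {locale: "fr", strength: 2},
    indexes: [{x: 1}]
}));
var shapeFilters = assert.commandWorked(db.runCommand({planCacheListFilters: shapeDemo.getName()}));
assert.eq(3, shapeFilters.filters.length);  // one filter entry per distinct shape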
- let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); - assert.eq(res.filters.length, 2); +// Although these two queries would run with the same collation, they have different "shapes" +// so we expect there to be two index filters present. +let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName})); +assert.eq(res.filters.length, 2); - // One of the filters should only be applied to queries with the "fr" collation - // and use the {x: 1} index. - assert(res.filters.some((filter) => filter.hasOwnProperty("collation") && - filter.collation.locale === "fr" && - friendlyEqual(filter.indexes, [{x: 1}]))); +// One of the filters should only be applied to queries with the "fr" collation +// and use the {x: 1} index. +assert(res.filters.some((filter) => filter.hasOwnProperty("collation") && + filter.collation.locale === "fr" && + friendlyEqual(filter.indexes, [{x: 1}]))); - // The other should not have any collation, and allow the index {x: 1, y: 1}. - assert(res.filters.some((filter) => !filter.hasOwnProperty("collation") && - friendlyEqual(filter.indexes, [{x: 1, y: 1}]))); +// The other should not have any collation, and allow the index {x: 1, y: 1}. +assert(res.filters.some((filter) => !filter.hasOwnProperty("collation") && + friendlyEqual(filter.indexes, [{x: 1, y: 1}]))); - function assertIsIxScanOnIndex(winningPlan, keyPattern) { - const ixScans = getPlanStages(winningPlan, "IXSCAN"); - assert.gt(ixScans.length, 0); - assert.eq(ixScans[0].keyPattern, keyPattern); - } +function assertIsIxScanOnIndex(winningPlan, keyPattern) { + const ixScans = getPlanStages(winningPlan, "IXSCAN"); + assert.gt(ixScans.length, 0); + assert.eq(ixScans[0].keyPattern, keyPattern); +} - // Run the queries and be sure the correct indexes are used. - let explain = coll.find({x: 3}).explain(); - checkIndexFilterSet(explain, true); - assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); +// Run the queries and be sure the correct indexes are used. +let explain = coll.find({x: 3}).explain(); +checkIndexFilterSet(explain, true); +assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1}); - // Run the queries and be sure the correct indexes are used. - explain = coll.find({x: 3}).collation(caseInsensitive).explain(); - checkIndexFilterSet(explain, true); - assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1}); +// Run the queries and be sure the correct indexes are used. +explain = coll.find({x: 3}).collation(caseInsensitive).explain(); +checkIndexFilterSet(explain, true); +assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1}); })(); diff --git a/jstests/core/index_id_options.js b/jstests/core/index_id_options.js index 7f16c7ba8c9..91bdce8090f 100644 --- a/jstests/core/index_id_options.js +++ b/jstests/core/index_id_options.js @@ -10,69 +10,66 @@ // - Non-_id indexes cannot have the name "_id_". (function() { - "use strict"; +"use strict"; - load("jstests/libs/get_index_helpers.js"); +load("jstests/libs/get_index_helpers.js"); - // Must use local db for testing because autoIndexId:false collections are not allowed in - // replicated databases. - var coll = db.getSiblingDB("local").index_id_options; +// Must use local db for testing because autoIndexId:false collections are not allowed in +// replicated databases. +var coll = db.getSiblingDB("local").index_id_options; - // _id indexes must have key pattern {_id: 1}. 
- coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandFailed(coll.ensureIndex({_id: -1}, {name: "_id_"})); +// _id indexes must have key pattern {_id: 1}. +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandFailed(coll.ensureIndex({_id: -1}, {name: "_id_"})); - // The name of an _id index gets corrected to "_id_". - coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "bad"})); - var spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1}); - assert.neq(null, spec, "_id index spec not found"); - assert.eq("_id_", spec.name, tojson(spec)); +// The name of an _id index gets corrected to "_id_". +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "bad"})); +var spec = GetIndexHelpers.findByKeyPattern(coll.getIndexes(), {_id: 1}); +assert.neq(null, spec, "_id index spec not found"); +assert.eq("_id_", spec.name, tojson(spec)); - // _id indexes cannot have any options other than "key", "name", "ns", "v", and "collation." - coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unique: true})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", sparse: false})); - assert.commandFailed( - coll.ensureIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", background: false})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unknown: true})); - assert.commandWorked(coll.ensureIndex( - {_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}})); +// _id indexes cannot have any options other than "key", "name", "ns", "v", and "collation." +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unique: true})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", sparse: false})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", background: false})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", unknown: true})); +assert.commandWorked(coll.ensureIndex( + {_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}})); - // _id indexes must have the collection default collation. - coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}})); +// _id indexes must have the collection default collation. 
+coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}})); - coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"})); +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"})); - coll.drop(); - assert.commandWorked( - coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}})); - assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "fr_CA"}})); - assert.commandWorked( - coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US", strength: 3}})); +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "simple"}})); +assert.commandFailed(coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "fr_CA"}})); +assert.commandWorked( + coll.ensureIndex({_id: 1}, {name: "_id_", collation: {locale: "en_US", strength: 3}})); - coll.drop(); - assert.commandWorked( - coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}})); - assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"})); - spec = GetIndexHelpers.findByName(coll.getIndexes(), "_id_"); - assert.neq(null, spec, "_id index spec not found"); - assert.eq("en_US", spec.collation.locale, tojson(spec)); +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false, collation: {locale: "en_US"}})); +assert.commandWorked(coll.ensureIndex({_id: 1}, {name: "_id_"})); +spec = GetIndexHelpers.findByName(coll.getIndexes(), "_id_"); +assert.neq(null, spec, "_id index spec not found"); +assert.eq("en_US", spec.collation.locale, tojson(spec)); - // Non-_id indexes cannot have the name "_id_". - coll.drop(); - assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); - assert.commandFailed(coll.ensureIndex({_id: "hashed"}, {name: "_id_"})); - assert.commandFailed(coll.ensureIndex({a: 1}, {name: "_id_"})); +// Non-_id indexes cannot have the name "_id_". +coll.drop(); +assert.commandWorked(coll.runCommand("create", {autoIndexId: false})); +assert.commandFailed(coll.ensureIndex({_id: "hashed"}, {name: "_id_"})); +assert.commandFailed(coll.ensureIndex({a: 1}, {name: "_id_"})); })(); diff --git a/jstests/core/index_multikey.js b/jstests/core/index_multikey.js index 8adde5a4ef0..32ff3bd58c4 100644 --- a/jstests/core/index_multikey.js +++ b/jstests/core/index_multikey.js @@ -3,36 +3,35 @@ * successful and unsuccessful inserts. */ (function() { - "use strict"; +"use strict"; - // For making assertions about explain output. - load("jstests/libs/analyze_plan.js"); +// For making assertions about explain output. 
+load("jstests/libs/analyze_plan.js"); - const coll = db.getCollection("index_multikey"); - coll.drop(); +const coll = db.getCollection("index_multikey"); +coll.drop(); - function getIndexScanExplainOutput() { - const explain = coll.find().hint({a: 1, b: 1}).explain(); - assert.commandWorked(explain); - return getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); - } +function getIndexScanExplainOutput() { + const explain = coll.find().hint({a: 1, b: 1}).explain(); + assert.commandWorked(explain); + return getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); +} - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - assert.commandWorked(coll.createIndex({"a.0.0": 1})); - let ixscan = getIndexScanExplainOutput(); - assert.eq(ixscan.isMultiKey, - false, - "empty index should not be marked multikey; plan: " + tojson(ixscan)); - assert.eq(ixscan.multiKeyPaths, - {a: [], b: []}, - "empty index should have no multiKeyPaths; plan: " + tojson(ixscan)); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +assert.commandWorked(coll.createIndex({"a.0.0": 1})); +let ixscan = getIndexScanExplainOutput(); +assert.eq( + ixscan.isMultiKey, false, "empty index should not be marked multikey; plan: " + tojson(ixscan)); +assert.eq(ixscan.multiKeyPaths, + {a: [], b: []}, + "empty index should have no multiKeyPaths; plan: " + tojson(ixscan)); - assert.commandWorked(coll.insert({a: [1, 2, 3]})); - ixscan = getIndexScanExplainOutput(); - assert.eq(ixscan.isMultiKey, - true, - "index should have been marked as multikey after insert; plan: " + tojson(ixscan)); - assert.eq(ixscan.multiKeyPaths, - {a: ["a"], b: []}, - "index has wrong multikey paths after insert; plan: " + ixscan); +assert.commandWorked(coll.insert({a: [1, 2, 3]})); +ixscan = getIndexScanExplainOutput(); +assert.eq(ixscan.isMultiKey, + true, + "index should have been marked as multikey after insert; plan: " + tojson(ixscan)); +assert.eq(ixscan.multiKeyPaths, + {a: ["a"], b: []}, + "index has wrong multikey paths after insert; plan: " + ixscan); })(); diff --git a/jstests/core/index_multiple_compatibility.js b/jstests/core/index_multiple_compatibility.js index 1ebe785abfc..8c203a49941 100644 --- a/jstests/core/index_multiple_compatibility.js +++ b/jstests/core/index_multiple_compatibility.js @@ -4,231 +4,236 @@ // Test that multiple indexes behave correctly together. (function() { - 'use strict'; - var coll = db.index_multiple_compatibility; +'use strict'; +var coll = db.index_multiple_compatibility; +coll.drop(); + +const enUSStrength1 = { + locale: "en_US", + strength: 1 +}; +const enUSStrength2 = { + locale: "en_US", + strength: 2 +}; +const enUSStrength3 = { + locale: "en_US", + strength: 3 +}; + +/** + * testIndexCompat runs a series of operations on two indexes to ensure that the two behave + * properly in combination. + * + * 'index1' and 'index2' take a document in the following format: + * + * { + * index: {key: Document, name: String, collation: Document, options...} + * doc: Document + * } + * + * The 'index' key indicates the index to create, and 'doc' (optional) indicates a document to + * insert in the collection, and look for in *only* this index. The 'index' key will be passed + * directly to the createIndexes command. + * + * 'both' optionally provides a document to insert into the collection, and expect in both + * indexes. + * + * - Create both indexes. + * - Insert document in index1. + * - Check that it is present in index1, and absent in index2, using find and a hint. + * - Insert document in index2. 
+ * - Check that it is present in index2, and absent in index1, using find and a hint. + * - Insert the document 'both', if it is provided. Check that it is inserted in both indexes. + * - Delete documents ensuring they are removed from the appropriate indexes. + */ +function testIndexCompat(coll, index1, index2, both) { coll.drop(); - const enUSStrength1 = {locale: "en_US", strength: 1}; - const enUSStrength2 = {locale: "en_US", strength: 2}; - const enUSStrength3 = {locale: "en_US", strength: 3}; - - /** - * testIndexCompat runs a series of operations on two indexes to ensure that the two behave - * properly in combination. - * - * 'index1' and 'index2' take a document in the following format: - * - * { - * index: {key: Document, name: String, collation: Document, options...} - * doc: Document - * } - * - * The 'index' key indicates the index to create, and 'doc' (optional) indicates a document to - * insert in the collection, and look for in *only* this index. The 'index' key will be passed - * directly to the createIndexes command. - * - * 'both' optionally provides a document to insert into the collection, and expect in both - * indexes. - * - * - Create both indexes. - * - Insert document in index1. - * - Check that it is present in index1, and absent in index2, using find and a hint. - * - Insert document in index2. - * - Check that it is present in index2, and absent in index1, using find and a hint. - * - Insert the document 'both', if it is provided. Check that it is inserted in both indexes. - * - Delete documents ensuring they are removed from the appropriate indexes. - */ - function testIndexCompat(coll, index1, index2, both) { - coll.drop(); - - assert(index1.hasOwnProperty('index')); - assert(index2.hasOwnProperty('index')); - - assert.commandWorked( - db.runCommand({createIndexes: coll.getName(), indexes: [index1.index, index2.index]})); - - // Check index 1 document. - if (index1.hasOwnProperty('doc')) { - assert.writeOK(coll.insert(index1.doc)); - assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 1); - assert.eq(coll.find(index1.doc).hint(index2.index.name).itcount(), 0); - } + assert(index1.hasOwnProperty('index')); + assert(index2.hasOwnProperty('index')); - // Check index 2 document. - if (index2.hasOwnProperty('doc')) { - assert.writeOK(coll.insert(index2.doc)); - assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 1); - assert.eq(coll.find(index2.doc).hint(index1.index.name).itcount(), 0); - } + assert.commandWorked( + db.runCommand({createIndexes: coll.getName(), indexes: [index1.index, index2.index]})); - // Check for present of both in both index1 and index2. - if (typeof both !== "undefined") { - assert.writeOK(coll.insert(both)); - assert.eq(coll.find(both).hint(index1.index.name).itcount(), 1); - assert.eq(coll.find(both).hint(index2.index.name).itcount(), 1); - } + // Check index 1 document. + if (index1.hasOwnProperty('doc')) { + assert.writeOK(coll.insert(index1.doc)); + assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 1); + assert.eq(coll.find(index1.doc).hint(index2.index.name).itcount(), 0); + } - // Remove index 1 document. - if (index1.hasOwnProperty('doc')) { - assert.writeOK(coll.remove(index1.doc)); - assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 0); - } + // Check index 2 document. 
+ if (index2.hasOwnProperty('doc')) { + assert.writeOK(coll.insert(index2.doc)); + assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 1); + assert.eq(coll.find(index2.doc).hint(index1.index.name).itcount(), 0); + } - // Remove index 2 document. - if (index2.hasOwnProperty('doc')) { - assert.writeOK(coll.remove(index2.doc)); - assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 0); - } + // Check for present of both in both index1 and index2. + if (typeof both !== "undefined") { + assert.writeOK(coll.insert(both)); + assert.eq(coll.find(both).hint(index1.index.name).itcount(), 1); + assert.eq(coll.find(both).hint(index2.index.name).itcount(), 1); + } - // Remove both. - if (typeof both !== "undefined") { - assert.writeOK(coll.remove(both)); - assert.eq(coll.find(both).hint(index1.index.name).itcount(), 0); - assert.eq(coll.find(both).hint(index2.index.name).itcount(), 0); - } + // Remove index 1 document. + if (index1.hasOwnProperty('doc')) { + assert.writeOK(coll.remove(index1.doc)); + assert.eq(coll.find(index1.doc).hint(index1.index.name).itcount(), 0); } - // Two identical partial indexes. - testIndexCompat(coll, - { - index: { - key: {a: 1}, - name: "a1", - collation: enUSStrength1, - partialFilterExpression: {a: {$type: 'string'}} - } - }, - { - index: { - key: {a: 1}, - name: "a2", - collation: enUSStrength2, - partialFilterExpression: {a: {$type: 'string'}} - } - }, - {a: "A"}); - - // Two non-overlapping partial indexes. - testIndexCompat(coll, - { - index: { - key: {a: 1}, - name: "a1", - collation: enUSStrength1, - partialFilterExpression: {a: {$lt: 10}} - }, - doc: {a: 5} + // Remove index 2 document. + if (index2.hasOwnProperty('doc')) { + assert.writeOK(coll.remove(index2.doc)); + assert.eq(coll.find(index2.doc).hint(index2.index.name).itcount(), 0); + } + + // Remove both. + if (typeof both !== "undefined") { + assert.writeOK(coll.remove(both)); + assert.eq(coll.find(both).hint(index1.index.name).itcount(), 0); + assert.eq(coll.find(both).hint(index2.index.name).itcount(), 0); + } +} + +// Two identical partial indexes. +testIndexCompat(coll, + { + index: { + key: {a: 1}, + name: "a1", + collation: enUSStrength1, + partialFilterExpression: {a: {$type: 'string'}} + } + }, + { + index: { + key: {a: 1}, + name: "a2", + collation: enUSStrength2, + partialFilterExpression: {a: {$type: 'string'}} + } + }, + {a: "A"}); + +// Two non-overlapping partial indexes. +testIndexCompat(coll, + { + index: { + key: {a: 1}, + name: "a1", + collation: enUSStrength1, + partialFilterExpression: {a: {$lt: 10}} }, - { - index: { - key: {a: 1}, - name: "a2", - collation: enUSStrength2, - partialFilterExpression: {a: {$gt: 20}} - }, - doc: {a: 25} - }); - - // Two partially overlapping partial indexes. - testIndexCompat(coll, - { - index: { - key: {a: 1}, - name: "a1", - collation: enUSStrength1, - partialFilterExpression: {a: {$lt: 10}}, - }, - doc: {a: -5} + doc: {a: 5} + }, + { + index: { + key: {a: 1}, + name: "a2", + collation: enUSStrength2, + partialFilterExpression: {a: {$gt: 20}} }, - { - index: { - key: {a: 1}, - name: "a2", - collation: enUSStrength2, - partialFilterExpression: {a: {$gte: 0}} - }, - doc: {a: 15} + doc: {a: 25} + }); + +// Two partially overlapping partial indexes. +testIndexCompat(coll, + { + index: { + key: {a: 1}, + name: "a1", + collation: enUSStrength1, + partialFilterExpression: {a: {$lt: 10}}, }, - {a: 5}); - - // A partial and sparse index. 
- testIndexCompat( - coll, - { - index: - {key: {a: 1}, name: "a1", collation: enUSStrength1, partialFilterExpression: {b: 0}}, - doc: {b: 0} - }, - { - index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: true}, - doc: {a: 5, b: 1} - }, - {a: -1, b: 0}); - - // A sparse and non-sparse index. - testIndexCompat( - coll, - { - index: {key: {a: 1}, name: "a1", collation: enUSStrength1, sparse: true}, - }, - {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: false}, doc: {b: 0}}, - {a: 1}); - - // A unique index and non-unique index. - testIndexCompat(coll, - { - index: {key: {a: 1}, name: "unique", collation: enUSStrength1, unique: true}, + doc: {a: -5} + }, + { + index: { + key: {a: 1}, + name: "a2", + collation: enUSStrength2, + partialFilterExpression: {a: {$gte: 0}} }, - {index: {key: {a: 1}, name: "reg", collation: enUSStrength2, unique: false}}, - {a: "foo"}); - - // Test that unique constraints are still enforced. - assert.writeOK(coll.insert({a: "f"})); - assert.writeError(coll.insert({a: "F"})); - - // A unique partial index and non-unique index. - testIndexCompat( - coll, - { - index: { - key: {a: 1}, - name: "unique", - collation: enUSStrength1, - unique: true, - partialFilterExpression: {a: {$type: 'number'}} - } - }, - {index: {key: {a: 1}, name: "a", collation: enUSStrength2, unique: false}, doc: {a: "foo"}}, - {a: 5}); - - assert.writeOK(coll.insert({a: 5})); - // Test that uniqueness is only enforced by the partial index. - assert.writeOK(coll.insert({a: "foo"})); - assert.writeOK(coll.insert({a: "foo"})); - assert.writeError(coll.insert({a: 5})); - - // Two unique indexes with different collations. - testIndexCompat(coll, - {index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true}}, - {index: {key: {a: 1}, name: "a2", collation: enUSStrength3, unique: true}}, - {a: "a"}); - - // Unique enforced on both indexes. - assert.writeOK(coll.insert({a: "a"})); - assert.writeError(coll.insert({a: "a"})); - assert.writeError(coll.insert({a: "A"})); - - // A unique and sparse index. - testIndexCompat( - coll, - { - index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true, sparse: true}, - }, - {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, unique: false}, doc: {b: 0}}, - {a: "a"}); - - assert.writeOK(coll.insert({a: "a"})); - assert.writeOK(coll.insert({})); - assert.writeOK(coll.insert({})); - assert.writeError(coll.insert({a: "a"})); + doc: {a: 15} + }, + {a: 5}); + +// A partial and sparse index. +testIndexCompat( + coll, + { + index: {key: {a: 1}, name: "a1", collation: enUSStrength1, partialFilterExpression: {b: 0}}, + doc: {b: 0} + }, + {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: true}, doc: {a: 5, b: 1}}, + {a: -1, b: 0}); + +// A sparse and non-sparse index. +testIndexCompat( + coll, + { + index: {key: {a: 1}, name: "a1", collation: enUSStrength1, sparse: true}, + }, + {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, sparse: false}, doc: {b: 0}}, + {a: 1}); + +// A unique index and non-unique index. +testIndexCompat(coll, + { + index: {key: {a: 1}, name: "unique", collation: enUSStrength1, unique: true}, + }, + {index: {key: {a: 1}, name: "reg", collation: enUSStrength2, unique: false}}, + {a: "foo"}); + +// Test that unique constraints are still enforced. +assert.writeOK(coll.insert({a: "f"})); +assert.writeError(coll.insert({a: "F"})); + +// A unique partial index and non-unique index. 
+testIndexCompat( + coll, + { + index: { + key: {a: 1}, + name: "unique", + collation: enUSStrength1, + unique: true, + partialFilterExpression: {a: {$type: 'number'}} + } + }, + {index: {key: {a: 1}, name: "a", collation: enUSStrength2, unique: false}, doc: {a: "foo"}}, + {a: 5}); + +assert.writeOK(coll.insert({a: 5})); +// Test that uniqueness is only enforced by the partial index. +assert.writeOK(coll.insert({a: "foo"})); +assert.writeOK(coll.insert({a: "foo"})); +assert.writeError(coll.insert({a: 5})); + +// Two unique indexes with different collations. +testIndexCompat(coll, + {index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true}}, + {index: {key: {a: 1}, name: "a2", collation: enUSStrength3, unique: true}}, + {a: "a"}); + +// Unique enforced on both indexes. +assert.writeOK(coll.insert({a: "a"})); +assert.writeError(coll.insert({a: "a"})); +assert.writeError(coll.insert({a: "A"})); + +// A unique and sparse index. +testIndexCompat( + coll, + { + index: {key: {a: 1}, name: "a1", collation: enUSStrength1, unique: true, sparse: true}, + }, + {index: {key: {a: 1}, name: "a2", collation: enUSStrength2, unique: false}, doc: {b: 0}}, + {a: "a"}); + +assert.writeOK(coll.insert({a: "a"})); +assert.writeOK(coll.insert({})); +assert.writeOK(coll.insert({})); +assert.writeError(coll.insert({a: "a"})); })(); diff --git a/jstests/core/index_partial_2dsphere.js b/jstests/core/index_partial_2dsphere.js index 502f70aa556..15e6427667a 100644 --- a/jstests/core/index_partial_2dsphere.js +++ b/jstests/core/index_partial_2dsphere.js @@ -4,67 +4,64 @@ (function() { - "use strict"; +"use strict"; - let coll = db.index_partial_2dsphere; - coll.drop(); +let coll = db.index_partial_2dsphere; +coll.drop(); - // Create a 2dsphere partial index for documents where isIndexed is greater than 0. - let partialIndex = {geoJson: '2dsphere'}; - assert.commandWorked( - coll.createIndex(partialIndex, {partialFilterExpression: {isIndexed: {$gt: 0}}})); +// Create a 2dsphere partial index for documents where isIndexed is greater than 0. +let partialIndex = {geoJson: '2dsphere'}; +assert.commandWorked( + coll.createIndex(partialIndex, {partialFilterExpression: {isIndexed: {$gt: 0}}})); - // This document has an invalid geoJSON format (duplicated points), but will not be indexed. - let unindexedDoc = { - "_id": 0, - "isIndexed": -1, - "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]} - }; +// This document has an invalid geoJSON format (duplicated points), but will not be indexed. +let unindexedDoc = { + "_id": 0, + "isIndexed": -1, + "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]} +}; - // This document has valid geoJson, and will be indexed. - let indexedDoc = { - "_id": 1, - "isIndexed": 1, - "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]} - }; +// This document has valid geoJson, and will be indexed. +let indexedDoc = { + "_id": 1, + "isIndexed": 1, + "geoJson": {"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]} +}; - assert.writeOK(coll.insert(unindexedDoc)); - assert.writeOK(coll.insert(indexedDoc)); +assert.writeOK(coll.insert(unindexedDoc)); +assert.writeOK(coll.insert(indexedDoc)); - // Return the one indexed document. - assert.eq(1, - coll.find({ - isIndexed: 1, - geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); +// Return the one indexed document. 
+assert.eq( + 1, + coll.find( + {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); - // Don't let an update to a document with an invalid geoJson succeed. - assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}})); +// Don't let an update to a document with an invalid geoJson succeed. +assert.writeError(coll.update({_id: 0}, {$set: {isIndexed: 1}})); - // Update the indexed document to remove it from the index. - assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: -1}})); +// Update the indexed document to remove it from the index. +assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: -1}})); - // This query should now return zero documents. - assert.eq(0, - coll.find({ - isIndexed: 1, - geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); +// This query should now return zero documents. +assert.eq( + 0, + coll.find( + {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); - // Re-index the document. - assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: 1}})); +// Re-index the document. +assert.writeOK(coll.update({_id: 1}, {$set: {isIndexed: 1}})); - // Remove both should succeed without error. - assert.writeOK(coll.remove({_id: 0})); - assert.writeOK(coll.remove({_id: 1})); +// Remove both should succeed without error. +assert.writeOK(coll.remove({_id: 0})); +assert.writeOK(coll.remove({_id: 1})); - assert.eq(0, - coll.find({ - isIndexed: 1, - geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}} - }) - .itcount()); - assert.commandWorked(coll.dropIndex(partialIndex)); +assert.eq( + 0, + coll.find( + {isIndexed: 1, geoJson: {$geoNear: {$geometry: {type: "Point", coordinates: [0, 0]}}}}) + .itcount()); +assert.commandWorked(coll.dropIndex(partialIndex)); })(); diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js index 55a6b06d117..0233f3fb8a0 100644 --- a/jstests/core/index_partial_create_drop.js +++ b/jstests/core/index_partial_create_drop.js @@ -10,73 +10,71 @@ // Test partial index creation and drops. (function() { - "use strict"; - var coll = db.index_partial_create_drop; +"use strict"; +var coll = db.index_partial_create_drop; - var getNumKeys = function(idxName) { - var res = assert.commandWorked(coll.validate(true)); - var kpi; +var getNumKeys = function(idxName) { + var res = assert.commandWorked(coll.validate(true)); + var kpi; - var isShardedNS = res.hasOwnProperty('raw'); - if (isShardedNS) { - kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex; - } else { - kpi = res.keysPerIndex; - } - return kpi[idxName]; - }; + var isShardedNS = res.hasOwnProperty('raw'); + if (isShardedNS) { + kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex; + } else { + kpi = res.keysPerIndex; + } + return kpi[idxName]; +}; - coll.drop(); +coll.drop(); - // Check bad filter spec on create. - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: 5})); - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}})); - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}})); - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}})); - assert.commandFailed(coll.ensureIndex({x: 1}, { - partialFilterExpression: - {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]} - })); - // Use of $expr is banned in a partial index filter. 
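For reference, partialFilterExpression accepts only a limited subset of the match language — equality/$eq, $exists: true, $gt/$gte/$lt/$lte, $type, and a top-level $and over those — which is why $expr is rejected below along with the other malformed filters. A small sketch of a filter that is accepted, on an illustrative scratch collection that is not part of this test:

var partialFilterDemo = db.index_partial_filter_demo;
partialFilterDemo.drop();
// A top-level $and combining comparison, $exists: true, and $type predicates is allowed.
assert.commandWorked(partialFilterDemo.createIndex(
    {x: 1},
    {partialFilterExpression: {$and: [{a: {$gte: 0}}, {b: {$exists: true}}, {c: {$type: 'string'}}]}}));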
- assert.commandFailed( - coll.createIndex({x: 1}, {partialFilterExpression: {$expr: {$eq: ["$x", 5]}}})); - assert.commandFailed(coll.createIndex( - {x: 1}, {partialFilterExpression: {$expr: {$eq: [{$trim: {input: "$x"}}, "hi"]}}})); +// Check bad filter spec on create. +assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: 5})); +assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}})); +assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}})); +assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}})); +assert.commandFailed(coll.ensureIndex({x: 1}, { + partialFilterExpression: {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]} +})); +// Use of $expr is banned in a partial index filter. +assert.commandFailed( + coll.createIndex({x: 1}, {partialFilterExpression: {$expr: {$eq: ["$x", 5]}}})); +assert.commandFailed(coll.createIndex( + {x: 1}, {partialFilterExpression: {$expr: {$eq: [{$trim: {input: "$x"}}, "hi"]}}})); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({x: i, a: i})); - } +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({x: i, a: i})); +} - // Create partial index. - assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}})); - assert.eq(5, getNumKeys("x_1")); - assert.commandWorked(coll.dropIndex({x: 1})); - assert.eq(1, coll.getIndexes().length); +// Create partial index. +assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}})); +assert.eq(5, getNumKeys("x_1")); +assert.commandWorked(coll.dropIndex({x: 1})); +assert.eq(1, coll.getIndexes().length); - // Create partial index in background. - assert.commandWorked( - coll.ensureIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}})); - assert.eq(5, getNumKeys("x_1")); - assert.commandWorked(coll.dropIndex({x: 1})); - assert.eq(1, coll.getIndexes().length); +// Create partial index in background. +assert.commandWorked( + coll.ensureIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}})); +assert.eq(5, getNumKeys("x_1")); +assert.commandWorked(coll.dropIndex({x: 1})); +assert.eq(1, coll.getIndexes().length); - // Create complete index, same key as previous indexes. - assert.commandWorked(coll.ensureIndex({x: 1})); - assert.eq(10, getNumKeys("x_1")); - assert.commandWorked(coll.dropIndex({x: 1})); - assert.eq(1, coll.getIndexes().length); +// Create complete index, same key as previous indexes. +assert.commandWorked(coll.ensureIndex({x: 1})); +assert.eq(10, getNumKeys("x_1")); +assert.commandWorked(coll.dropIndex({x: 1})); +assert.eq(1, coll.getIndexes().length); - // Partial indexes can't also be sparse indexes. - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: true})); - assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: 1})); - assert.commandWorked( - coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: false})); - assert.eq(2, coll.getIndexes().length); - assert.commandWorked(coll.dropIndex({x: 1})); - assert.eq(1, coll.getIndexes().length); +// Partial indexes can't also be sparse indexes. 
+assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: true})); +assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: 1})); +assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: false})); +assert.eq(2, coll.getIndexes().length); +assert.commandWorked(coll.dropIndex({x: 1})); +assert.eq(1, coll.getIndexes().length); - // SERVER-18858: Verify that query compatible w/ partial index succeeds after index drop. - assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}})); - assert.commandWorked(coll.dropIndex({x: 1})); - assert.eq(1, coll.find({x: 0, a: 0}).itcount()); +// SERVER-18858: Verify that query compatible w/ partial index succeeds after index drop. +assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}})); +assert.commandWorked(coll.dropIndex({x: 1})); +assert.eq(1, coll.find({x: 0, a: 0}).itcount()); })(); diff --git a/jstests/core/index_partial_read_ops.js b/jstests/core/index_partial_read_ops.js index 27fdb430fba..eba93a7ee23 100644 --- a/jstests/core/index_partial_read_ops.js +++ b/jstests/core/index_partial_read_ops.js @@ -8,75 +8,75 @@ load("jstests/libs/analyze_plan.js"); (function() { - "use strict"; - var explain; - var coll = db.index_partial_read_ops; - coll.drop(); - - assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}})); - assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index. - assert.writeOK(coll.insert({x: 6, a: 1})); // In index. - - // - // Verify basic functionality with find(). - // - - // find() operations that should use index. - explain = coll.explain('executionStats').find({x: 6, a: 1}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain('executionStats').find({x: {$gt: 1}, a: 1}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain('executionStats').find({x: 6, a: {$lte: 1}}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // find() operations that should not use index. - explain = coll.explain('executionStats').find({x: 6, a: {$lt: 1.6}}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain('executionStats').find({x: 6}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - // - // Verify basic functionality with the count command. - // - - // Count operation that should use index. - explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 1}); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // Count operation that should not use index. - explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 2}); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - // - // Verify basic functionality with the aggregate command. - // - - // Aggregate operation that should use index. - explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 1}}], {explain: true}); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // Aggregate operation that should not use index. 
- explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 2}}], {explain: true}); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - - // - // Verify basic functionality with the findAndModify command. - // - - // findAndModify operation that should use index. - explain = coll.explain('executionStats') - .findAndModify({query: {x: {$gt: 1}, a: 1}, update: {$inc: {x: 1}}}); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - - // findAndModify operation that should not use index. - explain = coll.explain('executionStats') - .findAndModify({query: {x: {$gt: 1}, a: 2}, update: {$inc: {x: 1}}}); - assert.eq(1, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); +"use strict"; +var explain; +var coll = db.index_partial_read_ops; +coll.drop(); + +assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}})); +assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index. +assert.writeOK(coll.insert({x: 6, a: 1})); // In index. + +// +// Verify basic functionality with find(). +// + +// find() operations that should use index. +explain = coll.explain('executionStats').find({x: 6, a: 1}).finish(); +assert.eq(1, explain.executionStats.nReturned); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +explain = coll.explain('executionStats').find({x: {$gt: 1}, a: 1}).finish(); +assert.eq(1, explain.executionStats.nReturned); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +explain = coll.explain('executionStats').find({x: 6, a: {$lte: 1}}).finish(); +assert.eq(1, explain.executionStats.nReturned); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// find() operations that should not use index. +explain = coll.explain('executionStats').find({x: 6, a: {$lt: 1.6}}).finish(); +assert.eq(1, explain.executionStats.nReturned); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); +explain = coll.explain('executionStats').find({x: 6}).finish(); +assert.eq(1, explain.executionStats.nReturned); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// +// Verify basic functionality with the count command. +// + +// Count operation that should use index. +explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 1}); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Count operation that should not use index. +explain = coll.explain('executionStats').count({x: {$gt: 1}, a: 2}); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// +// Verify basic functionality with the aggregate command. +// + +// Aggregate operation that should use index. +explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 1}}], {explain: true}); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// Aggregate operation that should not use index. +explain = coll.aggregate([{$match: {x: {$gt: 1}, a: 2}}], {explain: true}); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); + +// +// Verify basic functionality with the findAndModify command. +// + +// findAndModify operation that should use index. +explain = coll.explain('executionStats') + .findAndModify({query: {x: {$gt: 1}, a: 1}, update: {$inc: {x: 1}}}); +assert.eq(1, explain.executionStats.nReturned); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); + +// findAndModify operation that should not use index. 
+explain = coll.explain('executionStats') + .findAndModify({query: {x: {$gt: 1}, a: 2}, update: {$inc: {x: 1}}}); +assert.eq(1, explain.executionStats.nReturned); +assert(isCollscan(db, explain.queryPlanner.winningPlan)); })(); diff --git a/jstests/core/index_partial_validate.js b/jstests/core/index_partial_validate.js index bd854de9751..321fede5c19 100644 --- a/jstests/core/index_partial_validate.js +++ b/jstests/core/index_partial_validate.js @@ -3,19 +3,19 @@ 'use strict'; (function() { - var t = db.index_partial_validate; - t.drop(); +var t = db.index_partial_validate; +t.drop(); - var res = t.ensureIndex({a: 1}, {partialFilterExpression: {a: {$lte: 1}}}); - assert.commandWorked(res); +var res = t.ensureIndex({a: 1}, {partialFilterExpression: {a: {$lte: 1}}}); +assert.commandWorked(res); - res = t.ensureIndex({b: 1}); - assert.commandWorked(res); +res = t.ensureIndex({b: 1}); +assert.commandWorked(res); - res = t.insert({non_indexed_field: 'x'}); - assert.writeOK(res); +res = t.insert({non_indexed_field: 'x'}); +assert.writeOK(res); - res = t.validate(true); - assert.commandWorked(res); - assert(res.valid, 'Validate failed with response:\n' + tojson(res)); +res = t.validate(true); +assert.commandWorked(res); +assert(res.valid, 'Validate failed with response:\n' + tojson(res)); })(); diff --git a/jstests/core/index_partial_write_ops.js b/jstests/core/index_partial_write_ops.js index 730bcca5318..d79ce93155f 100644 --- a/jstests/core/index_partial_write_ops.js +++ b/jstests/core/index_partial_write_ops.js @@ -2,79 +2,78 @@ // @tags: [cannot_create_unique_index_when_using_hashed_shard_key, requires_non_retryable_writes] (function() { - "use strict"; - var coll = db.index_partial_write_ops; - - var getNumKeys = function(idxName) { - var res = assert.commandWorked(coll.validate(true)); - var kpi; - - var isShardedNS = res.hasOwnProperty('raw'); - if (isShardedNS) { - kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex; - } else { - kpi = res.keysPerIndex; - } - return kpi[idxName]; - }; +"use strict"; +var coll = db.index_partial_write_ops; + +var getNumKeys = function(idxName) { + var res = assert.commandWorked(coll.validate(true)); + var kpi; + + var isShardedNS = res.hasOwnProperty('raw'); + if (isShardedNS) { + kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex; + } else { + kpi = res.keysPerIndex; + } + return kpi[idxName]; +}; - coll.drop(); +coll.drop(); - // Create partial index. - assert.commandWorked(coll.ensureIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}})); +// Create partial index. +assert.commandWorked(coll.ensureIndex({x: 1}, {unique: true, partialFilterExpression: {a: 1}})); - assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index. - assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index. +assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index. +assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index. - assert.eq(1, getNumKeys("x_1")); +assert.eq(1, getNumKeys("x_1")); - // Move into partial index, then back out. - assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}})); - assert.eq(2, getNumKeys("x_1")); +// Move into partial index, then back out. 
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}})); +assert.eq(2, getNumKeys("x_1")); - assert.writeOK(coll.update({_id: 1}, {$set: {a: 2}})); - assert.eq(1, getNumKeys("x_1")); +assert.writeOK(coll.update({_id: 1}, {$set: {a: 2}})); +assert.eq(1, getNumKeys("x_1")); - // Bit blip doc in partial index, and out of partial index. - assert.writeOK(coll.update({_id: 2}, {$set: {b: 2}})); - assert.eq(1, getNumKeys("x_1")); +// Bit blip doc in partial index, and out of partial index. +assert.writeOK(coll.update({_id: 2}, {$set: {b: 2}})); +assert.eq(1, getNumKeys("x_1")); - assert.writeOK(coll.update({_id: 1}, {$set: {b: 2}})); - assert.eq(1, getNumKeys("x_1")); +assert.writeOK(coll.update({_id: 1}, {$set: {b: 2}})); +assert.eq(1, getNumKeys("x_1")); - var array = []; - for (var i = 0; i < 2048; i++) { - array.push({arbitrary: i}); - } +var array = []; +for (var i = 0; i < 2048; i++) { + array.push({arbitrary: i}); +} - // Update that causes record relocation. - assert.writeOK(coll.update({_id: 2}, {$set: {b: array}})); - assert.eq(1, getNumKeys("x_1")); +// Update that causes record relocation. +assert.writeOK(coll.update({_id: 2}, {$set: {b: array}})); +assert.eq(1, getNumKeys("x_1")); - assert.writeOK(coll.update({_id: 1}, {$set: {b: array}})); - assert.eq(1, getNumKeys("x_1")); +assert.writeOK(coll.update({_id: 1}, {$set: {b: array}})); +assert.eq(1, getNumKeys("x_1")); - // Delete that doesn't affect partial index. - assert.writeOK(coll.remove({x: 5})); - assert.eq(1, getNumKeys("x_1")); +// Delete that doesn't affect partial index. +assert.writeOK(coll.remove({x: 5})); +assert.eq(1, getNumKeys("x_1")); - // Delete that does affect partial index. - assert.writeOK(coll.remove({x: 6})); - assert.eq(0, getNumKeys("x_1")); +// Delete that does affect partial index. +assert.writeOK(coll.remove({x: 6})); +assert.eq(0, getNumKeys("x_1")); - // Documents with duplicate keys that straddle the index. - assert.writeOK(coll.insert({_id: 3, x: 1, a: 1})); // In index. - assert.writeOK(coll.insert({_id: 4, x: 1, a: 0})); // Not in index. - assert.writeErrorWithCode( - coll.insert({_id: 5, x: 1, a: 1}), - ErrorCodes.DuplicateKey); // Duplicate key constraint prevents insertion. +// Documents with duplicate keys that straddle the index. +assert.writeOK(coll.insert({_id: 3, x: 1, a: 1})); // In index. +assert.writeOK(coll.insert({_id: 4, x: 1, a: 0})); // Not in index. +assert.writeErrorWithCode(coll.insert({_id: 5, x: 1, a: 1}), + ErrorCodes.DuplicateKey); // Duplicate key constraint prevents insertion. - // Only _id 3 is in the index. - assert.eq(1, getNumKeys("x_1")); +// Only _id 3 is in the index. +assert.eq(1, getNumKeys("x_1")); - // Remove _id 4, _id 3 should remain in index. - assert.writeOK(coll.remove({_id: 4})); +// Remove _id 4, _id 3 should remain in index. +assert.writeOK(coll.remove({_id: 4})); - // _id 3 is still in the index. - assert.eq(1, getNumKeys("x_1")); +// _id 3 is still in the index. 
+assert.eq(1, getNumKeys("x_1")); })(); diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js index d815dde1b8d..ee99fdc4831 100644 --- a/jstests/core/index_stats.js +++ b/jstests/core/index_stats.js @@ -12,211 +12,211 @@ // ] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - var colName = "jstests_index_stats"; - var col = db[colName]; - col.drop(); +var colName = "jstests_index_stats"; +var col = db[colName]; +col.drop(); - var getUsageCount = function(indexName, collection) { - collection = collection || col; - var cursor = collection.aggregate([{$indexStats: {}}]); - while (cursor.hasNext()) { - var doc = cursor.next(); +var getUsageCount = function(indexName, collection) { + collection = collection || col; + var cursor = collection.aggregate([{$indexStats: {}}]); + while (cursor.hasNext()) { + var doc = cursor.next(); - if (doc.name === indexName) { - return doc.accesses.ops; - } + if (doc.name === indexName) { + return doc.accesses.ops; } + } - return undefined; - }; - - var getIndexKey = function(indexName) { - var cursor = col.aggregate([{$indexStats: {}}]); - while (cursor.hasNext()) { - var doc = cursor.next(); - - if (doc.name === indexName) { - return doc.key; - } - } + return undefined; +}; - return undefined; - }; +var getIndexKey = function(indexName) { + var cursor = col.aggregate([{$indexStats: {}}]); + while (cursor.hasNext()) { + var doc = cursor.next(); - var getIndexNamesForWinningPlan = function(explain) { - var indexNameList = []; - var winningStages = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN"); - for (var i = 0; i < winningStages.length; ++i) { - indexNameList.push(winningStages[i].indexName); + if (doc.name === indexName) { + return doc.key; } + } - return indexNameList; - }; - - assert.writeOK(col.insert({a: 1, b: 1, c: 1})); - assert.writeOK(col.insert({a: 2, b: 2, c: 2})); - assert.writeOK(col.insert({a: 3, b: 3, c: 3})); - - // - // Confirm no index stats object exists prior to index creation. - // - col.findOne({a: 1}); - assert.eq(undefined, getUsageCount("a_1")); - - // - // Create indexes. - // - assert.commandWorked(col.createIndex({a: 1}, {name: "a_1"})); - assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"})); - var countA = 0; // Tracks expected index access for "a_1". - var countB = 0; // Tracks expected index access for "b_1_c_1". - - // - // Confirm a stats object exists post index creation (with 0 count). - // - assert.eq(countA, getUsageCount("a_1")); - assert.eq({a: 1}, getIndexKey("a_1")); - - // - // Confirm index stats tick on find(). - // - col.findOne({a: 1}); - countA++; - - assert.eq(countA, getUsageCount("a_1")); - - // - // Confirm index stats tick on findAndModify() update. - // - var res = - db.runCommand({findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true}); - assert.commandWorked(res); - countA++; - assert.eq(countA, getUsageCount("a_1")); + return undefined; +}; - // - // Confirm index stats tick on findAndModify() delete. - // - res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true}); - assert.commandWorked(res); - countA++; - assert.eq(countA, getUsageCount("a_1")); - assert.writeOK(col.insert(res.value)); - - // - // Confirm $and operation ticks indexes for winning plan, but not rejected plans. 
- // - - // We cannot use explain() to determine which indexes would be used for this query, since - // 1) explain() will not bump the access counters - // 2) explain() always runs the multi planner, and the multi planner may choose a different - // index each run. We therefore run the query, and check that only one of the indexes has its - // counter bumped (assuming we never choose an index intersection plan). - const results = col.find({a: 2, b: 2}).itcount(); - if (countA + 1 == getUsageCount("a_1")) { - // Plan using index A was chosen. Index B should not have been used (assuming no index - // intersection plans are used). - countA++; - } else { - // Plan using index B was chosen. Index A should not have been used (assuming no index - // intersection plans are used). - assert.eq(++countB, getUsageCount("b_1_c_1")); +var getIndexNamesForWinningPlan = function(explain) { + var indexNameList = []; + var winningStages = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN"); + for (var i = 0; i < winningStages.length; ++i) { + indexNameList.push(winningStages[i].indexName); } - assert.eq(countA, getUsageCount("a_1")); - assert.eq(countB, getUsageCount("b_1_c_1")); - assert.eq(0, getUsageCount("_id_")); - - // - // Confirm index stats tick on distinct(). - // - res = db.runCommand({distinct: colName, key: "b", query: {b: 1}}); - assert.commandWorked(res); - countB++; - assert.eq(countB, getUsageCount("b_1_c_1")); - - // - // Confirm index stats tick on aggregate w/ match. - // - res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}], cursor: {}}); - assert.commandWorked(res); - countB++; - assert.eq(countB, getUsageCount("b_1_c_1")); - - // - // Confirm index stats tick on mapReduce with query. - // - res = db.runCommand({ - mapReduce: colName, - map: function() { - emit(this.b, this.c); - }, - reduce: function(key, val) { - return val; - }, - query: {b: 2}, - out: {inline: true} - }); - assert.commandWorked(res); - countB++; - assert.eq(countB, getUsageCount("b_1_c_1")); - - // - // Confirm index stats tick on update(). - // - assert.writeOK(col.update({a: 2}, {$set: {d: 2}})); - countA++; - assert.eq(countA, getUsageCount("a_1")); - - // - // Confirm index stats tick on remove(). - // - assert.writeOK(col.remove({a: 2})); - countA++; - assert.eq(countA, getUsageCount("a_1")); - // - // Confirm multiple index $or operation ticks all involved indexes. - // - col.findOne({$or: [{a: 1}, {b: 1, c: 1}]}); + return indexNameList; +}; + +assert.writeOK(col.insert({a: 1, b: 1, c: 1})); +assert.writeOK(col.insert({a: 2, b: 2, c: 2})); +assert.writeOK(col.insert({a: 3, b: 3, c: 3})); + +// +// Confirm no index stats object exists prior to index creation. +// +col.findOne({a: 1}); +assert.eq(undefined, getUsageCount("a_1")); + +// +// Create indexes. +// +assert.commandWorked(col.createIndex({a: 1}, {name: "a_1"})); +assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"})); +var countA = 0; // Tracks expected index access for "a_1". +var countB = 0; // Tracks expected index access for "b_1_c_1". + +// +// Confirm a stats object exists post index creation (with 0 count). +// +assert.eq(countA, getUsageCount("a_1")); +assert.eq({a: 1}, getIndexKey("a_1")); + +// +// Confirm index stats tick on find(). +// +col.findOne({a: 1}); +countA++; + +assert.eq(countA, getUsageCount("a_1")); + +// +// Confirm index stats tick on findAndModify() update. 
+// +var res = + db.runCommand({findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true}); +assert.commandWorked(res); +countA++; +assert.eq(countA, getUsageCount("a_1")); + +// +// Confirm index stats tick on findAndModify() delete. +// +res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true}); +assert.commandWorked(res); +countA++; +assert.eq(countA, getUsageCount("a_1")); +assert.writeOK(col.insert(res.value)); + +// +// Confirm $and operation ticks indexes for winning plan, but not rejected plans. +// + +// We cannot use explain() to determine which indexes would be used for this query, since +// 1) explain() will not bump the access counters +// 2) explain() always runs the multi planner, and the multi planner may choose a different +// index each run. We therefore run the query, and check that only one of the indexes has its +// counter bumped (assuming we never choose an index intersection plan). +const results = col.find({a: 2, b: 2}).itcount(); +if (countA + 1 == getUsageCount("a_1")) { + // Plan using index A was chosen. Index B should not have been used (assuming no index + // intersection plans are used). countA++; - countB++; - assert.eq(countA, getUsageCount("a_1")); - assert.eq(countB, getUsageCount("b_1_c_1")); - - // - // Confirm index stats object does not exist post index drop. - // - assert.commandWorked(col.dropIndex("b_1_c_1")); - countB = 0; - assert.eq(undefined, getUsageCount("b_1_c_1")); - - // - // Confirm index stats object exists with count 0 once index is recreated. - // - assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"})); - assert.eq(countB, getUsageCount("b_1_c_1")); - - // - // Confirm that retrieval fails if $indexStats is not in the first pipeline position. - // - assert.throws(function() { - col.aggregate([{$match: {}}, {$indexStats: {}}]); - }); - - // - // Confirm index use is recorded for $lookup. - // - const foreignCollection = db[colName + "_foreign"]; - foreignCollection.drop(); - assert.writeOK(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}])); - col.drop(); - assert.writeOK(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}])); - assert.eq(0, getUsageCount("_id_")); - assert.eq(2, +} else { + // Plan using index B was chosen. Index A should not have been used (assuming no index + // intersection plans are used). + assert.eq(++countB, getUsageCount("b_1_c_1")); +} +assert.eq(countA, getUsageCount("a_1")); +assert.eq(countB, getUsageCount("b_1_c_1")); +assert.eq(0, getUsageCount("_id_")); + +// +// Confirm index stats tick on distinct(). +// +res = db.runCommand({distinct: colName, key: "b", query: {b: 1}}); +assert.commandWorked(res); +countB++; +assert.eq(countB, getUsageCount("b_1_c_1")); + +// +// Confirm index stats tick on aggregate w/ match. +// +res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}], cursor: {}}); +assert.commandWorked(res); +countB++; +assert.eq(countB, getUsageCount("b_1_c_1")); + +// +// Confirm index stats tick on mapReduce with query. +// +res = db.runCommand({ + mapReduce: colName, + map: function() { + emit(this.b, this.c); + }, + reduce: function(key, val) { + return val; + }, + query: {b: 2}, + out: {inline: true} +}); +assert.commandWorked(res); +countB++; +assert.eq(countB, getUsageCount("b_1_c_1")); + +// +// Confirm index stats tick on update(). +// +assert.writeOK(col.update({a: 2}, {$set: {d: 2}})); +countA++; +assert.eq(countA, getUsageCount("a_1")); + +// +// Confirm index stats tick on remove(). 
+// +assert.writeOK(col.remove({a: 2})); +countA++; +assert.eq(countA, getUsageCount("a_1")); + +// +// Confirm multiple index $or operation ticks all involved indexes. +// +col.findOne({$or: [{a: 1}, {b: 1, c: 1}]}); +countA++; +countB++; +assert.eq(countA, getUsageCount("a_1")); +assert.eq(countB, getUsageCount("b_1_c_1")); + +// +// Confirm index stats object does not exist post index drop. +// +assert.commandWorked(col.dropIndex("b_1_c_1")); +countB = 0; +assert.eq(undefined, getUsageCount("b_1_c_1")); + +// +// Confirm index stats object exists with count 0 once index is recreated. +// +assert.commandWorked(col.createIndex({b: 1, c: 1}, {name: "b_1_c_1"})); +assert.eq(countB, getUsageCount("b_1_c_1")); + +// +// Confirm that retrieval fails if $indexStats is not in the first pipeline position. +// +assert.throws(function() { + col.aggregate([{$match: {}}, {$indexStats: {}}]); +}); + +// +// Confirm index use is recorded for $lookup. +// +const foreignCollection = db[colName + "_foreign"]; +foreignCollection.drop(); +assert.writeOK(foreignCollection.insert([{_id: 0}, {_id: 1}, {_id: 2}])); +col.drop(); +assert.writeOK(col.insert([{_id: 0, foreignId: 1}, {_id: 1, foreignId: 2}])); +assert.eq(0, getUsageCount("_id_")); +assert.eq(2, col.aggregate([ {$match: {_id: {$in: [0, 1]}}}, { @@ -229,26 +229,26 @@ } ]) .itcount()); - assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index"); - assert.eq(2, - getUsageCount("_id_", foreignCollection), - "Expected each lookup to be tracked as an index use"); - - // - // Confirm index use is recorded for $graphLookup. - // - foreignCollection.drop(); - assert.writeOK(foreignCollection.insert([ - {_id: 0, connectedTo: 1}, - {_id: 1, connectedTo: "X"}, - {_id: 2, connectedTo: 3}, - {_id: 3, connectedTo: "Y"}, // Be sure to use a different value here to make sure - // $graphLookup doesn't cache the query. - ])); - col.drop(); - assert.writeOK(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}])); - assert.eq(0, getUsageCount("_id_")); - assert.eq(2, +assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index"); +assert.eq(2, + getUsageCount("_id_", foreignCollection), + "Expected each lookup to be tracked as an index use"); + +// +// Confirm index use is recorded for $graphLookup. +// +foreignCollection.drop(); +assert.writeOK(foreignCollection.insert([ + {_id: 0, connectedTo: 1}, + {_id: 1, connectedTo: "X"}, + {_id: 2, connectedTo: 3}, + {_id: 3, connectedTo: "Y"}, // Be sure to use a different value here to make sure + // $graphLookup doesn't cache the query. 
+])); +col.drop(); +assert.writeOK(col.insert([{_id: 0, foreignId: 0}, {_id: 1, foreignId: 2}])); +assert.eq(0, getUsageCount("_id_")); +assert.eq(2, col.aggregate([ {$match: {_id: {$in: [0, 1]}}}, { @@ -262,8 +262,8 @@ } ]) .itcount()); - assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index"); - assert.eq(2 * 3, - getUsageCount("_id_", foreignCollection), - "Expected each of two graph searches to issue 3 queries, each using the _id index"); +assert.eq(1, getUsageCount("_id_", col), "Expected aggregation to use _id index"); +assert.eq(2 * 3, + getUsageCount("_id_", foreignCollection), + "Expected each of two graph searches to issue 3 queries, each using the _id index"); })(); diff --git a/jstests/core/index_type_change.js b/jstests/core/index_type_change.js index ad2525fe015..af2671338a2 100644 --- a/jstests/core/index_type_change.js +++ b/jstests/core/index_type_change.js @@ -11,32 +11,32 @@ load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly'. (function() { - "use strict"; +"use strict"; - var coll = db.index_type_change; - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); +var coll = db.index_type_change; +coll.drop(); +assert.commandWorked(coll.ensureIndex({a: 1})); - assert.writeOK(coll.insert({a: 2})); - assert.eq(1, coll.find({a: {$type: "double"}}).itcount()); +assert.writeOK(coll.insert({a: 2})); +assert.eq(1, coll.find({a: {$type: "double"}}).itcount()); - var newVal = new NumberLong(2); - var res = coll.update({}, {a: newVal}); // Replacement update. - assert.writeOK(res); - assert.eq(res.nMatched, 1); - if (coll.getMongo().writeMode() == "commands") - assert.eq(res.nModified, 1); +var newVal = new NumberLong(2); +var res = coll.update({}, {a: newVal}); // Replacement update. +assert.writeOK(res); +assert.eq(res.nMatched, 1); +if (coll.getMongo().writeMode() == "commands") + assert.eq(res.nModified, 1); - // Make sure it actually changed the type. - assert.eq(1, coll.find({a: {$type: "long"}}).itcount()); +// Make sure it actually changed the type. +assert.eq(1, coll.find({a: {$type: "long"}}).itcount()); - // Now use a covered query to ensure the index entry has been updated. +// Now use a covered query to ensure the index entry has been updated. - // First make sure it's actually using a covered index scan. - var explain = coll.explain().find({a: 2}, {_id: 0, a: 1}); - assert(isIndexOnly(db, explain)); +// First make sure it's actually using a covered index scan. +var explain = coll.explain().find({a: 2}, {_id: 0, a: 1}); +assert(isIndexOnly(db, explain)); - var updated = coll.findOne({a: 2}, {_id: 0, a: 1}); +var updated = coll.findOne({a: 2}, {_id: 0, a: 1}); - assert(updated.a instanceof NumberLong, "Index entry did not change type"); +assert(updated.a instanceof NumberLong, "Index entry did not change type"); })(); diff --git a/jstests/core/indexes_multiple_commands.js b/jstests/core/indexes_multiple_commands.js index 60bc2b69173..7058fd32019 100644 --- a/jstests/core/indexes_multiple_commands.js +++ b/jstests/core/indexes_multiple_commands.js @@ -5,162 +5,158 @@ // Test that commands behave correctly under the presence of multiple indexes with the same key // pattern. (function() { - 'use strict'; - - var coll = db.indexes_multiple_commands; - var usingWriteCommands = db.getMongo().writeMode() === "commands"; - - /** - * Assert that the result of the index creation ('cmd') indicates that 'numIndexes' were - * created. - * - * If omitted, 'numIndexes' defaults to 1. 
- * - * @param cmd {Function} A function to execute that attempts to create indexes. - * @param numIndexes {Number} The expected number of indexes that cmd creates. - */ - function assertIndexesCreated(cmd, numIndexes) { - var cmdResult; - - if (typeof numIndexes === "undefined") { - numIndexes = 1; - } - - if (usingWriteCommands) { - cmdResult = cmd(); - if (numIndexes == 0) { - assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict); - return; - } - - assert.commandWorked(cmdResult); - var isShardedNS = cmdResult.hasOwnProperty('raw'); - if (isShardedNS) { - cmdResult = cmdResult['raw'][Object.getOwnPropertyNames(cmdResult['raw'])[0]]; - } - assert.eq(cmdResult.numIndexesAfter - cmdResult.numIndexesBefore, - numIndexes, - tojson(cmdResult)); - } else { - var nIndexesBefore = coll.getIndexes().length; - cmdResult = cmd(); - if (numIndexes == 0) { - assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict); - return; - } - - assert.commandWorked(cmdResult); - var nIndexesAfter = coll.getIndexes().length; - assert.eq(nIndexesAfter - nIndexesBefore, numIndexes, tojson(coll.getIndexes())); - } - } - - /** - * Assert that the result of the index create command indicates no indexes were created since - * the indexes were the same (collation and key pattern matched). - * - * (Index creation succeeds if none are created, as long as no options conflict.) - * - * @param {Function} A function to execute that attempts to create indexes. - */ - function assertIndexNotCreated(cmd) { - assertIndexesCreated(cmd, 0); +'use strict'; + +var coll = db.indexes_multiple_commands; +var usingWriteCommands = db.getMongo().writeMode() === "commands"; + +/** + * Assert that the result of the index creation ('cmd') indicates that 'numIndexes' were + * created. + * + * If omitted, 'numIndexes' defaults to 1. + * + * @param cmd {Function} A function to execute that attempts to create indexes. + * @param numIndexes {Number} The expected number of indexes that cmd creates. + */ +function assertIndexesCreated(cmd, numIndexes) { + var cmdResult; + + if (typeof numIndexes === "undefined") { + numIndexes = 1; } - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName())); - - // Test that multiple indexes with the same key pattern and different collation can be created. - - assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "a_1"})); - // The requested index already exists, but with a different name, so the index is not created. - assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "a_1:1"})); - - // Indexes with different collations and the same key pattern are allowed if the names are - // not the same. - assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "fr", collation: {locale: "fr"}})); - assertIndexesCreated( - () => coll.createIndex({a: 1}, {name: "en_US", collation: {locale: "en_US"}})); - - // The requested index doesn't yet exist, but the name is used, so this command fails. - assert.commandFailed(coll.createIndex({a: 1}, {name: "a_1", collation: {locale: "en_US"}})); - - // The requested index already exists with a different name, so the index is not created. - assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "fr2", collation: {locale: "fr"}})); - - // Options can differ on indexes with different collations. 
- assertIndexesCreated( - () => coll.createIndex( - {a: 1}, {name: "fr1_sparse", collation: {locale: "fr", strength: 1}, sparse: true})); - - // The requested index already exists, but with different options, so the command fails. - assert.commandFailed( - coll.createIndex({a: 1}, {name: "fr_sparse", collation: {locale: "fr"}, sparse: true})); - - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName())); - - // Multiple non-conflicting indexes can be created in one command. - var multipleCreate = () => db.runCommand({ - createIndexes: coll.getName(), - indexes: [ - {key: {a: 1}, name: "en_US", collation: {locale: "en_US"}}, - {key: {a: 1}, name: "en_US_1", collation: {locale: "en_US", strength: 1}} - ] - }); - assertIndexesCreated(multipleCreate, 2); - - // Cannot create another _id index. - assert.commandFailed(coll.createIndex({_id: 1}, {name: "other", collation: {locale: "fr"}})); - - // Test that indexes must be dropped by name if the key pattern is ambiguous. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName())); - - // Create multiple indexes with the same key pattern and collation. - assertIndexesCreated(() => - coll.createIndex({a: 1}, {name: "foo", collation: {locale: "en_US"}})); - assertIndexesCreated( - () => coll.createIndex({a: 1}, {name: "bar", collation: {locale: "en_US", strength: 1}})); - - // Indexes cannot be dropped by an ambiguous key pattern. - assert.commandFailed(coll.dropIndex({a: 1})); - - // Indexes can be dropped by name. - assert.commandWorked(coll.dropIndex("foo")); - assert.commandWorked(coll.dropIndex("bar")); - - // Test that hint behaves correctly in the presence of multiple indexes. - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName())); - - assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "sbc"})); - assertIndexesCreated( - () => coll.createIndex( - {a: 1}, {name: "caseInsensitive", collation: {locale: "en_US", strength: 2}})); - - assert.writeOK(coll.insert([{a: "a"}, {a: "A"}, {a: 20}])); - - // An ambiguous hint pattern fails. - assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount()); - if (db.getMongo().useReadCommands()) { - assert.throws( - () => - coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount()); - } + if (usingWriteCommands) { + cmdResult = cmd(); + if (numIndexes == 0) { + assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict); + return; + } - // Index hint by name succeeds. - assert.eq(coll.find({a: "a"}).hint("sbc").itcount(), 1); - // A hint on an incompatible index does a whole index scan, and then filters using the query - // collation. - assert.eq(coll.find({a: "a"}).hint("caseInsensitive").itcount(), 1); - if (db.getMongo().useReadCommands()) { + assert.commandWorked(cmdResult); + var isShardedNS = cmdResult.hasOwnProperty('raw'); + if (isShardedNS) { + cmdResult = cmdResult['raw'][Object.getOwnPropertyNames(cmdResult['raw'])[0]]; + } assert.eq( - coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(), 2); + cmdResult.numIndexesAfter - cmdResult.numIndexesBefore, numIndexes, tojson(cmdResult)); + } else { + var nIndexesBefore = coll.getIndexes().length; + cmdResult = cmd(); + if (numIndexes == 0) { + assert.commandFailedWithCode(cmdResult, ErrorCodes.IndexOptionsConflict); + return; + } - // A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't - // match. 
- assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}})); - assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1); + assert.commandWorked(cmdResult); + var nIndexesAfter = coll.getIndexes().length; + assert.eq(nIndexesAfter - nIndexesBefore, numIndexes, tojson(coll.getIndexes())); } +} + +/** + * Assert that the result of the index create command indicates no indexes were created since + * the indexes were the same (collation and key pattern matched). + * + * (Index creation succeeds if none are created, as long as no options conflict.) + * + * @param {Function} A function to execute that attempts to create indexes. + */ +function assertIndexNotCreated(cmd) { + assertIndexesCreated(cmd, 0); +} + +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName())); + +// Test that multiple indexes with the same key pattern and different collation can be created. + +assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "a_1"})); +// The requested index already exists, but with a different name, so the index is not created. +assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "a_1:1"})); + +// Indexes with different collations and the same key pattern are allowed if the names are +// not the same. +assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "fr", collation: {locale: "fr"}})); +assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "en_US", collation: {locale: "en_US"}})); + +// The requested index doesn't yet exist, but the name is used, so this command fails. +assert.commandFailed(coll.createIndex({a: 1}, {name: "a_1", collation: {locale: "en_US"}})); + +// The requested index already exists with a different name, so the index is not created. +assertIndexNotCreated(() => coll.createIndex({a: 1}, {name: "fr2", collation: {locale: "fr"}})); + +// Options can differ on indexes with different collations. +assertIndexesCreated( + () => coll.createIndex( + {a: 1}, {name: "fr1_sparse", collation: {locale: "fr", strength: 1}, sparse: true})); + +// The requested index already exists, but with different options, so the command fails. +assert.commandFailed( + coll.createIndex({a: 1}, {name: "fr_sparse", collation: {locale: "fr"}, sparse: true})); + +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName())); + +// Multiple non-conflicting indexes can be created in one command. +var multipleCreate = () => db.runCommand({ + createIndexes: coll.getName(), + indexes: [ + {key: {a: 1}, name: "en_US", collation: {locale: "en_US"}}, + {key: {a: 1}, name: "en_US_1", collation: {locale: "en_US", strength: 1}} + ] +}); +assertIndexesCreated(multipleCreate, 2); + +// Cannot create another _id index. +assert.commandFailed(coll.createIndex({_id: 1}, {name: "other", collation: {locale: "fr"}})); + +// Test that indexes must be dropped by name if the key pattern is ambiguous. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName())); + +// Create multiple indexes with the same key pattern and collation. +assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "foo", collation: {locale: "en_US"}})); +assertIndexesCreated( + () => coll.createIndex({a: 1}, {name: "bar", collation: {locale: "en_US", strength: 1}})); + +// Indexes cannot be dropped by an ambiguous key pattern. +assert.commandFailed(coll.dropIndex({a: 1})); + +// Indexes can be dropped by name. 
+assert.commandWorked(coll.dropIndex("foo")); +assert.commandWorked(coll.dropIndex("bar")); + +// Test that hint behaves correctly in the presence of multiple indexes. +coll.drop(); +assert.commandWorked(db.createCollection(coll.getName())); + +assertIndexesCreated(() => coll.createIndex({a: 1}, {name: "sbc"})); +assertIndexesCreated( + () => coll.createIndex({a: 1}, + {name: "caseInsensitive", collation: {locale: "en_US", strength: 2}})); + +assert.writeOK(coll.insert([{a: "a"}, {a: "A"}, {a: 20}])); + +// An ambiguous hint pattern fails. +assert.throws(() => coll.find({a: 1}).hint({a: 1}).itcount()); +if (db.getMongo().useReadCommands()) { + assert.throws( + () => coll.find({a: 1}).collation({locale: "en_US", strength: 2}).hint({a: 1}).itcount()); +} + +// Index hint by name succeeds. +assert.eq(coll.find({a: "a"}).hint("sbc").itcount(), 1); +// A hint on an incompatible index does a whole index scan, and then filters using the query +// collation. +assert.eq(coll.find({a: "a"}).hint("caseInsensitive").itcount(), 1); +if (db.getMongo().useReadCommands()) { + assert.eq(coll.find({a: "a"}).collation({locale: "en_US", strength: 2}).hint("sbc").itcount(), + 2); + + // A non-ambiguous index hint by key pattern is allowed, even if the collation doesn't + // match. + assertIndexesCreated(() => coll.createIndex({b: 1}, {collation: {locale: "fr"}})); + assert.eq(coll.find({a: "a"}).collation({locale: "en_US"}).hint({b: 1}).itcount(), 1); +} })(); diff --git a/jstests/core/insert_one.js b/jstests/core/insert_one.js index 0a50ee1b7f3..9a1a6d393f8 100644 --- a/jstests/core/insert_one.js +++ b/jstests/core/insert_one.js @@ -3,36 +3,36 @@ * object's prototype's methods. */ (function() { - 'use strict'; - var col = db.insert_one_number; - col.drop(); +'use strict'; +var col = db.insert_one_number; +col.drop(); - assert.eq(col.find().itcount(), 0, "collection should be empty"); +assert.eq(col.find().itcount(), 0, "collection should be empty"); - assert.throws(function() { - col.insertOne(1); - }, [], "insertOne should only accept objects"); +assert.throws(function() { + col.insertOne(1); +}, [], "insertOne should only accept objects"); - assert.eq(col.find().itcount(), 0, "collection should still be empty"); +assert.eq(col.find().itcount(), 0, "collection should still be empty"); - var result = col.insertOne({abc: 'def'}); - assert(result.acknowledged, "insertOne should succeed on documents"); +var result = col.insertOne({abc: 'def'}); +assert(result.acknowledged, "insertOne should succeed on documents"); - assert.docEq(col.findOne({_id: result.insertedId}), - {_id: result.insertedId, abc: 'def'}, - "simple document not equal to collection find result"); +assert.docEq(col.findOne({_id: result.insertedId}), + {_id: result.insertedId, abc: 'def'}, + "simple document not equal to collection find result"); - var doc = new Number(); - doc.x = 12; - assert('zeroPad' in doc, "number object should have 'zeroPad' in prototype"); +var doc = new Number(); +doc.x = 12; +assert('zeroPad' in doc, "number object should have 'zeroPad' in prototype"); - result = col.insertOne(doc); - assert(result.acknowledged, "insertOne should succeed on documents"); +result = col.insertOne(doc); +assert(result.acknowledged, "insertOne should succeed on documents"); - assert(!('zeroPad' in col.findOne({_id: result.insertedId})), - "inserted result should not have functions from the number object's prototype"); +assert(!('zeroPad' in col.findOne({_id: result.insertedId})), + "inserted result should not have functions from the number 
object's prototype"); - assert.docEq(col.findOne({_id: result.insertedId}), - {_id: result.insertedId, x: doc.x}, - "document with prototype not equal to collection find result"); +assert.docEq(col.findOne({_id: result.insertedId}), + {_id: result.insertedId, x: doc.x}, + "document with prototype not equal to collection find result"); })(); diff --git a/jstests/core/invalid_collation_locale.js b/jstests/core/invalid_collation_locale.js index 38209ed1f49..d520aef5920 100644 --- a/jstests/core/invalid_collation_locale.js +++ b/jstests/core/invalid_collation_locale.js @@ -1,30 +1,25 @@ // This test is meant to reproduce SERVER-38840, where the ICU library crashes on Windows when // attempting to parse an invalid ID-prefixed locale. (function() { - "use strict"; +"use strict"; - const coll = db.invalid_collation_locale; - coll.drop(); +const coll = db.invalid_collation_locale; +coll.drop(); - // Locale's which start with "x" or "i" followed by a separator ("_" or "-") are considered - // ID-prefixed. - assert.commandFailedWithCode( - db.createCollection(coll.getName(), {collation: {locale: "x_invalid"}}), - ErrorCodes.BadValue); +// Locale's which start with "x" or "i" followed by a separator ("_" or "-") are considered +// ID-prefixed. +assert.commandFailedWithCode( + db.createCollection(coll.getName(), {collation: {locale: "x_invalid"}}), ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.createCollection(coll.getName(), {collation: {locale: "X_invalid"}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.createCollection(coll.getName(), {collation: {locale: "X_invalid"}}), ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.createCollection(coll.getName(), {collation: {locale: "i-invalid"}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.createCollection(coll.getName(), {collation: {locale: "i-invalid"}}), ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.createCollection(coll.getName(), {collation: {locale: "I-invalid"}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.createCollection(coll.getName(), {collation: {locale: "I-invalid"}}), ErrorCodes.BadValue); - assert.commandFailedWithCode( - db.createCollection(coll.getName(), {collation: {locale: "xx_invalid"}}), - ErrorCodes.BadValue); +assert.commandFailedWithCode( + db.createCollection(coll.getName(), {collation: {locale: "xx_invalid"}}), ErrorCodes.BadValue); })(); diff --git a/jstests/core/invalid_db_name.js b/jstests/core/invalid_db_name.js index 18da1e229f9..23cec76d446 100644 --- a/jstests/core/invalid_db_name.js +++ b/jstests/core/invalid_db_name.js @@ -3,16 +3,16 @@ // Can't shard collection with invalid db name. 
// @tags: [assumes_unsharded_collection] (function() { - var invalidDB = db.getSiblingDB("NonExistentDB"); +var invalidDB = db.getSiblingDB("NonExistentDB"); - // This is a hack to bypass invalid database name checking by the DB constructor - invalidDB._name = "Invalid DB Name"; +// This is a hack to bypass invalid database name checking by the DB constructor +invalidDB._name = "Invalid DB Name"; - assert.writeError(invalidDB.coll.insert({x: 1})); +assert.writeError(invalidDB.coll.insert({x: 1})); - // Ensure that no database was created - var dbList = db.getSiblingDB('admin').runCommand({listDatabases: 1}).databases; - dbList.forEach(function(dbInfo) { - assert.neq('Invalid DB Name', dbInfo.name, 'database with invalid name was created'); - }); +// Ensure that no database was created +var dbList = db.getSiblingDB('admin').runCommand({listDatabases: 1}).databases; +dbList.forEach(function(dbInfo) { + assert.neq('Invalid DB Name', dbInfo.name, 'database with invalid name was created'); +}); }()); diff --git a/jstests/core/js_jit.js b/jstests/core/js_jit.js index 4ccdd2917ae..72290d45758 100644 --- a/jstests/core/js_jit.js +++ b/jstests/core/js_jit.js @@ -5,36 +5,36 @@ * implementations correctly. We force the JIT to kick in by using large loops. */ (function() { - 'use strict'; +'use strict'; - function testDBCollection() { - const c = new DBCollection(null, null, "foo", "test.foo"); - for (let i = 0; i < 100000; i++) { - if (c.toString() != "test.foo") { - throw i; - } +function testDBCollection() { + const c = new DBCollection(null, null, "foo", "test.foo"); + for (let i = 0; i < 100000; i++) { + if (c.toString() != "test.foo") { + throw i; } } +} - function testDB() { - const c = new DB(null, "test"); - for (let i = 0; i < 100000; i++) { - if (c.toString() != "test") { - throw i; - } +function testDB() { + const c = new DB(null, "test"); + for (let i = 0; i < 100000; i++) { + if (c.toString() != "test") { + throw i; } } +} - function testDBQuery() { - const c = DBQuery('a', 'b', 'c', 'd'); - for (let i = 0; i < 100000; i++) { - if (c.toString() != "DBQuery: d -> null") { - throw i; - } +function testDBQuery() { + const c = DBQuery('a', 'b', 'c', 'd'); + for (let i = 0; i < 100000; i++) { + if (c.toString() != "DBQuery: d -> null") { + throw i; } } +} - testDBCollection(); - testDB(); - testDBQuery(); +testDBCollection(); +testDB(); +testDBQuery(); })();
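As an aside, the JIT-warmup pattern used by js_jit.js above can be summarized in a short, self-contained sketch (assumption: run in the mongo shell, where the DB constructor and the toString() behavior exercised by the test are available; the helper name assertStableUnderJit and its parameters are illustrative and not part of the test itself):

function assertStableUnderJit(makeValue, expected) {
    // A large iteration count forces SpiderMonkey to JIT-compile the loop body,
    // so the compiled fast path of toString() is the one being checked.
    for (let i = 0; i < 100000; i++) {
        const v = makeValue();
        if (v.toString() != expected) {
            throw new Error("iteration " + i + ": unexpected toString(): " + v.toString());
        }
    }
}
// Usage mirroring the test above:
assertStableUnderJit(() => new DB(null, "test"), "test");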
\ No newline at end of file diff --git a/jstests/core/json1.js b/jstests/core/json1.js index 127795a5126..731bef9fcdc 100644 --- a/jstests/core/json1.js +++ b/jstests/core/json1.js @@ -5,7 +5,7 @@ x = { }; eval("y = " + tojson(x)); assert.eq(tojson(x), tojson(y), "A"); -assert.eq(typeof(x.nulls), typeof(y.nulls), "B"); +assert.eq(typeof (x.nulls), typeof (y.nulls), "B"); // each type is parsed properly x = { diff --git a/jstests/core/json_schema/additional_items.js b/jstests/core/json_schema/additional_items.js index c3866c88565..7165e9d4363 100644 --- a/jstests/core/json_schema/additional_items.js +++ b/jstests/core/json_schema/additional_items.js @@ -4,83 +4,88 @@ * Tests the JSON Schema "additionalItems" keyword. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.getCollection("json_schema_additional_items"); - coll.drop(); +const coll = db.getCollection("json_schema_additional_items"); +coll.drop(); - // Test that the JSON Schema fails to parse if "additionalItems" is not a boolean or object. - assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1.0}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {additionalItems: "true"}}).itcount()); +// Test that the JSON Schema fails to parse if "additionalItems" is not a boolean or object. +assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {additionalItems: 1.0}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {additionalItems: "true"}}).itcount()); - // Test that "additionalItems" has no effect at the top level (but is still accepted). - assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: false}, {}, true); - assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: true}, {}, true); - assertSchemaMatch( - coll, {items: [{type: "number"}], additionalItems: {type: "string"}}, {}, true); +// Test that "additionalItems" has no effect at the top level (but is still accepted). +assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: false}, {}, true); +assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: true}, {}, true); +assertSchemaMatch(coll, {items: [{type: "number"}], additionalItems: {type: "string"}}, {}, true); - // Test that "additionalItems" has no effect when "items" is not present. - let schema = {properties: {a: {additionalItems: false}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "blah"}, true); - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +// Test that "additionalItems" has no effect when "items" is not present. 
+let schema = {properties: {a: {additionalItems: false}}}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "blah"}, true); +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - schema = {properties: {a: {additionalItems: {type: "object"}}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "blah"}, true); - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +schema = { + properties: {a: {additionalItems: {type: "object"}}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "blah"}, true); +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - // Test that "additionalItems" has no effect when "items" is a schema that applies to every - // element in the array. - schema = {properties: {a: {items: {}, additionalItems: false}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "blah"}, true); - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +// Test that "additionalItems" has no effect when "items" is a schema that applies to every +// element in the array. +schema = { + properties: {a: {items: {}, additionalItems: false}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "blah"}, true); +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - schema = {properties: {a: {items: {}, additionalItems: {type: "object"}}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "blah"}, true); - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +schema = { + properties: {a: {items: {}, additionalItems: {type: "object"}}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "blah"}, true); +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - // Test that {additionalItems: false} correctly bans array indexes not covered by "items". - schema = { - properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: false}} - }; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [229]}, true); - assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true); - assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false); +// Test that {additionalItems: false} correctly bans array indexes not covered by "items". +schema = { + properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: false}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [229]}, true); +assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true); +assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false); - // Test that {additionalItems: true} has no effect. - assertSchemaMatch( - coll, - {properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: true}}}, - {a: [229, "West 43rd", "Street"]}, - true); - assertSchemaMatch( - coll, {properties: {a: {items: [{not: {}}], additionalItems: true}}}, {a: []}, true); +// Test that {additionalItems: true} has no effect. 
+assertSchemaMatch( + coll, + {properties: {a: {items: [{type: "number"}, {type: "string"}], additionalItems: true}}}, + {a: [229, "West 43rd", "Street"]}, + true); +assertSchemaMatch( + coll, {properties: {a: {items: [{not: {}}], additionalItems: true}}}, {a: []}, true); - // Test that the "additionalItems" schema only applies to array indexes not covered by "items". - schema = { - properties: - {a: {items: [{type: "number"}, {type: "string"}], additionalItems: {type: "object"}}} - }; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [229]}, true); - assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true); - assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false); - assertSchemaMatch(coll, schema, {a: [229, "West 43rd", {}]}, true); +// Test that the "additionalItems" schema only applies to array indexes not covered by "items". +schema = { + properties: + {a: {items: [{type: "number"}, {type: "string"}], additionalItems: {type: "object"}}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [229]}, true); +assertSchemaMatch(coll, schema, {a: [229, "West 43rd"]}, true); +assertSchemaMatch(coll, schema, {a: [229, "West 43rd", "Street"]}, false); +assertSchemaMatch(coll, schema, {a: [229, "West 43rd", {}]}, true); - // Test that an empty array does not fail against "additionalItems". - assertSchemaMatch( - coll, {properties: {a: {items: [{not: {}}], additionalItems: false}}}, {a: []}, true); - assertSchemaMatch( - coll, {properties: {a: {items: [{not: {}}], additionalItems: {not: {}}}}}, {a: []}, true); +// Test that an empty array does not fail against "additionalItems". +assertSchemaMatch( + coll, {properties: {a: {items: [{not: {}}], additionalItems: false}}}, {a: []}, true); +assertSchemaMatch( + coll, {properties: {a: {items: [{not: {}}], additionalItems: {not: {}}}}}, {a: []}, true); }()); diff --git a/jstests/core/json_schema/additional_properties.js b/jstests/core/json_schema/additional_properties.js index ce699a6036c..0a78d2415ef 100644 --- a/jstests/core/json_schema/additional_properties.js +++ b/jstests/core/json_schema/additional_properties.js @@ -4,247 +4,234 @@ * Tests for the JSON Schema 'additionalProperties' keyword. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.schema_allowed_properties; +const coll = db.schema_allowed_properties; - // Tests for {additionalProperties:false} at the top level. - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1}, true); - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1}, true); - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, b: 1}, false); - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: false}, - {_id: 1, a: 1, b: 1}, - false); +// Tests for {additionalProperties:false} at the top level. 
+assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1}, true); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1}, true); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, b: 1}, false); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: false}, {_id: 1, a: 1, b: 1}, false); - // Tests for {additionalProperties:true} at the top level. - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1}, true); - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1}, true); - assertSchemaMatch( - coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, b: 1}, true); - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: true}, - {_id: 1, a: 1, b: 1}, - true); +// Tests for {additionalProperties:true} at the top level. +assertSchemaMatch(coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1}, true); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1}, true); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, b: 1}, true); +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: true}, {_id: 1, a: 1, b: 1}, true); - // Tests for additionalProperties with a nested schema at the top level. - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, - {_id: 1}, - true); - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, - {_id: 1, a: 1}, - true); - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, - {_id: 1, b: 1}, - true); - assertSchemaMatch(coll, - {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, - {_id: 1, b: "str"}, - false); +// Tests for additionalProperties with a nested schema at the top level. +assertSchemaMatch( + coll, {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, {_id: 1}, true); +assertSchemaMatch(coll, + {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, + {_id: 1, a: 1}, + true); +assertSchemaMatch(coll, + {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, + {_id: 1, b: 1}, + true); +assertSchemaMatch(coll, + {properties: {_id: {}, a: {}}, additionalProperties: {type: "number"}}, + {_id: 1, b: "str"}, + false); - // Tests for additionalProperties together with patternProperties at the top level. 
- assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1}, - true); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1, a: 1}, - true); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1, a: 1, ba: "str"}, - true); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1, a: 1, ba: "str", other: 1}, - true); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1, a: 1, ba: "str", other: "str"}, - false); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - }, - {_id: 1, a: 1, ba: 1, other: 1}, - false); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: false - }, - {_id: 1, a: 1, ba: "str"}, - true); - assertSchemaMatch(coll, - { - properties: {_id: {}, a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: false - }, - {_id: 1, a: 1, ba: "str", other: 1}, - false); +// Tests for additionalProperties together with patternProperties at the top level. +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1}, + true); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1, a: 1}, + true); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1, a: 1, ba: "str"}, + true); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1, a: 1, ba: "str", other: 1}, + true); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1, a: 1, ba: "str", other: "str"}, + false); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + }, + {_id: 1, a: 1, ba: 1, other: 1}, + false); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: false + }, + {_id: 1, a: 1, ba: "str"}, + true); +assertSchemaMatch(coll, + { + properties: {_id: {}, a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: false + }, + {_id: 1, a: 1, ba: "str", other: 1}, + false); - // Tests for {additionalProperties:false} in a nested schema. 
- assertSchemaMatch( - coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {}, true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, - {obj: 1}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, - {obj: {}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, - {obj: {a: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, - {obj: {a: 1, b: 1}}, - false); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, - {obj: {b: 1}}, - false); +// Tests for {additionalProperties:false} in a nested schema. +assertSchemaMatch( + coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {}, true); +assertSchemaMatch( + coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {obj: 1}, true); +assertSchemaMatch( + coll, {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, {obj: {}}, true); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, + {obj: {a: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, + {obj: {a: 1, b: 1}}, + false); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: false}}}, + {obj: {b: 1}}, + false); - // Tests for {additionalProperties:true} in a nested schema. - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, - {obj: {}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, - {obj: {a: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, - {obj: {a: 1, b: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, - {obj: {b: 1}}, - true); +// Tests for {additionalProperties:true} in a nested schema. +assertSchemaMatch( + coll, {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, {obj: {}}, true); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, + {obj: {a: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, + {obj: {a: 1, b: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: true}}}, + {obj: {b: 1}}, + true); - // Tests for additionalProperties whose value is a nested schema, which is itself contained - // within a nested schema. 
- assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: 1}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: {}}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: {a: 1}}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: {a: 1, b: 1}}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: {a: 1, b: "str"}}, - false); - assertSchemaMatch( - coll, - {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, - {obj: {b: "str"}}, - false); +// Tests for additionalProperties whose value is a nested schema, which is itself contained +// within a nested schema. +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: 1}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: {}}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: {a: 1}}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: {a: 1, b: 1}}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: {a: 1, b: "str"}}, + false); +assertSchemaMatch( + coll, + {properties: {obj: {properties: {a: {}}, additionalProperties: {type: "number"}}}}, + {obj: {b: "str"}}, + false); - // Tests for additionalProperties together with patternProperties, both inside a nested schema. - assertSchemaMatch(coll, - { - properties: { - obj: { - properties: {a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - } - } - }, - {obj: {}}, - true); - assertSchemaMatch(coll, - { - properties: { - obj: { - properties: {a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - } - } - }, - {obj: {a: 1, ba: "str", c: 1}}, - true); - assertSchemaMatch(coll, - { - properties: { - obj: { - properties: {a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - } - } - }, - {obj: {a: 1, ba: 1, c: 1}}, - false); - assertSchemaMatch(coll, - { - properties: { - obj: { - properties: {a: {}}, - patternProperties: {"^b": {type: "string"}}, - additionalProperties: {type: "number"} - } - } - }, - {obj: {a: 1, ba: 1, c: "str"}}, - false); +// Tests for additionalProperties together with patternProperties, both inside a nested schema. 
+assertSchemaMatch(coll, + { + properties: { + obj: { + properties: {a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + } + } + }, + {obj: {}}, + true); +assertSchemaMatch(coll, + { + properties: { + obj: { + properties: {a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + } + } + }, + {obj: {a: 1, ba: "str", c: 1}}, + true); +assertSchemaMatch(coll, + { + properties: { + obj: { + properties: {a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + } + } + }, + {obj: {a: 1, ba: 1, c: 1}}, + false); +assertSchemaMatch(coll, + { + properties: { + obj: { + properties: {a: {}}, + patternProperties: {"^b": {type: "string"}}, + additionalProperties: {type: "number"} + } + } + }, + {obj: {a: 1, ba: 1, c: "str"}}, + false); }()); diff --git a/jstests/core/json_schema/bsontype.js b/jstests/core/json_schema/bsontype.js index f5ec15a06a8..ac874c66788 100644 --- a/jstests/core/json_schema/bsontype.js +++ b/jstests/core/json_schema/bsontype.js @@ -4,301 +4,285 @@ * Tests for the non-standard 'bsonType' keyword in JSON Schema, as well as some tests for 'type'. */ (function() { - "use strict"; - - load("jstests/libs/assert_schema_match.js"); - - const coll = db.jstests_schema_bsontype; - - // bsonType "double". - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: 3}, true); - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberLong(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberInt(3)}, false); - assertSchemaMatch( - coll, {properties: {num: {bsonType: "double"}}}, {num: NumberDecimal(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: {}}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: [3]}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {foo: {}}, true); - - // type "double" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "double"}}}}).itcount()); - - // bsonType "string". - assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: ""}, true); - assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: true}, false); - assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: [1, "foo"]}, false); - - // type "string". - assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: ""}, true); - assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: true}, false); - assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: [1, "foo"]}, false); - - // bsonType "object". - assertSchemaMatch(coll, {bsonType: "object"}, {}, true); - assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: {}}, true); - assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: true}, false); - assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: [{}]}, false); - - // type "object". - assertSchemaMatch(coll, {type: "object"}, {}, true); - assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: {}}, true); - assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: true}, false); - assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: [{}]}, false); - - // bsonType "array". 
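[Editor's aside, not part of the commit above.] The additional_properties.js hunk exercises how a nested schema combines declared properties, patternProperties, and additionalProperties: a declared property is unconstrained, a pattern-matched property takes the pattern's subschema, and everything else falls to additionalProperties. The following is a minimal standalone sketch of that rule using a plain $jsonSchema find instead of the test's assertSchemaMatch helper; the collection name is hypothetical and a connected mongo shell is assumed. The bsontype.js hunk resumes below.

var apColl = db.getCollection("additional_properties_example");  // hypothetical collection name
apColl.drop();
// "a" is declared, "ba" matches the ^b pattern and is a string, "c" falls to additionalProperties and is a number.
apColl.insert({obj: {a: 1, ba: "str", c: 2}});
// Fails: "ba" matches the ^b pattern but is not a string.
apColl.insert({obj: {a: 1, ba: 7}});
var schemaFilter = {
    $jsonSchema: {
        properties: {
            obj: {
                properties: {a: {}},
                patternProperties: {"^b": {type: "string"}},
                additionalProperties: {type: "number"}
            }
        }
    }
};
assert.eq(1, apColl.find(schemaFilter).itcount());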
- assertSchemaMatch(coll, {bsonType: "array"}, {arr: []}, false); - assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: []}, true); - assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: {}}, false); - - // type "array". - assertSchemaMatch(coll, {type: "array"}, {arr: []}, false); - assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: []}, true); - assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: {}}, false); - - // bsonType "binData". - assertSchemaMatch(coll, - {properties: {bin: {bsonType: "binData"}}}, - {bin: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}, - true); - assertSchemaMatch(coll, {properties: {bin: {bsonType: "binData"}}}, {bin: {}}, false); - - // type "binData" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {bin: {type: "binData"}}}}).itcount()); - - // bsonType "undefined". - assertSchemaMatch( - coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: undefined}, true); - assertSchemaMatch(coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {}, false); - assertSchemaMatch( - coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: null}, false); - - // type "undefined" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {u: {type: "undefined"}}}}).itcount()); - - // bsonType "objectId". - assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: ObjectId()}, true); - assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: 1}, false); - - // type "objectId" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {o: {type: "objectId"}}}}).itcount()); - - // bsonType "bool". - assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: true}, true); - assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: false}, true); - assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: 1}, false); - - // bsonType "boolean" should fail. - assert.throws(() => - coll.find({$jsonSchema: {properties: {b: {bsonType: "boolean"}}}}).itcount()); - - // type "boolean". - assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: true}, true); - assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: false}, true); - assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: 1}, false); - - // type "bool" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "bool"}}}}).itcount()); - - // bsonType "date". - assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: new Date()}, true); - assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: 1}, false); - - // type "date" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "date"}}}}).itcount()); - - // bsonType "null". - assertSchemaMatch( - coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {n: null}, true); - assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {}, false); - assertSchemaMatch( - coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {u: undefined}, false); - - // type "null". - assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {n: null}, true); - assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {}, false); - assertSchemaMatch( - coll, {properties: {n: {type: "null"}}, required: ["n"]}, {u: undefined}, false); - - // bsonType "regex". 
- assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: /^abc/}, true); - assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: "^abc"}, false); - - // type "regex" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {r: {type: "regex"}}}}).itcount()); - - // bsonType "javascript". - assertSchemaMatch(coll, - {properties: {code: {bsonType: "javascript"}}}, - {code: Code("function() { return true; }")}, - true); - assertSchemaMatch(coll, {properties: {code: {bsonType: "javascript"}}}, {code: 1}, false); - - // type "javascript" should fail. - assert.throws( - () => coll.find({$jsonSchema: {properties: {code: {type: "javascript"}}}}).itcount()); - - // bsonType "javascriptWithScope". - assertSchemaMatch(coll, - {properties: {code: {bsonType: "javascriptWithScope"}}}, - {code: Code("function() { return true; }", {scope: true})}, - true); - assertSchemaMatch( - coll, {properties: {code: {bsonType: "javascriptWithScope"}}}, {code: 1}, false); - - // type "javascriptWithScope" should fail. - assert.throws(() => - coll.find({$jsonSchema: {properties: {code: {type: "javascriptWithScope"}}}}) - .itcount()); - - // bsonType "int". - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberInt(3)}, true); - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberLong(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: 3}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberDecimal(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: {}}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {foo: {}}, true); - - // type "int" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "int"}}}}).itcount()); - - // bsonType "integer" should fail. - assert.throws( - () => coll.find({$jsonSchema: {properties: {num: {bsonType: "integer"}}}}).itcount()); - - // type "integer" is explicitly unsupported and should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "integer"}}}}).itcount()); - - // bsonType "timestamp". - assertSchemaMatch( - coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: Timestamp(0, 1234)}, true); - assertSchemaMatch(coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: new Date()}, false); - - // type "timestamp" should fail. - assert.throws(() => - coll.find({$jsonSchema: {properties: {ts: {type: "timestamp"}}}}).itcount()); - - // bsonType "long". - assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberLong(3)}, true); - assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberInt(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: 3}, false); - assertSchemaMatch( - coll, {properties: {num: {bsonType: "long"}}}, {num: NumberDecimal(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: {}}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {foo: {}}, true); - - // type "long" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "long"}}}}).itcount()); - - // bsonType "decimal". 
- assertSchemaMatch( - coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberDecimal(3)}, true); - assertSchemaMatch( - coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberLong(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberInt(3)}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: 3}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: {}}, false); - assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {foo: {}}, true); - - // type "decimal" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "decimal"}}}}).itcount()); - - // bsonType "minKey". - assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MinKey()}, true); - assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MaxKey()}, false); - - // type "minKey" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "minKey"}}}}).itcount()); - - // bsonType "maxKey". - assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MaxKey()}, true); - assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MinKey()}, false); - - // type "maxKey" should fail. - assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "maxKey"}}}}).itcount()); - - // Test that 'bsonType' keyword rejects unknown type aliases. - assert.throws(() => - coll.find({$jsonSchema: {properties: {f: {bsonType: "unknown"}}}}).itcount()); - - // Test that 'type' keyword rejects unknown type aliases. - assert.throws(() => coll.find({$jsonSchema: {properties: {f: {type: "unknown"}}}}).itcount()); - - // Specifying both "type" and "bsonType" in the same schema should fail. - assert.throws(() => coll.find({$jsonSchema: {bsonType: "string", type: "string"}}).itcount()); - assert.throws( - () => coll.find({$jsonSchema: {properties: {a: {bsonType: "string", type: "string"}}}}) - .itcount()); - - // "type" and "bsonType" are both allowed when they are not sibling keywords in the same - // subschema. - assertSchemaMatch( - coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: {}}, true); - assertSchemaMatch( - coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: []}, false); - assertSchemaMatch(coll, - {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, - {a: NumberLong(3), b: null}, - true); - assertSchemaMatch( - coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {a: NumberLong(3)}, true); - assertSchemaMatch( - coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {b: null}, true); - assertSchemaMatch(coll, - {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, - {b: null}, - {a: 3, b: null}, - false); - assertSchemaMatch(coll, - {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, - {b: null}, - {a: NumberLong(3), b: 3}, - false); - - // Test that the 'type' keyword rejects an array of aliases if one of those aliases is invalid. - assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", "objectId"]}}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {f: {type: ["object", "unknown"]}}}).itcount()); - - // Test that the 'bsonType' keyword rejects an array of aliases if one of those aliases is - // invalid. 
- assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", "unknown"]}}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {bsonType: ["unknown"]}}).itcount()); - - // Test that the 'type' keyword rejects an array which contains a numerical type alias. - assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", 2]}}}).itcount()); - - // Test that the 'bsonType' keyword rejects an array which contains a numerical type alias. - assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", 2]}}}).itcount()); - - // Test that the 'type' keyword rejects an array which contains duplicate aliases. - assert.throws( - () => coll.find({$jsonSchema: {f: {type: ["number", "string", "number"]}}}).itcount()); - - // Test that the 'bsonType' keyword rejects an array which contains duplicate aliases. - assert.throws( - () => coll.find({$jsonSchema: {f: {bsonType: ["number", "string", "number"]}}}).itcount()); - - // Test that the 'type' keyword can accept an array of type aliases. - assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: 1}, true); - assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: "str"}, true); - assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {}, true); - assertSchemaMatch( - coll, {properties: {f: {type: ["number", "string"]}}}, {f: ["str", 1]}, false); - assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: {}}, false); - - // Test that the 'bsonType' keyword can accept an array of type aliases. - assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: 1}, true); - assertSchemaMatch( - coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: ObjectId()}, true); - assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {}, true); - assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: [1]}, false); - assertSchemaMatch( - coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: NumberInt(1)}, false); - - // Test that the 'type' keyword with an array of types is valid at the top-level. - assertSchemaMatch(coll, {type: ["object", "string"]}, {}, true); - assertSchemaMatch(coll, {type: ["object", "string"]}, {foo: 1, bar: 1}, true); - - // Test that the 'bsonType' keyword with an array of types is valid at the top-level. - assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {}, true); - assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {foo: 1, bar: 1}, true); +"use strict"; + +load("jstests/libs/assert_schema_match.js"); + +const coll = db.jstests_schema_bsontype; + +// bsonType "double". +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: 3}, true); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberLong(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberInt(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: NumberDecimal(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: {}}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {num: [3]}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "double"}}}, {foo: {}}, true); + +// type "double" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "double"}}}}).itcount()); + +// bsonType "string". 
+assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: ""}, true); +assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: true}, false); +assertSchemaMatch(coll, {properties: {str: {bsonType: "string"}}}, {str: [1, "foo"]}, false); + +// type "string". +assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: ""}, true); +assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: true}, false); +assertSchemaMatch(coll, {properties: {str: {type: "string"}}}, {str: [1, "foo"]}, false); + +// bsonType "object". +assertSchemaMatch(coll, {bsonType: "object"}, {}, true); +assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: {}}, true); +assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: true}, false); +assertSchemaMatch(coll, {properties: {obj: {bsonType: "object"}}}, {obj: [{}]}, false); + +// type "object". +assertSchemaMatch(coll, {type: "object"}, {}, true); +assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: {}}, true); +assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: true}, false); +assertSchemaMatch(coll, {properties: {obj: {type: "object"}}}, {obj: [{}]}, false); + +// bsonType "array". +assertSchemaMatch(coll, {bsonType: "array"}, {arr: []}, false); +assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: []}, true); +assertSchemaMatch(coll, {properties: {arr: {bsonType: "array"}}}, {arr: {}}, false); + +// type "array". +assertSchemaMatch(coll, {type: "array"}, {arr: []}, false); +assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: []}, true); +assertSchemaMatch(coll, {properties: {arr: {type: "array"}}}, {arr: {}}, false); + +// bsonType "binData". +assertSchemaMatch(coll, + {properties: {bin: {bsonType: "binData"}}}, + {bin: BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA")}, + true); +assertSchemaMatch(coll, {properties: {bin: {bsonType: "binData"}}}, {bin: {}}, false); + +// type "binData" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {bin: {type: "binData"}}}}).itcount()); + +// bsonType "undefined". +assertSchemaMatch( + coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: undefined}, true); +assertSchemaMatch(coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {}, false); +assertSchemaMatch( + coll, {properties: {u: {bsonType: "undefined"}}, required: ["u"]}, {u: null}, false); + +// type "undefined" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {u: {type: "undefined"}}}}).itcount()); + +// bsonType "objectId". +assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: ObjectId()}, true); +assertSchemaMatch(coll, {properties: {o: {bsonType: "objectId"}}}, {o: 1}, false); + +// type "objectId" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {o: {type: "objectId"}}}}).itcount()); + +// bsonType "bool". +assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: true}, true); +assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: false}, true); +assertSchemaMatch(coll, {properties: {b: {bsonType: "bool"}}}, {b: 1}, false); + +// bsonType "boolean" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {b: {bsonType: "boolean"}}}}).itcount()); + +// type "boolean". 
+assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: true}, true); +assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: false}, true); +assertSchemaMatch(coll, {properties: {b: {type: "boolean"}}}, {b: 1}, false); + +// type "bool" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "bool"}}}}).itcount()); + +// bsonType "date". +assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: new Date()}, true); +assertSchemaMatch(coll, {properties: {date: {bsonType: "date"}}}, {date: 1}, false); + +// type "date" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {b: {type: "date"}}}}).itcount()); + +// bsonType "null". +assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {n: null}, true); +assertSchemaMatch(coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {}, false); +assertSchemaMatch( + coll, {properties: {n: {bsonType: "null"}}, required: ["n"]}, {u: undefined}, false); + +// type "null". +assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {n: null}, true); +assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {}, false); +assertSchemaMatch(coll, {properties: {n: {type: "null"}}, required: ["n"]}, {u: undefined}, false); + +// bsonType "regex". +assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: /^abc/}, true); +assertSchemaMatch(coll, {properties: {r: {bsonType: "regex"}}}, {r: "^abc"}, false); + +// type "regex" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {r: {type: "regex"}}}}).itcount()); + +// bsonType "javascript". +assertSchemaMatch(coll, + {properties: {code: {bsonType: "javascript"}}}, + {code: Code("function() { return true; }")}, + true); +assertSchemaMatch(coll, {properties: {code: {bsonType: "javascript"}}}, {code: 1}, false); + +// type "javascript" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {code: {type: "javascript"}}}}).itcount()); + +// bsonType "javascriptWithScope". +assertSchemaMatch(coll, + {properties: {code: {bsonType: "javascriptWithScope"}}}, + {code: Code("function() { return true; }", {scope: true})}, + true); +assertSchemaMatch(coll, {properties: {code: {bsonType: "javascriptWithScope"}}}, {code: 1}, false); + +// type "javascriptWithScope" should fail. +assert.throws( + () => coll.find({$jsonSchema: {properties: {code: {type: "javascriptWithScope"}}}}).itcount()); + +// bsonType "int". +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberInt(3)}, true); +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberLong(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: 3}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: NumberDecimal(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {num: {}}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "int"}}}, {foo: {}}, true); + +// type "int" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "int"}}}}).itcount()); + +// bsonType "integer" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {bsonType: "integer"}}}}).itcount()); + +// type "integer" is explicitly unsupported and should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "integer"}}}}).itcount()); + +// bsonType "timestamp". 
+assertSchemaMatch( + coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: Timestamp(0, 1234)}, true); +assertSchemaMatch(coll, {properties: {ts: {bsonType: "timestamp"}}}, {ts: new Date()}, false); + +// type "timestamp" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {ts: {type: "timestamp"}}}}).itcount()); + +// bsonType "long". +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberLong(3)}, true); +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberInt(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: 3}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: NumberDecimal(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {num: {}}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "long"}}}, {foo: {}}, true); + +// type "long" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "long"}}}}).itcount()); + +// bsonType "decimal". +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberDecimal(3)}, true); +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberLong(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: NumberInt(3)}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: 3}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {num: {}}, false); +assertSchemaMatch(coll, {properties: {num: {bsonType: "decimal"}}}, {foo: {}}, true); + +// type "decimal" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "decimal"}}}}).itcount()); + +// bsonType "minKey". +assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MinKey()}, true); +assertSchemaMatch(coll, {properties: {k: {bsonType: "minKey"}}}, {k: MaxKey()}, false); + +// type "minKey" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "minKey"}}}}).itcount()); + +// bsonType "maxKey". +assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MaxKey()}, true); +assertSchemaMatch(coll, {properties: {k: {bsonType: "maxKey"}}}, {k: MinKey()}, false); + +// type "maxKey" should fail. +assert.throws(() => coll.find({$jsonSchema: {properties: {num: {type: "maxKey"}}}}).itcount()); + +// Test that 'bsonType' keyword rejects unknown type aliases. +assert.throws(() => coll.find({$jsonSchema: {properties: {f: {bsonType: "unknown"}}}}).itcount()); + +// Test that 'type' keyword rejects unknown type aliases. +assert.throws(() => coll.find({$jsonSchema: {properties: {f: {type: "unknown"}}}}).itcount()); + +// Specifying both "type" and "bsonType" in the same schema should fail. +assert.throws(() => coll.find({$jsonSchema: {bsonType: "string", type: "string"}}).itcount()); +assert.throws(() => + coll.find({$jsonSchema: {properties: {a: {bsonType: "string", type: "string"}}}}) + .itcount()); + +// "type" and "bsonType" are both allowed when they are not sibling keywords in the same +// subschema. 
+assertSchemaMatch(coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: {}}, true); +assertSchemaMatch( + coll, {type: "object", properties: {obj: {bsonType: "object"}}}, {obj: []}, false); +assertSchemaMatch(coll, + {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, + {a: NumberLong(3), b: null}, + true); +assertSchemaMatch( + coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {a: NumberLong(3)}, true); +assertSchemaMatch(coll, {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, {b: null}, true); +assertSchemaMatch(coll, + {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, + {b: null}, + {a: 3, b: null}, + false); +assertSchemaMatch(coll, + {properties: {a: {bsonType: "long"}, b: {type: "null"}}}, + {b: null}, + {a: NumberLong(3), b: 3}, + false); + +// Test that the 'type' keyword rejects an array of aliases if one of those aliases is invalid. +assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", "objectId"]}}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {f: {type: ["object", "unknown"]}}}).itcount()); + +// Test that the 'bsonType' keyword rejects an array of aliases if one of those aliases is +// invalid. +assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", "unknown"]}}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {bsonType: ["unknown"]}}).itcount()); + +// Test that the 'type' keyword rejects an array which contains a numerical type alias. +assert.throws(() => coll.find({$jsonSchema: {f: {type: ["number", 2]}}}).itcount()); + +// Test that the 'bsonType' keyword rejects an array which contains a numerical type alias. +assert.throws(() => coll.find({$jsonSchema: {f: {bsonType: ["number", 2]}}}).itcount()); + +// Test that the 'type' keyword rejects an array which contains duplicate aliases. +assert.throws(() => + coll.find({$jsonSchema: {f: {type: ["number", "string", "number"]}}}).itcount()); + +// Test that the 'bsonType' keyword rejects an array which contains duplicate aliases. +assert.throws( + () => coll.find({$jsonSchema: {f: {bsonType: ["number", "string", "number"]}}}).itcount()); + +// Test that the 'type' keyword can accept an array of type aliases. +assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: 1}, true); +assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: "str"}, true); +assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {}, true); +assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: ["str", 1]}, false); +assertSchemaMatch(coll, {properties: {f: {type: ["number", "string"]}}}, {f: {}}, false); + +// Test that the 'bsonType' keyword can accept an array of type aliases. +assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: 1}, true); +assertSchemaMatch( + coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: ObjectId()}, true); +assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {}, true); +assertSchemaMatch(coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: [1]}, false); +assertSchemaMatch( + coll, {properties: {f: {bsonType: ["objectId", "double"]}}}, {f: NumberInt(1)}, false); + +// Test that the 'type' keyword with an array of types is valid at the top-level. 
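[Editor's aside, not part of the commit.] The bsontype.js hunk checks that the non-standard 'bsonType' keyword accepts BSON-specific aliases ("long", "objectId", "int", ...) that the standard 'type' keyword rejects, and that either keyword may take an array of aliases as long as the two are not siblings in the same subschema. A rough sketch of the same keywords in a collection validator follows; the collection name is hypothetical and a connected mongo shell is assumed. The bsontype.js hunk continues below.

db.getCollection("bsontype_example").drop();  // hypothetical collection name
assert.commandWorked(db.createCollection("bsontype_example", {
    validator: {
        $jsonSchema: {
            bsonType: "object",
            properties: {
                count: {bsonType: ["int", "long"]},  // BSON-specific aliases; the standard 'type' keyword rejects these
                label: {type: ["string", "null"]}    // standard JSON Schema aliases in a separate subschema
            }
        }
    }
}));
var valColl = db.getCollection("bsontype_example");
assert.writeOK(valColl.insert({count: NumberLong(5), label: "ok"}));
assert.writeError(valColl.insert({count: 5.5, label: "bad"}));  // a double is neither "int" nor "long"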
+assertSchemaMatch(coll, {type: ["object", "string"]}, {}, true); +assertSchemaMatch(coll, {type: ["object", "string"]}, {foo: 1, bar: 1}, true); + +// Test that the 'bsonType' keyword with an array of types is valid at the top-level. +assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {}, true); +assertSchemaMatch(coll, {bsonType: ["object", "double"]}, {foo: 1, bar: 1}, true); }()); diff --git a/jstests/core/json_schema/dependencies.js b/jstests/core/json_schema/dependencies.js index 442976aa5fc..ffcd917889b 100644 --- a/jstests/core/json_schema/dependencies.js +++ b/jstests/core/json_schema/dependencies.js @@ -4,122 +4,107 @@ * Tests for the JSON Schema 'dependencies' keyword. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.jstests_schema_dependencies; +const coll = db.jstests_schema_dependencies; - // Top-level schema dependency. - assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {}, true); - assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1, bar: 1}, true); - assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {bar: 1}, true); - assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1}, false); +// Top-level schema dependency. +assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {}, true); +assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1, bar: 1}, true); +assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {bar: 1}, true); +assertSchemaMatch(coll, {dependencies: {foo: {required: ["bar"]}}}, {foo: 1}, false); - assertSchemaMatch( - coll, - {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, - {}, - true); - assertSchemaMatch( - coll, - {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, - {bar: 1}, - true); - assertSchemaMatch( - coll, - {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, - {foo: 1, bar: 1}, - true); - assertSchemaMatch( - coll, - {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, - {foo: 1, bar: 1, baz: 1}, - false); - assertSchemaMatch( - coll, - {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, - {foo: 1, bar: 1, baz: "str"}, - true); +assertSchemaMatch(coll, + {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, + {}, + true); +assertSchemaMatch(coll, + {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, + {bar: 1}, + true); +assertSchemaMatch(coll, + {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, + {foo: 1, bar: 1}, + true); +assertSchemaMatch(coll, + {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, + {foo: 1, bar: 1, baz: 1}, + false); +assertSchemaMatch(coll, + {dependencies: {foo: {required: ["bar"], properties: {baz: {type: "string"}}}}}, + {foo: 1, bar: 1, baz: "str"}, + true); - // Top-level property dependency. 
- assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {}, true); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1}, true); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {baz: 1}, true); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1, baz: 1}, true); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1}, false); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1}, false); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, baz: 1}, false); - assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1, baz: 1}, true); +// Top-level property dependency. +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {}, true); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1}, true); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {baz: 1}, true); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {bar: 1, baz: 1}, true); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1}, false); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1}, false); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, baz: 1}, false); +assertSchemaMatch(coll, {dependencies: {foo: ["bar", "baz"]}}, {foo: 1, bar: 1, baz: 1}, true); - // Nested schema dependency. - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {}, true); - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: 1}, true); - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {}}, true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, - {obj: {bar: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, - {obj: {foo: 1}}, - false); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, - {obj: {foo: 1, bar: 1}}, - true); +// Nested schema dependency. +assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: 1}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {}}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {bar: 1}}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, {obj: {foo: 1}}, false); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {foo: {required: ["bar"]}}}}}, + {obj: {foo: 1, bar: 1}}, + true); - // Nested property dependency. - assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {}, true); - assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: 1}, true); - assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {}}, true); - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {bar: 1}}, true); - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1}}, false); - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1, bar: 1}}, true); +// Nested property dependency. 
+assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {}, true); +assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: 1}, true); +assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {}}, true); +assertSchemaMatch(coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {bar: 1}}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1}}, false); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {foo: ["bar"]}}}}, {obj: {foo: 1, bar: 1}}, true); - // Nested property dependency and nested schema dependency. - assertSchemaMatch( - coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {}, true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: 1}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {b: 1, d: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {a: 1, b: 1, c: 1}}, - false); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {a: 1, c: 0, d: 1}}, - false); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {b: 1, c: 1, d: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {a: 1, b: 1, d: 1}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, - {obj: {a: 1, b: 1, c: 1, d: 1}}, - true); +// Nested property dependency and nested schema dependency. +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {obj: 1}, true); +assertSchemaMatch( + coll, {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, {obj: {}}, true); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {b: 1, d: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {a: 1, b: 1, c: 1}}, + false); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {a: 1, c: 0, d: 1}}, + false); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {b: 1, c: 1, d: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {a: 1, b: 1, d: 1}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {dependencies: {a: ["b"], c: {required: ["d"]}}}}}, + {obj: {a: 1, b: 1, c: 1, d: 1}}, + true); - // Empty dependencies matches everything. - assertSchemaMatch(coll, {dependencies: {}}, {}, true); - assertSchemaMatch(coll, {properties: {obj: {dependencies: {}}}}, {obj: {}}, true); +// Empty dependencies matches everything. 
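[Editor's aside, not part of the commit.] The dependencies.js hunk covers both forms of the keyword: a property dependency (an array of field names that must all be present whenever the key field is present) and a schema dependency (a subschema the whole document must satisfy whenever the key field is present). A small self-contained sketch of the distinction, using a hypothetical collection and plain $jsonSchema finds in place of the assertSchemaMatch helper; the dependencies.js hunk resumes below.

var depColl = db.getCollection("dependencies_example");  // hypothetical collection name
depColl.drop();
depColl.insert({_id: 0});                  // no 'foo', so neither dependency applies
depColl.insert({_id: 1, foo: 1, bar: 1});  // 'foo' present and 'bar' present: both dependencies satisfied
depColl.insert({_id: 2, foo: 1});          // 'foo' present but 'bar' missing: both dependencies violated
// Property dependency: if 'foo' exists, 'bar' must also exist.
assert.eq(2, depColl.find({$jsonSchema: {dependencies: {foo: ["bar"]}}}).itcount());
// Schema dependency: if 'foo' exists, the document must also satisfy the subschema.
assert.eq(2, depColl.find({$jsonSchema: {dependencies: {foo: {required: ["bar"]}}}}).itcount());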
+assertSchemaMatch(coll, {dependencies: {}}, {}, true); +assertSchemaMatch(coll, {properties: {obj: {dependencies: {}}}}, {obj: {}}, true); }()); diff --git a/jstests/core/json_schema/encrypt.js b/jstests/core/json_schema/encrypt.js index b7e5c5ce0d5..32d93f43da4 100644 --- a/jstests/core/json_schema/encrypt.js +++ b/jstests/core/json_schema/encrypt.js @@ -6,62 +6,60 @@ * ] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.jstests_schema_encrypt; - const encryptedBinDataElement = BinData(6, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); - const nonEncryptedBinDataElement = BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); +const coll = db.jstests_schema_encrypt; +const encryptedBinDataElement = BinData(6, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); +const nonEncryptedBinDataElement = BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAAA"); - // Only elements of type BinData with subtype '6' should match. - assertSchemaMatch( - coll, {properties: {bin: {encrypt: {}}}}, {bin: encryptedBinDataElement}, true); - assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: {}}, false); - assertSchemaMatch( - coll, {properties: {bin: {encrypt: {}}}}, {bin: nonEncryptedBinDataElement}, false); - // Nested in object. - assertSchemaMatch(coll, - {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, - {obj: {a: encryptedBinDataElement}}, - true); - assertSchemaMatch(coll, - {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, - {obj: {a: {}}}, - false); - assertSchemaMatch(coll, - {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, - {obj: {a: nonEncryptedBinDataElement}}, - false); +// Only elements of type BinData with subtype '6' should match. +assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: encryptedBinDataElement}, true); +assertSchemaMatch(coll, {properties: {bin: {encrypt: {}}}}, {bin: {}}, false); +assertSchemaMatch( + coll, {properties: {bin: {encrypt: {}}}}, {bin: nonEncryptedBinDataElement}, false); +// Nested in object. +assertSchemaMatch(coll, + {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, + {obj: {a: encryptedBinDataElement}}, + true); +assertSchemaMatch(coll, + {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, + {obj: {a: {}}}, + false); +assertSchemaMatch(coll, + {properties: {obj: {type: 'object', properties: {a: {encrypt: {}}}}}}, + {obj: {a: nonEncryptedBinDataElement}}, + false); - // Nested in array. - assertSchemaMatch(coll, - {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, - {arr: [encryptedBinDataElement, encryptedBinDataElement]}, - true); - assertSchemaMatch( - coll, {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, {arr: [{}, {}]}, false); - assertSchemaMatch(coll, - {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, - {arr: [encryptedBinDataElement, nonEncryptedBinDataElement]}, - false); +// Nested in array. +assertSchemaMatch(coll, + {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, + {arr: [encryptedBinDataElement, encryptedBinDataElement]}, + true); +assertSchemaMatch( + coll, {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, {arr: [{}, {}]}, false); +assertSchemaMatch(coll, + {properties: {arr: {type: 'array', items: {encrypt: {}}}}}, + {arr: [encryptedBinDataElement, nonEncryptedBinDataElement]}, + false); - // If array is not specified, should not traverse array of encrypted BinData's. 
- assertSchemaMatch(coll, - {properties: {bin: {encrypt: {}}}}, - {bin: [encryptedBinDataElement, encryptedBinDataElement]}, - false); +// If array is not specified, should not traverse array of encrypted BinData's. +assertSchemaMatch(coll, + {properties: {bin: {encrypt: {}}}}, + {bin: [encryptedBinDataElement, encryptedBinDataElement]}, + false); - // Encrypt alongside type/bsontype should fail to parse. - assert.commandFailedWithCode(coll.runCommand({ - find: "coll", - filter: {$jsonSchema: {properties: {bin: {encrypt: {}, type: 'object'}}}} - }), - ErrorCodes.FailedToParse); +// Encrypt alongside type/bsontype should fail to parse. +assert.commandFailedWithCode( + coll.runCommand( + {find: "coll", filter: {$jsonSchema: {properties: {bin: {encrypt: {}, type: 'object'}}}}}), + ErrorCodes.FailedToParse); - assert.commandFailedWithCode(coll.runCommand({ - find: "coll", - filter: {$jsonSchema: {properties: {bin: {encrypt: {}, bsonType: 'object'}}}} - }), - ErrorCodes.FailedToParse); +assert.commandFailedWithCode(coll.runCommand({ + find: "coll", + filter: {$jsonSchema: {properties: {bin: {encrypt: {}, bsonType: 'object'}}}} +}), + ErrorCodes.FailedToParse); }()); diff --git a/jstests/core/json_schema/items.js b/jstests/core/json_schema/items.js index 57974a3d612..3bb71c79c7d 100644 --- a/jstests/core/json_schema/items.js +++ b/jstests/core/json_schema/items.js @@ -4,58 +4,64 @@ * Tests the JSON Schema "items" keyword. */ (function() { - "use strict"; - - load("jstests/libs/assert_schema_match.js"); - - const coll = db.getCollection("json_schema_items"); - coll.drop(); - - // Test that the JSON Schema fails to parse if "items" is not an object or array. - assert.throws(() => coll.find({$jsonSchema: {items: 1}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {items: 1.0}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {items: "true"}}).itcount()); - - // Test that "items" has no effect at the top level (but is still accepted). - assertSchemaMatch(coll, {items: {type: "number"}}, {}, true); - assertSchemaMatch(coll, {items: [{type: "number"}]}, {}, true); - - // Test that "items" matches documents where the field is missing or not an array. - assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {}, true); - assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {a: -1}, true); - assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {}, true); - assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {a: -1}, true); - - // Test that when "items" is an object, the schema applies to all elements of the array. - let schema = {properties: {a: {items: {pattern: "a+b"}}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [7]}, true); - assertSchemaMatch(coll, schema, {a: [null]}, true); - assertSchemaMatch(coll, schema, {a: ["cab"]}, true); - assertSchemaMatch(coll, schema, {a: ["cab", "caab"]}, true); - assertSchemaMatch(coll, schema, {a: ["cab", "caab", "b"]}, false); - - // Test that when "items" is an array, each element schema only apply to elements at that - // position. 
- schema = {properties: {a: {items: [{multipleOf: 2}]}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [2]}, true); - assertSchemaMatch(coll, schema, {a: [2, 3]}, true); - assertSchemaMatch(coll, schema, {a: [3]}, false); - - schema = {properties: {a: {items: [{maxLength: 1}, {maxLength: 2}]}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: ["1"]}, true); - assertSchemaMatch(coll, schema, {a: ["1"]}, true); - assertSchemaMatch(coll, schema, {a: ["1", "12"]}, true); - assertSchemaMatch(coll, schema, {a: ["1", "12", "123"]}, true); - assertSchemaMatch(coll, schema, {a: ["12"]}, false); - assertSchemaMatch(coll, schema, {a: ["1", "123"]}, false); - - // Test that "items" has no effect when it is an empty array (but is still accepted). - schema = {properties: {a: {items: []}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "blah"}, true); - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1, "foo", {}]}, true); +"use strict"; + +load("jstests/libs/assert_schema_match.js"); + +const coll = db.getCollection("json_schema_items"); +coll.drop(); + +// Test that the JSON Schema fails to parse if "items" is not an object or array. +assert.throws(() => coll.find({$jsonSchema: {items: 1}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {items: 1.0}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {items: "true"}}).itcount()); + +// Test that "items" has no effect at the top level (but is still accepted). +assertSchemaMatch(coll, {items: {type: "number"}}, {}, true); +assertSchemaMatch(coll, {items: [{type: "number"}]}, {}, true); + +// Test that "items" matches documents where the field is missing or not an array. +assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {}, true); +assertSchemaMatch(coll, {properties: {a: {items: {minimum: 0}}}}, {a: -1}, true); +assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {}, true); +assertSchemaMatch(coll, {properties: {a: {items: [{minimum: 0}]}}}, {a: -1}, true); + +// Test that when "items" is an object, the schema applies to all elements of the array. +let schema = {properties: {a: {items: {pattern: "a+b"}}}}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [7]}, true); +assertSchemaMatch(coll, schema, {a: [null]}, true); +assertSchemaMatch(coll, schema, {a: ["cab"]}, true); +assertSchemaMatch(coll, schema, {a: ["cab", "caab"]}, true); +assertSchemaMatch(coll, schema, {a: ["cab", "caab", "b"]}, false); + +// Test that when "items" is an array, each element schema only apply to elements at that +// position. +schema = { + properties: {a: {items: [{multipleOf: 2}]}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [2]}, true); +assertSchemaMatch(coll, schema, {a: [2, 3]}, true); +assertSchemaMatch(coll, schema, {a: [3]}, false); + +schema = { + properties: {a: {items: [{maxLength: 1}, {maxLength: 2}]}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: ["1"]}, true); +assertSchemaMatch(coll, schema, {a: ["1"]}, true); +assertSchemaMatch(coll, schema, {a: ["1", "12"]}, true); +assertSchemaMatch(coll, schema, {a: ["1", "12", "123"]}, true); +assertSchemaMatch(coll, schema, {a: ["12"]}, false); +assertSchemaMatch(coll, schema, {a: ["1", "123"]}, false); + +// Test that "items" has no effect when it is an empty array (but is still accepted). 
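[Editor's aside, not part of the commit.] The items.js hunk distinguishes the two forms of 'items': a single subschema that every array element must satisfy, and an array of subschemas that apply positionally (elements beyond the listed positions are unconstrained). A brief sketch of the difference against a hypothetical collection, assuming a connected mongo shell; the items.js hunk closes just below, followed by the json_schema.js diff.

var itemsColl = db.getCollection("items_example");  // hypothetical collection name
itemsColl.drop();
itemsColl.insert({a: [2, 4, 6]});
itemsColl.insert({a: [2, 3]});
// Object form: every element must be a multiple of 2, so only the first document matches.
assert.eq(1, itemsColl.find({$jsonSchema: {properties: {a: {items: {multipleOf: 2}}}}}).itcount());
// Array form: only the element at position 0 is constrained, so both documents match.
assert.eq(2, itemsColl.find({$jsonSchema: {properties: {a: {items: [{multipleOf: 2}]}}}}).itcount());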
+schema = { + properties: {a: {items: []}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "blah"}, true); +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1, "foo", {}]}, true); }()); diff --git a/jstests/core/json_schema/json_schema.js b/jstests/core/json_schema/json_schema.js index 613b14af226..13a10fde323 100644 --- a/jstests/core/json_schema/json_schema.js +++ b/jstests/core/json_schema/json_schema.js @@ -5,340 +5,335 @@ * Tests for JSON Schema document validation. */ (function() { - "use strict"; - - load("jstests/libs/assert_schema_match.js"); - - let coll = db.jstests_json_schema; - coll.drop(); - - assert.writeOK(coll.insert({_id: 0, num: 3})); - assert.writeOK(coll.insert({_id: 1, num: -3})); - assert.writeOK(coll.insert({_id: 2, num: NumberInt(2)})); - assert.writeOK(coll.insert({_id: 3, num: NumberInt(-2)})); - assert.writeOK(coll.insert({_id: 4, num: NumberLong(1)})); - assert.writeOK(coll.insert({_id: 5, num: NumberLong(-1)})); - assert.writeOK(coll.insert({_id: 6, num: {}})); - assert.writeOK(coll.insert({_id: 7, num: "str"})); - assert.writeOK(coll.insert({_id: 8, num: "string"})); - assert.writeOK(coll.insert({_id: 9})); - - // Test that $jsonSchema fails to parse if its argument is not an object. - assert.throws(function() { - coll.find({$jsonSchema: "foo"}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: []}).itcount(); - }); - - // Test that $jsonSchema fails to parse if the value for the "type" keyword is not a string. - assert.throws(function() { - coll.find({$jsonSchema: {type: 3}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {type: {}}}).itcount(); - }); - - // Test that $jsonSchema fails to parse if the value for the "type" keyword is an unsupported - // alias. - assert.throws(function() { - coll.find({$jsonSchema: {type: 'integer'}}).itcount(); - }); - - // Test that $jsonSchema fails to parse if the value for the properties keyword is not an - // object. - assert.throws(function() { - coll.find({$jsonSchema: {properties: 3}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: []}}).itcount(); - }); - - // Test that $jsonSchema fails to parse if one of the properties named inside the argument for - // the properties keyword is not an object. - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: "number"}}}).itcount(); - }); - - // Test that $jsonSchema fails to parse if the values for the maximum, maxLength, and - // minlength keywords are not numbers. - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {maximum: "0"}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {maximum: {}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {maxLength: "0"}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {maxLength: {}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {minLength: "0"}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {minLength: {}}}}}).itcount(); - }); - - // Test that the empty schema matches everything. - assert.eq(10, coll.find({$jsonSchema: {}}).itcount()); - - // Test that a schema just checking that the type of stored documents is "object" is legal and - // matches everything. 
- assert.eq(10, coll.find({$jsonSchema: {type: "object"}}).itcount()); - - // Test that schemas whose top-level type is not object matches nothing. - assert.eq(0, coll.find({$jsonSchema: {type: "string"}}).itcount()); - assert.eq(0, coll.find({$jsonSchema: {bsonType: "long"}}).itcount()); - assert.eq(0, coll.find({$jsonSchema: {bsonType: "objectId"}}).itcount()); - - // Test that type:"number" only matches numbers, or documents where the field is missing. - assert.eq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "number"}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maximum restriction is enforced correctly. - assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "number", maximum: -1}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Repeat the test, but include an explicit top-level type:"object". - assert.eq( - [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}], - coll.find({$jsonSchema: {type: "object", properties: {num: {type: "number", maximum: -1}}}}, - {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that type:"long" only matches longs, or documents where the field is missing. - assert.eq([{_id: 4}, {_id: 5}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {bsonType: "long"}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maximum restriction is enforced correctly with type:"long". - assert.eq( - [{_id: 5}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {bsonType: "long", maximum: 0}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maximum restriction without a numeric type specified only applies to numbers. - assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 6}, {_id: 7}, {_id: 8}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {maximum: 0}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maximum restriction does nothing if a non-numeric type is also specified. - assert.eq([{_id: 7}, {_id: 8}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", maximum: 0}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maxLength restriction doesn't return strings with length greater than maxLength. - assert.eq( - [{_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 2}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that maxLength restriction returns strings with length less than or equal to maxLength. - assert.eq( - [{_id: 7}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 3}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that minLength restriction doesn't return strings with length less than minLength. - assert.eq( - [{_id: 8}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 4}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that minLength restriction returns strings with length greater than or equal to - // minLength. - assert.eq( - [{_id: 7}, {_id: 8}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 3}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that $jsonSchema fails to parse if the values for the pattern keyword is not a string. 
- assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {pattern: 0}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {num: {pattern: {}}}}}).itcount(); - }); - - // Tests that the pattern keyword only returns strings that match the regex pattern. - assert.eq( - [{_id: 8}, {_id: 9}], - coll.find({$jsonSchema: {properties: {num: {type: "string", pattern: "ing"}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - coll.drop(); - assert.writeOK(coll.insert({_id: 0, obj: 3})); - assert.writeOK(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}})); - assert.writeOK(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}})); - assert.writeOK(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}})); - - // Test that properties keyword can be used recursively, and that it does not apply when the - // field does not contain on object. - assert.eq([{_id: 0}, {_id: 1}], - coll.find({ - $jsonSchema: { - properties: { - obj: { - properties: { - f1: {type: "object", properties: {f3: {type: "string"}}}, - f2: {type: "string"} - } +"use strict"; + +load("jstests/libs/assert_schema_match.js"); + +let coll = db.jstests_json_schema; +coll.drop(); + +assert.writeOK(coll.insert({_id: 0, num: 3})); +assert.writeOK(coll.insert({_id: 1, num: -3})); +assert.writeOK(coll.insert({_id: 2, num: NumberInt(2)})); +assert.writeOK(coll.insert({_id: 3, num: NumberInt(-2)})); +assert.writeOK(coll.insert({_id: 4, num: NumberLong(1)})); +assert.writeOK(coll.insert({_id: 5, num: NumberLong(-1)})); +assert.writeOK(coll.insert({_id: 6, num: {}})); +assert.writeOK(coll.insert({_id: 7, num: "str"})); +assert.writeOK(coll.insert({_id: 8, num: "string"})); +assert.writeOK(coll.insert({_id: 9})); + +// Test that $jsonSchema fails to parse if its argument is not an object. +assert.throws(function() { + coll.find({$jsonSchema: "foo"}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: []}).itcount(); +}); + +// Test that $jsonSchema fails to parse if the value for the "type" keyword is not a string. +assert.throws(function() { + coll.find({$jsonSchema: {type: 3}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {type: {}}}).itcount(); +}); + +// Test that $jsonSchema fails to parse if the value for the "type" keyword is an unsupported +// alias. +assert.throws(function() { + coll.find({$jsonSchema: {type: 'integer'}}).itcount(); +}); + +// Test that $jsonSchema fails to parse if the value for the properties keyword is not an +// object. +assert.throws(function() { + coll.find({$jsonSchema: {properties: 3}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: []}}).itcount(); +}); + +// Test that $jsonSchema fails to parse if one of the properties named inside the argument for +// the properties keyword is not an object. +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: "number"}}}).itcount(); +}); + +// Test that $jsonSchema fails to parse if the values for the maximum, maxLength, and +// minlength keywords are not numbers. 
+assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {maximum: "0"}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {maximum: {}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {maxLength: "0"}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {maxLength: {}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {minLength: "0"}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {minLength: {}}}}}).itcount(); +}); + +// Test that the empty schema matches everything. +assert.eq(10, coll.find({$jsonSchema: {}}).itcount()); + +// Test that a schema just checking that the type of stored documents is "object" is legal and +// matches everything. +assert.eq(10, coll.find({$jsonSchema: {type: "object"}}).itcount()); + +// Test that schemas whose top-level type is not object matches nothing. +assert.eq(0, coll.find({$jsonSchema: {type: "string"}}).itcount()); +assert.eq(0, coll.find({$jsonSchema: {bsonType: "long"}}).itcount()); +assert.eq(0, coll.find({$jsonSchema: {bsonType: "objectId"}}).itcount()); + +// Test that type:"number" only matches numbers, or documents where the field is missing. +assert.eq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "number"}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that maximum restriction is enforced correctly. +assert.eq([{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "number", maximum: -1}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Repeat the test, but include an explicit top-level type:"object". +assert.eq( + [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 9}], + coll.find({$jsonSchema: {type: "object", properties: {num: {type: "number", maximum: -1}}}}, + {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that type:"long" only matches longs, or documents where the field is missing. +assert.eq([{_id: 4}, {_id: 5}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {bsonType: "long"}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that maximum restriction is enforced correctly with type:"long". +assert.eq([{_id: 5}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {bsonType: "long", maximum: 0}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that maximum restriction without a numeric type specified only applies to numbers. +assert.eq( + [{_id: 1}, {_id: 3}, {_id: 5}, {_id: 6}, {_id: 7}, {_id: 8}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {maximum: 0}}}}, {_id: 1}).sort({_id: 1}).toArray()); + +// Test that maximum restriction does nothing if a non-numeric type is also specified. +assert.eq([{_id: 7}, {_id: 8}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", maximum: 0}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that maxLength restriction doesn't return strings with length greater than maxLength. +assert.eq([{_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 2}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that maxLength restriction returns strings with length less than or equal to maxLength. 
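For orientation, a minimal sketch of how the numeric and string-length keywords exercised here (maximum, minLength, maxLength) combine in one $jsonSchema filter. The collection name and documents are illustrative only and are not part of the patch:

// Matches documents where "num" is a number no greater than 10 and "name" is a
// string of 3 to 8 characters; either field may also be missing entirely.
const sketchColl = db.jstests_json_schema_sketch;  // hypothetical collection
sketchColl.drop();
assert.writeOK(sketchColl.insert({num: 5, name: "abc"}));
assert.writeOK(sketchColl.insert({num: 50, name: "far_too_long_name"}));
assert.eq(1,
          sketchColl
              .find({
                  $jsonSchema: {
                      properties: {
                          num: {type: "number", maximum: 10},
                          name: {type: "string", minLength: 3, maxLength: 8}
                      }
                  }
              })
              .itcount());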
+assert.eq([{_id: 7}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", maxLength: 3}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that minLength restriction doesn't return strings with length less than minLength. +assert.eq([{_id: 8}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 4}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that minLength restriction returns strings with length greater than or equal to +// minLength. +assert.eq([{_id: 7}, {_id: 8}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", minLength: 3}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that $jsonSchema fails to parse if the values for the pattern keyword is not a string. +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {pattern: 0}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {num: {pattern: {}}}}}).itcount(); +}); + +// Tests that the pattern keyword only returns strings that match the regex pattern. +assert.eq([{_id: 8}, {_id: 9}], + coll.find({$jsonSchema: {properties: {num: {type: "string", pattern: "ing"}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +coll.drop(); +assert.writeOK(coll.insert({_id: 0, obj: 3})); +assert.writeOK(coll.insert({_id: 1, obj: {f1: {f3: "str"}, f2: "str"}})); +assert.writeOK(coll.insert({_id: 2, obj: {f1: "str", f2: "str"}})); +assert.writeOK(coll.insert({_id: 3, obj: {f1: 1, f2: "str"}})); + +// Test that properties keyword can be used recursively, and that it does not apply when the +// field does not contain on object. +assert.eq([{_id: 0}, {_id: 1}], + coll.find({ + $jsonSchema: { + properties: { + obj: { + properties: { + f1: {type: "object", properties: {f3: {type: "string"}}}, + f2: {type: "string"} } } } - }, - {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that $jsonSchema can be combined with other operators in the match language. - assert.eq( - [{_id: 0}, {_id: 1}, {_id: 2}], - coll.find({ - $or: [ - {"obj.f1": "str"}, - { - $jsonSchema: { - properties: { - obj: { - properties: { - f1: {type: "object", properties: {f3: {type: "string"}}}, - f2: {type: "string"} + } + }, + {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that $jsonSchema can be combined with other operators in the match language. +assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], + coll.find({ + $or: [ + {"obj.f1": "str"}, + { + $jsonSchema: { + properties: { + obj: { + properties: { + f1: {type: "object", properties: {f3: {type: "string"}}}, + f2: {type: "string"} + } } } } } - } - ] - }, - {_id: 1}) - .sort({_id: 1}) - .toArray()); - - coll.drop(); - assert.writeOK(coll.insert({_id: 0, arr: 3})); - assert.writeOK(coll.insert({_id: 1, arr: [1, "foo"]})); - assert.writeOK(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]})); - assert.writeOK(coll.insert({_id: 3, arr: []})); - assert.writeOK(coll.insert({_id: 4, arr: {a: []}})); - - // Test that the type:"array" restriction works as expected. - assert.eq([{_id: 1}, {_id: 2}, {_id: 3}], - coll.find({$jsonSchema: {properties: {arr: {type: "array"}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that type:"number" works correctly in the presence of arrays. 
- assert.eq([{_id: 0}], - coll.find({$jsonSchema: {properties: {arr: {type: "number"}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray()); - - // Test that the following keywords fail to parse although present in the spec: - // - default - // - definitions - // - format - // - id - // - $ref - // - $schema - let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}}); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({ - find: coll.getName(), - query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}} - }); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}}); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}}); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({ - find: coll.getName(), - query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}} - }); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}}); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - res = coll.runCommand({ - find: coll.getName(), - query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}} - }); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - // Test that the following whitelisted keywords are verified as strings but otherwise ignored - // in a top-level schema: - // - description - // - title - assertSchemaMatch(coll, {description: "test"}, {}, true); - assertSchemaMatch(coll, {title: "insert title"}, {}, true); - - // Repeat the test above with nested schema. - assertSchemaMatch(coll, {properties: {a: {description: "test"}}}, {a: {}}, true); - assertSchemaMatch(coll, {properties: {a: {title: "this is a's title"}}}, {a: {}}, true); - - // Test that the $jsonSchema validator is correctly stored in the collection catalog. - coll.drop(); - let schema = {properties: {a: {type: 'number'}, b: {minLength: 1}}}; - assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); - - let listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); - assert.commandWorked(listCollectionsOutput); - assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); - - // Repeat the test above using the whitelisted metadata keywords. - coll.drop(); - schema = {title: "Test schema", description: "Metadata keyword test"}; - assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); - - listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); - assert.commandWorked(listCollectionsOutput); - assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); - - // Repeat again with a nested schema. 
- coll.drop(); - schema = {properties: {a: {title: "Nested title", description: "Nested description"}}}; - assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); - - listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); - assert.commandWorked(listCollectionsOutput); - assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); - - // Test that $jsonSchema and various internal match expressions work correctly with sibling - // predicates. - coll.drop(); - assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); - assert.writeOK(coll.insert({_id: 2, a: 2, b: 2})); - - assert.eq(1, - coll.find({$jsonSchema: {properties: {a: {type: "number"}}, required: ["a"]}, b: 1}) - .itcount()); - assert.eq(1, coll.find({$or: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount()); - assert.eq(1, coll.find({$and: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount()); - - assert.eq(1, coll.find({$_internalSchemaMinProperties: 3, b: 2}).itcount()); - assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount()); - assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount()); - assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount()); + ] + }, + {_id: 1}) + .sort({_id: 1}) + .toArray()); + +coll.drop(); +assert.writeOK(coll.insert({_id: 0, arr: 3})); +assert.writeOK(coll.insert({_id: 1, arr: [1, "foo"]})); +assert.writeOK(coll.insert({_id: 2, arr: [{a: 1}, {b: 2}]})); +assert.writeOK(coll.insert({_id: 3, arr: []})); +assert.writeOK(coll.insert({_id: 4, arr: {a: []}})); + +// Test that the type:"array" restriction works as expected. +assert.eq([{_id: 1}, {_id: 2}, {_id: 3}], + coll.find({$jsonSchema: {properties: {arr: {type: "array"}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that type:"number" works correctly in the presence of arrays. 
+assert.eq([{_id: 0}], + coll.find({$jsonSchema: {properties: {arr: {type: "number"}}}}, {_id: 1}) + .sort({_id: 1}) + .toArray()); + +// Test that the following keywords fail to parse although present in the spec: +// - default +// - definitions +// - format +// - id +// - $ref +// - $schema +let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand( + {find: coll.getName(), query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand({ + find: coll.getName(), + query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}} +}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +res = coll.runCommand({ + find: coll.getName(), + query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}} +}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +// Test that the following whitelisted keywords are verified as strings but otherwise ignored +// in a top-level schema: +// - description +// - title +assertSchemaMatch(coll, {description: "test"}, {}, true); +assertSchemaMatch(coll, {title: "insert title"}, {}, true); + +// Repeat the test above with nested schema. +assertSchemaMatch(coll, {properties: {a: {description: "test"}}}, {a: {}}, true); +assertSchemaMatch(coll, {properties: {a: {title: "this is a's title"}}}, {a: {}}, true); + +// Test that the $jsonSchema validator is correctly stored in the collection catalog. +coll.drop(); +let schema = {properties: {a: {type: 'number'}, b: {minLength: 1}}}; +assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); + +let listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); +assert.commandWorked(listCollectionsOutput); +assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); + +// Repeat the test above using the whitelisted metadata keywords. +coll.drop(); +schema = { + title: "Test schema", + description: "Metadata keyword test" +}; +assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); + +listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); +assert.commandWorked(listCollectionsOutput); +assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); + +// Repeat again with a nested schema. 
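The catalog checks above only confirm that the validator is stored verbatim; as a complementary sketch, a $jsonSchema validator also gates writes once attached. The collection name and documents below are hypothetical, not part of the patch:

const validatedColl = db.jstests_json_schema_validator_sketch;  // hypothetical
validatedColl.drop();
assert.commandWorked(db.createCollection(
    validatedColl.getName(), {validator: {$jsonSchema: {properties: {a: {type: "number"}}}}}));
assert.writeOK(validatedColl.insert({a: 1}));         // satisfies the schema
assert.writeError(validatedColl.insert({a: "str"}));  // rejected by document validation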
+coll.drop(); +schema = { + properties: {a: {title: "Nested title", description: "Nested description"}} +}; +assert.commandWorked(db.createCollection(coll.getName(), {validator: {$jsonSchema: schema}})); + +listCollectionsOutput = db.runCommand({listCollections: 1, filter: {name: coll.getName()}}); +assert.commandWorked(listCollectionsOutput); +assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.validator, {$jsonSchema: schema}); + +// Test that $jsonSchema and various internal match expressions work correctly with sibling +// predicates. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); +assert.writeOK(coll.insert({_id: 2, a: 2, b: 2})); + +assert.eq( + 1, + coll.find({$jsonSchema: {properties: {a: {type: "number"}}, required: ["a"]}, b: 1}).itcount()); +assert.eq(1, coll.find({$or: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount()); +assert.eq(1, coll.find({$and: [{$jsonSchema: {}, a: 1}, {b: 1}]}).itcount()); + +assert.eq(1, coll.find({$_internalSchemaMinProperties: 3, b: 2}).itcount()); +assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount()); +assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount()); +assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount()); }()); diff --git a/jstests/core/json_schema/logical_keywords.js b/jstests/core/json_schema/logical_keywords.js index 507123e2c69..3b7895f27cd 100644 --- a/jstests/core/json_schema/logical_keywords.js +++ b/jstests/core/json_schema/logical_keywords.js @@ -10,222 +10,268 @@ * - enum */ (function() { - "use strict"; - - load("jstests/libs/assert_schema_match.js"); - - const coll = db.jstests_json_schema_logical; - - // Test that $jsonSchema fails to parse if the values for the allOf, anyOf, and oneOf - // keywords are not arrays of valid schema. - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {allOf: {maximum: "0"}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {allOf: [0]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {allOf: [{invalid: "0"}]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {anyOf: {maximum: "0"}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {anyOf: [0]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {anyOf: [{invalid: "0"}]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {oneOf: {maximum: "0"}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {oneOf: [0]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {oneOf: [{invalid: "0"}]}}}}).itcount(); - }); - - // Test that $jsonSchema fails to parse if the value for the 'not' keyword is not a - // valid schema object. - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {not: {maximum: "0"}}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {not: [0]}}}}).itcount(); - }); - assert.throws(function() { - coll.find({$jsonSchema: {properties: {foo: {not: [{}]}}}}).itcount(); - }); - - // Test that the 'allOf' keyword correctly returns documents that match every schema in - // the array. 
- let schema = {properties: {foo: {allOf: [{minimum: 1}]}}}; - assertSchemaMatch(coll, schema, {foo: 1}, true); - assertSchemaMatch(coll, schema, {foo: 0}, false); - assertSchemaMatch(coll, schema, {foo: "string"}, true); - - schema = {properties: {foo: {allOf: [{}]}}}; - assertSchemaMatch(coll, schema, {foo: {}}, true); - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: 0}, true); - - schema = {properties: {foo: {allOf: [{type: 'number'}, {minimum: 0}]}}}; - assertSchemaMatch(coll, schema, {foo: 0}, true); - assertSchemaMatch(coll, schema, {foo: "string"}, false); - assertSchemaMatch(coll, schema, {foo: [0]}, false); - - // Test that a top-level 'allOf' keyword matches the correct documents. - assertSchemaMatch(coll, {allOf: [{}]}, {}, true); - assertSchemaMatch(coll, {allOf: [{}]}, {foo: 0}, true); - assertSchemaMatch(coll, {allOf: [{type: 'string'}]}, {}, false); - assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: "str"}, true); - assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: 1}, false); - - // Test that 'allOf' in conjunction with another keyword matches the correct documents. - assertSchemaMatch( - coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: 1}, true); - assertSchemaMatch( - coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: "str"}, false); - - // Test that the 'anyOf' keyword correctly returns documents that match at least one schema - // in the array. - schema = {properties: {foo: {anyOf: [{type: 'string'}, {type: 'number', minimum: 1}]}}}; - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: 1}, true); - assertSchemaMatch(coll, schema, {foo: 0}, false); - - schema = {properties: {foo: {anyOf: [{type: 'string'}, {type: 'object'}]}}}; - assertSchemaMatch(coll, schema, {foo: {}}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: [{}]}, false); - - schema = {properties: {foo: {anyOf: [{}]}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: {}}, true); - assertSchemaMatch(coll, schema, {foo: 0}, true); - - // Test that a top-level 'anyOf' keyword matches the correct documents. - schema = {anyOf: [{}]}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: 1}, true); - - schema = {anyOf: [{properties: {foo: {type: 'string'}}}]}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: 1}, false); - - // Test that 'anyOf' in conjunction with another keyword matches the correct documents. - schema = {properties: {foo: {type: "number", anyOf: [{minimum: 1}]}}}; - assertSchemaMatch(coll, schema, {foo: 1}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, false); - - // Test that the 'oneOf' keyword correctly returns documents that match exactly one schema - // in the array. 
- schema = {properties: {foo: {oneOf: [{minimum: 0}, {maximum: 3}]}}}; - assertSchemaMatch(coll, schema, {foo: 4}, true); - assertSchemaMatch(coll, schema, {foo: 1}, false); - assertSchemaMatch(coll, schema, {foo: "str"}, false); - - schema = {properties: {foo: {oneOf: [{type: 'string'}, {pattern: "ing"}]}}}; - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: "string"}, false); - - schema = {properties: {foo: {oneOf: [{}]}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: 1}, true); - - // Test that a top-level 'oneOf' keyword matches the correct documents. - schema = {oneOf: [{}]}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: 1}, true); - - schema = {oneOf: [{properties: {foo: {type: 'string'}}}]}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: 1}, false); - - assertSchemaMatch(coll, {oneOf: [{}, {}]}, {}, false); - - // Test that 'oneOf' in conjunction with another keyword matches the correct documents. - schema = {properties: {foo: {type: "number", oneOf: [{minimum: 4}]}}}; - assertSchemaMatch(coll, schema, {foo: 4}, true); - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, false); - - // Test that the 'not' keyword correctly returns documents that do not match any schema - // in the array. - schema = {properties: {foo: {not: {type: 'number'}}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, true); - assertSchemaMatch(coll, schema, {foo: 1}, false); - - schema = {properties: {foo: {not: {}}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: 1}, false); - - // Test that a top-level 'not' keyword matches the correct documents. - assertSchemaMatch(coll, {not: {}}, {}, false); - - schema = {not: {properties: {foo: {type: 'string'}}}}; - assertSchemaMatch(coll, schema, {foo: 1}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, false); - assertSchemaMatch(coll, schema, {}, false); - - // Test that 'not' in conjunction with another keyword matches the correct documents. - schema = {properties: {foo: {type: "string", not: {maxLength: 4}}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {foo: "string"}, true); - assertSchemaMatch(coll, schema, {foo: "str"}, false); - assertSchemaMatch(coll, schema, {foo: 1}, false); - - // Test that the 'enum' keyword correctly matches scalar values. - schema = {properties: {a: {enum: ["str", 5]}}}; - assertSchemaMatch(coll, schema, {a: "str"}, true); - assertSchemaMatch(coll, schema, {a: 5}, true); - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: ["str"]}, false); - - // Test that the 'enum' keyword with a null value correctly matches literal null elements, but - // not 'missing' or 'undefined. - schema = {properties: {a: {enum: [null]}}}; - assertSchemaMatch(coll, schema, {a: null}, true); - assertSchemaMatch(coll, schema, {a: undefined}, false); - assertSchemaMatch(coll, schema, {a: 1}, false); - assertSchemaMatch(coll, {properties: {a: {enum: [null]}}, required: ['a']}, {}, false); - - // Test that the 'enum' keyword correctly matches array values. 
- schema = {properties: {a: {enum: [[1, 2, "3"]]}}}; - assertSchemaMatch(coll, schema, {a: [1, 2, "3"]}, true); - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: [2, "3", 1]}, false); - - schema = {properties: {a: {enum: [[]]}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: [1]}, false); - - // Test that the 'enum' keyword does not traverse arrays when matching. - schema = {properties: {a: {enum: ["str", 1]}}}; - assertSchemaMatch(coll, schema, {a: ["str"]}, false); - assertSchemaMatch(coll, schema, {a: [1]}, false); - - // Test that the 'enum' keyword matches objects regardless of the field ordering. - schema = {properties: {a: {enum: [{name: "tiny", size: "large"}]}}}; - assertSchemaMatch(coll, schema, {a: {name: "tiny", size: "large"}}, true); - assertSchemaMatch(coll, schema, {a: {size: "large", name: "tiny"}}, true); - - // Test that the 'enum' keyword does not match documents with additional fields. - assertSchemaMatch(coll, - {properties: {a: {enum: [{name: "tiny"}]}}}, - {a: {size: "large", name: "tiny"}}, - false); - - // Test that a top-level 'enum' matches the correct documents. - assertSchemaMatch(coll, {enum: [{_id: 0}]}, {_id: 0}, true); - assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str"}, true); - assertSchemaMatch(coll, {enum: [{}]}, {}, false); - assertSchemaMatch(coll, {enum: [null]}, {}, false); - assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str", b: 1}, false); - assertSchemaMatch(coll, {enum: [1, 2]}, {}, false); +"use strict"; + +load("jstests/libs/assert_schema_match.js"); + +const coll = db.jstests_json_schema_logical; + +// Test that $jsonSchema fails to parse if the values for the allOf, anyOf, and oneOf +// keywords are not arrays of valid schema. +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {allOf: {maximum: "0"}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {allOf: [0]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {allOf: [{invalid: "0"}]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {anyOf: {maximum: "0"}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {anyOf: [0]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {anyOf: [{invalid: "0"}]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {oneOf: {maximum: "0"}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {oneOf: [0]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {oneOf: [{invalid: "0"}]}}}}).itcount(); +}); + +// Test that $jsonSchema fails to parse if the value for the 'not' keyword is not a +// valid schema object. +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {not: {maximum: "0"}}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {not: [0]}}}}).itcount(); +}); +assert.throws(function() { + coll.find({$jsonSchema: {properties: {foo: {not: [{}]}}}}).itcount(); +}); + +// Test that the 'allOf' keyword correctly returns documents that match every schema in +// the array. 
+let schema = {properties: {foo: {allOf: [{minimum: 1}]}}}; +assertSchemaMatch(coll, schema, {foo: 1}, true); +assertSchemaMatch(coll, schema, {foo: 0}, false); +assertSchemaMatch(coll, schema, {foo: "string"}, true); + +schema = { + properties: {foo: {allOf: [{}]}} +}; +assertSchemaMatch(coll, schema, {foo: {}}, true); +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: 0}, true); + +schema = { + properties: {foo: {allOf: [{type: 'number'}, {minimum: 0}]}} +}; +assertSchemaMatch(coll, schema, {foo: 0}, true); +assertSchemaMatch(coll, schema, {foo: "string"}, false); +assertSchemaMatch(coll, schema, {foo: [0]}, false); + +// Test that a top-level 'allOf' keyword matches the correct documents. +assertSchemaMatch(coll, {allOf: [{}]}, {}, true); +assertSchemaMatch(coll, {allOf: [{}]}, {foo: 0}, true); +assertSchemaMatch(coll, {allOf: [{type: 'string'}]}, {}, false); +assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: "str"}, true); +assertSchemaMatch(coll, {allOf: [{properties: {foo: {type: 'string'}}}]}, {foo: 1}, false); + +// Test that 'allOf' in conjunction with another keyword matches the correct documents. +assertSchemaMatch( + coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: 1}, true); +assertSchemaMatch( + coll, {properties: {foo: {type: "number", allOf: [{minimum: 1}]}}}, {foo: "str"}, false); + +// Test that the 'anyOf' keyword correctly returns documents that match at least one schema +// in the array. +schema = { + properties: {foo: {anyOf: [{type: 'string'}, {type: 'number', minimum: 1}]}} +}; +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: 1}, true); +assertSchemaMatch(coll, schema, {foo: 0}, false); + +schema = { + properties: {foo: {anyOf: [{type: 'string'}, {type: 'object'}]}} +}; +assertSchemaMatch(coll, schema, {foo: {}}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: [{}]}, false); + +schema = { + properties: {foo: {anyOf: [{}]}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: {}}, true); +assertSchemaMatch(coll, schema, {foo: 0}, true); + +// Test that a top-level 'anyOf' keyword matches the correct documents. +schema = { + anyOf: [{}] +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: 1}, true); + +schema = { + anyOf: [{properties: {foo: {type: 'string'}}}] +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: 1}, false); + +// Test that 'anyOf' in conjunction with another keyword matches the correct documents. +schema = { + properties: {foo: {type: "number", anyOf: [{minimum: 1}]}} +}; +assertSchemaMatch(coll, schema, {foo: 1}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, false); + +// Test that the 'oneOf' keyword correctly returns documents that match exactly one schema +// in the array. 
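As a brief sketch of how the logical keywords compose, the snippet below combines allOf and anyOf on one property; it reuses the assertSchemaMatch helper and coll from the surrounding test and is illustrative only:

// allOf requires every subschema to match; anyOf requires at least one.
const logicalSketch = {
    properties: {
        foo: {allOf: [{type: "number"}, {minimum: 1}], anyOf: [{maximum: 5}, {enum: [100]}]}
    }
};
assertSchemaMatch(coll, logicalSketch, {foo: 2}, true);
assertSchemaMatch(coll, logicalSketch, {foo: 0}, false);  // fails allOf's minimum
assertSchemaMatch(coll, logicalSketch, {foo: 7}, false);  // fails both anyOf branches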
+schema = { + properties: {foo: {oneOf: [{minimum: 0}, {maximum: 3}]}} +}; +assertSchemaMatch(coll, schema, {foo: 4}, true); +assertSchemaMatch(coll, schema, {foo: 1}, false); +assertSchemaMatch(coll, schema, {foo: "str"}, false); + +schema = { + properties: {foo: {oneOf: [{type: 'string'}, {pattern: "ing"}]}} +}; +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: "string"}, false); + +schema = { + properties: {foo: {oneOf: [{}]}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: 1}, true); + +// Test that a top-level 'oneOf' keyword matches the correct documents. +schema = { + oneOf: [{}] +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: 1}, true); + +schema = { + oneOf: [{properties: {foo: {type: 'string'}}}] +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: 1}, false); + +assertSchemaMatch(coll, {oneOf: [{}, {}]}, {}, false); + +// Test that 'oneOf' in conjunction with another keyword matches the correct documents. +schema = { + properties: {foo: {type: "number", oneOf: [{minimum: 4}]}} +}; +assertSchemaMatch(coll, schema, {foo: 4}, true); +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, false); + +// Test that the 'not' keyword correctly returns documents that do not match any schema +// in the array. +schema = { + properties: {foo: {not: {type: 'number'}}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, true); +assertSchemaMatch(coll, schema, {foo: 1}, false); + +schema = { + properties: {foo: {not: {}}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: 1}, false); + +// Test that a top-level 'not' keyword matches the correct documents. +assertSchemaMatch(coll, {not: {}}, {}, false); + +schema = { + not: {properties: {foo: {type: 'string'}}} +}; +assertSchemaMatch(coll, schema, {foo: 1}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, false); +assertSchemaMatch(coll, schema, {}, false); + +// Test that 'not' in conjunction with another keyword matches the correct documents. +schema = { + properties: {foo: {type: "string", not: {maxLength: 4}}} +}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {foo: "string"}, true); +assertSchemaMatch(coll, schema, {foo: "str"}, false); +assertSchemaMatch(coll, schema, {foo: 1}, false); + +// Test that the 'enum' keyword correctly matches scalar values. +schema = { + properties: {a: {enum: ["str", 5]}} +}; +assertSchemaMatch(coll, schema, {a: "str"}, true); +assertSchemaMatch(coll, schema, {a: 5}, true); +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: ["str"]}, false); + +// Test that the 'enum' keyword with a null value correctly matches literal null elements, but +// not 'missing' or 'undefined. +schema = { + properties: {a: {enum: [null]}} +}; +assertSchemaMatch(coll, schema, {a: null}, true); +assertSchemaMatch(coll, schema, {a: undefined}, false); +assertSchemaMatch(coll, schema, {a: 1}, false); +assertSchemaMatch(coll, {properties: {a: {enum: [null]}}, required: ['a']}, {}, false); + +// Test that the 'enum' keyword correctly matches array values. 
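A short illustrative sketch of 'not' combined with 'enum', again reusing coll and assertSchemaMatch from the surrounding test; note that, as in the 'not' cases above, property subschemas are skipped entirely when the field is missing:

const notEnumSketch = {properties: {status: {not: {enum: ["open", "closed"]}}}};
assertSchemaMatch(coll, notEnumSketch, {status: "archived"}, true);
assertSchemaMatch(coll, notEnumSketch, {status: "open"}, false);
assertSchemaMatch(coll, notEnumSketch, {}, true);  // missing field: subschema not applied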
+schema = { + properties: {a: {enum: [[1, 2, "3"]]}} +}; +assertSchemaMatch(coll, schema, {a: [1, 2, "3"]}, true); +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: [2, "3", 1]}, false); + +schema = { + properties: {a: {enum: [[]]}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: [1]}, false); + +// Test that the 'enum' keyword does not traverse arrays when matching. +schema = { + properties: {a: {enum: ["str", 1]}} +}; +assertSchemaMatch(coll, schema, {a: ["str"]}, false); +assertSchemaMatch(coll, schema, {a: [1]}, false); + +// Test that the 'enum' keyword matches objects regardless of the field ordering. +schema = { + properties: {a: {enum: [{name: "tiny", size: "large"}]}} +}; +assertSchemaMatch(coll, schema, {a: {name: "tiny", size: "large"}}, true); +assertSchemaMatch(coll, schema, {a: {size: "large", name: "tiny"}}, true); + +// Test that the 'enum' keyword does not match documents with additional fields. +assertSchemaMatch( + coll, {properties: {a: {enum: [{name: "tiny"}]}}}, {a: {size: "large", name: "tiny"}}, false); + +// Test that a top-level 'enum' matches the correct documents. +assertSchemaMatch(coll, {enum: [{_id: 0}]}, {_id: 0}, true); +assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str"}, true); +assertSchemaMatch(coll, {enum: [{}]}, {}, false); +assertSchemaMatch(coll, {enum: [null]}, {}, false); +assertSchemaMatch(coll, {enum: [{_id: 0, a: "str"}]}, {_id: 0, a: "str", b: 1}, false); +assertSchemaMatch(coll, {enum: [1, 2]}, {}, false); }()); diff --git a/jstests/core/json_schema/min_max_items.js b/jstests/core/json_schema/min_max_items.js index 1dff469747d..3c27283a037 100644 --- a/jstests/core/json_schema/min_max_items.js +++ b/jstests/core/json_schema/min_max_items.js @@ -4,44 +4,44 @@ * Tests the JSON Schema keywords "minItems" and "maxItems". */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.getCollection("json_schema_min_max_items"); - coll.drop(); +const coll = db.getCollection("json_schema_min_max_items"); +coll.drop(); - // Test that the JSON Schema fails to parse if "minItems" is not a valid number. - assert.throws(() => coll.find({$jsonSchema: {minItems: "blah"}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {minItems: -1}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {minItems: 12.5}}).itcount()); +// Test that the JSON Schema fails to parse if "minItems" is not a valid number. +assert.throws(() => coll.find({$jsonSchema: {minItems: "blah"}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {minItems: -1}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {minItems: 12.5}}).itcount()); - // Test that "minItems" matches when the field is missing or not an array. - assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {}, true); - assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: "foo"}, true); +// Test that "minItems" matches when the field is missing or not an array. +assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {}, true); +assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: "foo"}, true); - // Test that "minItems" matches arrays with the requisite number of items. 
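For completeness, a small sketch showing minItems and maxItems bounding array length together, reusing coll and assertSchemaMatch from this test; the schema and documents are illustrative only:

const arrayLenSketch = {properties: {tags: {type: "array", minItems: 1, maxItems: 3}}};
assertSchemaMatch(coll, arrayLenSketch, {tags: ["a"]}, true);
assertSchemaMatch(coll, arrayLenSketch, {tags: []}, false);            // below minItems
assertSchemaMatch(coll, arrayLenSketch, {tags: [1, 2, 3, 4]}, false);  // above maxItems
assertSchemaMatch(coll, arrayLenSketch, {}, true);  // missing field is unconstrained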
- assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: []}, false); - assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: ["x"]}, true); - assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: [0, 1]}, true); +// Test that "minItems" matches arrays with the requisite number of items. +assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: []}, false); +assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: ["x"]}, true); +assertSchemaMatch(coll, {properties: {a: {minItems: 1}}}, {a: [0, 1]}, true); - // Test that "minItems" has no effect when specified at the top level. - assertSchemaMatch(coll, {minItems: 2}, {}, true); +// Test that "minItems" has no effect when specified at the top level. +assertSchemaMatch(coll, {minItems: 2}, {}, true); - // Test that the JSON Schema fails to parse if "maxItems" is not a valid number. - assert.throws(() => coll.find({$jsonSchema: {maxItems: "blah"}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {maxItems: -1}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {maxItems: 12.5}}).itcount()); +// Test that the JSON Schema fails to parse if "maxItems" is not a valid number. +assert.throws(() => coll.find({$jsonSchema: {maxItems: "blah"}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {maxItems: -1}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {maxItems: 12.5}}).itcount()); - // Test that "maxItems" matches when the field is missing or not an array. - assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {}, true); - assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: "foo"}, true); +// Test that "maxItems" matches when the field is missing or not an array. +assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {}, true); +assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: "foo"}, true); - // Test that "maxItems" matches arrays with the requisite number of items. - assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: []}, true); - assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: ["x"]}, true); - assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: [0, 1]}, false); +// Test that "maxItems" matches arrays with the requisite number of items. +assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: []}, true); +assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: ["x"]}, true); +assertSchemaMatch(coll, {properties: {a: {maxItems: 1}}}, {a: [0, 1]}, false); - // Test that "maxItems" has no effect when specified at the top level. - assertSchemaMatch(coll, {maxItems: 2}, {}, true); +// Test that "maxItems" has no effect when specified at the top level. +assertSchemaMatch(coll, {maxItems: 2}, {}, true); }()); diff --git a/jstests/core/json_schema/min_max_properties.js b/jstests/core/json_schema/min_max_properties.js index fbfffceb96c..975a22fd527 100644 --- a/jstests/core/json_schema/min_max_properties.js +++ b/jstests/core/json_schema/min_max_properties.js @@ -4,46 +4,46 @@ * Tests for the JSON Schema 'minProperties' and 'maxProperties' keywords. */ (function() { - "use strict"; - - load("jstests/libs/assert_schema_match.js"); - - const coll = db.jstests_schema_min_max_properties; - - // Test that {minProperties: 0} matches any object. 
- assertSchemaMatch(coll, {minProperties: 0}, {}, true); - assertSchemaMatch(coll, {minProperties: 0}, {a: 1}, true); - assertSchemaMatch(coll, {minProperties: 0}, {a: 1, b: 2}, true); - - // Test that {maxProperties: 0} matches nothing, since objects always must have the "_id" field - // when inserted into a collection. - assertSchemaMatch(coll, {maxProperties: 0}, {}, false); - assertSchemaMatch(coll, {maxProperties: 0}, {a: 1}, false); - assertSchemaMatch(coll, {maxProperties: 0}, {a: 1, b: 2}, false); - - // Test top-level minProperties greater than 0. - assertSchemaMatch(coll, {minProperties: 2}, {_id: 0}, false); - assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1}, true); - assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1, b: 2}, true); - - // Test top-level maxProperties greater than 0. - assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0}, true); - assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1}, true); - assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1, b: 2}, false); - - // Test nested maxProperties greater than 0. - assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: 1}, true); - assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {}}, true); - assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1}}, true); - assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1, c: 1}}, false); - - // Test nested maxProperties of 0. - assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {}}, true); - assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {b: 1}}, false); - - // Test nested minProperties greater than 0. - assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: 1}, true); - assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {}}, false); - assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1}}, true); - assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1, c: 1}}, true); +"use strict"; + +load("jstests/libs/assert_schema_match.js"); + +const coll = db.jstests_schema_min_max_properties; + +// Test that {minProperties: 0} matches any object. +assertSchemaMatch(coll, {minProperties: 0}, {}, true); +assertSchemaMatch(coll, {minProperties: 0}, {a: 1}, true); +assertSchemaMatch(coll, {minProperties: 0}, {a: 1, b: 2}, true); + +// Test that {maxProperties: 0} matches nothing, since objects always must have the "_id" field +// when inserted into a collection. +assertSchemaMatch(coll, {maxProperties: 0}, {}, false); +assertSchemaMatch(coll, {maxProperties: 0}, {a: 1}, false); +assertSchemaMatch(coll, {maxProperties: 0}, {a: 1, b: 2}, false); + +// Test top-level minProperties greater than 0. +assertSchemaMatch(coll, {minProperties: 2}, {_id: 0}, false); +assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1}, true); +assertSchemaMatch(coll, {minProperties: 2}, {_id: 0, a: 1, b: 2}, true); + +// Test top-level maxProperties greater than 0. +assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0}, true); +assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1}, true); +assertSchemaMatch(coll, {maxProperties: 2}, {_id: 0, a: 1, b: 2}, false); + +// Test nested maxProperties greater than 0. 
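A compact sketch combining both property-count bounds at the top level, reusing coll and assertSchemaMatch from this test; note that the _id field counts toward the total, which is why {maxProperties: 0} above can never match an inserted document:

const propCountSketch = {minProperties: 2, maxProperties: 3};
assertSchemaMatch(coll, propCountSketch, {_id: 0, a: 1}, true);        // 2 properties
assertSchemaMatch(coll, propCountSketch, {_id: 0, a: 1, b: 2}, true);  // 3 properties
assertSchemaMatch(coll, propCountSketch, {_id: 0, a: 1, b: 2, c: 3}, false);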
+assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: 1}, true); +assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {}}, true); +assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1}}, true); +assertSchemaMatch(coll, {properties: {a: {maxProperties: 1}}}, {a: {b: 1, c: 1}}, false); + +// Test nested maxProperties of 0. +assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {}}, true); +assertSchemaMatch(coll, {properties: {a: {maxProperties: 0}}}, {a: {b: 1}}, false); + +// Test nested minProperties greater than 0. +assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: 1}, true); +assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {}}, false); +assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1}}, true); +assertSchemaMatch(coll, {properties: {a: {minProperties: 1}}}, {a: {b: 1, c: 1}}, true); }()); diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js index 830ce63cae0..fbd15e7b31a 100644 --- a/jstests/core/json_schema/misc_validation.js +++ b/jstests/core/json_schema/misc_validation.js @@ -20,174 +20,177 @@ * ] */ (function() { - "use strict"; - - // For isWiredTiger. - load("jstests/concurrency/fsm_workload_helpers/server_types.js"); - // For isReplSet - load("jstests/libs/fixture_helpers.js"); - // For arrayEq. - load("jstests/aggregation/extras/utils.js"); - - const testName = "json_schema_misc_validation"; - const testDB = db.getSiblingDB(testName); - assert.commandWorked(testDB.dropDatabase()); - assert.commandWorked(testDB.createCollection(testName)); - const coll = testDB.getCollection(testName); - coll.drop(); - - const isMongos = (testDB.runCommand("ismaster").msg === "isdbgrid"); - - // Test that $jsonSchema is rejected in an $elemMatch projection. - assert.throws(function() { - coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount(); - }); - - // Test that an invalid $jsonSchema fails to parse in a count command. - const invalidSchema = {invalid: {}}; - assert.throws(function() { - coll.count({$jsonSchema: invalidSchema}); - }); - - // Test that an invalid $jsonSchema fails to parse in a $geoNear query. - assert.commandWorked(coll.createIndex({geo: "2dsphere"})); - let res = testDB.runCommand({ - aggregate: coll.getName(), - cursor: {}, - pipeline: [{ - $geoNear: { - near: [30, 40], - distanceField: "dis", - query: {$jsonSchema: invalidSchema}, - } - }], - }); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - assert.neq(-1, - res.errmsg.indexOf("Unknown $jsonSchema keyword"), - `$geoNear failed for a reason other than invalid query: ${tojson(res)}`); - - // Test that an invalid $jsonSchema fails to parse in a distinct command. - assert.throws(function() { - coll.distinct("a", {$jsonSchema: invalidSchema}); - }); - - // Test that an invalid $jsonSchema fails to parse in a $match stage within a view. - res = testDB.createView("invalid", coll.getName(), [{$match: {$jsonSchema: invalidSchema}}]); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - // Test that an invalid $jsonSchema fails to parse in a listCollections command. - res = testDB.runCommand({listCollections: 1, filter: {$jsonSchema: invalidSchema}}); - assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); - - // Test that a valid $jsonSchema is legal in a count command. 
- coll.drop(); - assert.writeOK(coll.insert({a: 1, b: "str"})); - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.eq(1, - coll.count({$jsonSchema: {properties: {a: {type: "number"}, b: {type: "string"}}}})); - - // Test that a valid $jsonSchema is legal in a $geoNear stage. - const point = {type: "Point", coordinates: [31.0, 41.0]}; - assert.writeOK(coll.insert({geo: point, a: 1})); - assert.writeOK(coll.insert({geo: point, a: 0})); - assert.commandWorked(coll.createIndex({geo: "2dsphere"})); - res = coll.aggregate({ - $geoNear: { - near: [30, 40], - spherical: true, - query: {$jsonSchema: {properties: {a: {minimum: 1}}}}, - distanceField: "dis", - includeLocs: "loc", - } - }) - .toArray(); - assert.eq(1, res.length, tojson(res)); - assert.eq(res[0].loc, point, tojson(res)); - - // Test that a valid $jsonSchema is legal in a distinct command. - coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: "str"})); - assert.writeOK(coll.insert({a: ["STR", "str"]})); - - assert(arrayEq([1, 2], coll.distinct("a", {$jsonSchema: {properties: {a: {type: "number"}}}}))); +"use strict"; + +// For isWiredTiger. +load("jstests/concurrency/fsm_workload_helpers/server_types.js"); +// For isReplSet +load("jstests/libs/fixture_helpers.js"); +// For arrayEq. +load("jstests/aggregation/extras/utils.js"); + +const testName = "json_schema_misc_validation"; +const testDB = db.getSiblingDB(testName); +assert.commandWorked(testDB.dropDatabase()); +assert.commandWorked(testDB.createCollection(testName)); +const coll = testDB.getCollection(testName); +coll.drop(); + +const isMongos = (testDB.runCommand("ismaster").msg === "isdbgrid"); + +// Test that $jsonSchema is rejected in an $elemMatch projection. +assert.throws(function() { + coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount(); +}); + +// Test that an invalid $jsonSchema fails to parse in a count command. +const invalidSchema = { + invalid: {} +}; +assert.throws(function() { + coll.count({$jsonSchema: invalidSchema}); +}); + +// Test that an invalid $jsonSchema fails to parse in a $geoNear query. +assert.commandWorked(coll.createIndex({geo: "2dsphere"})); +let res = testDB.runCommand({ + aggregate: coll.getName(), + cursor: {}, + pipeline: [{ + $geoNear: { + near: [30, 40], + distanceField: "dis", + query: {$jsonSchema: invalidSchema}, + } + }], +}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); +assert.neq(-1, + res.errmsg.indexOf("Unknown $jsonSchema keyword"), + `$geoNear failed for a reason other than invalid query: ${tojson(res)}`); + +// Test that an invalid $jsonSchema fails to parse in a distinct command. +assert.throws(function() { + coll.distinct("a", {$jsonSchema: invalidSchema}); +}); + +// Test that an invalid $jsonSchema fails to parse in a $match stage within a view. +res = testDB.createView("invalid", coll.getName(), [{$match: {$jsonSchema: invalidSchema}}]); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +// Test that an invalid $jsonSchema fails to parse in a listCollections command. +res = testDB.runCommand({listCollections: 1, filter: {$jsonSchema: invalidSchema}}); +assert.commandFailedWithCode(res, ErrorCodes.FailedToParse); + +// Test that a valid $jsonSchema is legal in a count command. 
+coll.drop(); +assert.writeOK(coll.insert({a: 1, b: "str"})); +assert.writeOK(coll.insert({a: 1, b: 1})); +assert.eq(1, coll.count({$jsonSchema: {properties: {a: {type: "number"}, b: {type: "string"}}}})); + +// Test that a valid $jsonSchema is legal in a $geoNear stage. +const point = { + type: "Point", + coordinates: [31.0, 41.0] +}; +assert.writeOK(coll.insert({geo: point, a: 1})); +assert.writeOK(coll.insert({geo: point, a: 0})); +assert.commandWorked(coll.createIndex({geo: "2dsphere"})); +res = coll.aggregate({ + $geoNear: { + near: [30, 40], + spherical: true, + query: {$jsonSchema: {properties: {a: {minimum: 1}}}}, + distanceField: "dis", + includeLocs: "loc", + } + }) + .toArray(); +assert.eq(1, res.length, tojson(res)); +assert.eq(res[0].loc, point, tojson(res)); + +// Test that a valid $jsonSchema is legal in a distinct command. +coll.drop(); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: "str"})); +assert.writeOK(coll.insert({a: ["STR", "str"]})); + +assert(arrayEq([1, 2], coll.distinct("a", {$jsonSchema: {properties: {a: {type: "number"}}}}))); + +// Test that $jsonSchema in a query does not respect the collection-default collation. +let schema = {properties: {a: {enum: ["STR"]}}}; +const caseInsensitiveCollation = { + locale: "en_US", + strength: 1 +}; +coll.drop(); +assert.commandWorked( + testDB.createCollection(coll.getName(), {collation: caseInsensitiveCollation})); +assert.writeOK(coll.insert({a: "str"})); +assert.writeOK(coll.insert({a: ["STR", "sTr"]})); +assert.eq(0, coll.find({$jsonSchema: schema}).itcount()); +assert.eq(2, coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount()); +assert.eq(2, coll.find({a: "STR"}).itcount()); + +// Test that $jsonSchema does not respect the collation set explicitly on a query. +coll.drop(); +assert.writeOK(coll.insert({a: "str"})); +assert.writeOK(coll.insert({a: ["STR", "sTr"]})); + +if (testDB.getMongo().useReadCommands()) { + assert.eq(0, coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount()); + assert.eq(2, + coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}) + .collation(caseInsensitiveCollation) + .itcount()); + assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount()); - // Test that $jsonSchema in a query does not respect the collection-default collation. - let schema = {properties: {a: {enum: ["STR"]}}}; - const caseInsensitiveCollation = {locale: "en_US", strength: 1}; - coll.drop(); - assert.commandWorked( - testDB.createCollection(coll.getName(), {collation: caseInsensitiveCollation})); - assert.writeOK(coll.insert({a: "str"})); - assert.writeOK(coll.insert({a: ["STR", "sTr"]})); - assert.eq(0, coll.find({$jsonSchema: schema}).itcount()); - assert.eq(2, coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount()); - assert.eq(2, coll.find({a: "STR"}).itcount()); - - // Test that $jsonSchema does not respect the collation set explicitly on a query. + // Test that $jsonSchema can be used in a $match stage within a view. 
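A minimal sketch of the collation behavior being asserted above: string comparisons inside $jsonSchema always use the simple binary collation, whereas an ordinary equality predicate honors the query collation. The collection name is hypothetical, and the sketch assumes read commands are enabled, as in the guarded block above:

const collationSketch = db.jstests_json_schema_collation_sketch;  // hypothetical
collationSketch.drop();
assert.writeOK(collationSketch.insert({a: "str"}));
const caseInsensitive = {locale: "en_US", strength: 1};
// $jsonSchema's enum comparison ignores the collation, so "STR" does not match "str".
assert.eq(0,
          collationSketch.find({$jsonSchema: {properties: {a: {enum: ["STR"]}}}})
              .collation(caseInsensitive)
              .itcount());
// A plain equality match does honor the case-insensitive collation.
assert.eq(1, collationSketch.find({a: "STR"}).collation(caseInsensitive).itcount());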
coll.drop(); - assert.writeOK(coll.insert({a: "str"})); - assert.writeOK(coll.insert({a: ["STR", "sTr"]})); - - if (testDB.getMongo().useReadCommands()) { - assert.eq(0, - coll.find({$jsonSchema: schema}).collation(caseInsensitiveCollation).itcount()); - assert.eq(2, - coll.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}) - .collation(caseInsensitiveCollation) - .itcount()); - assert.eq(2, coll.find({a: "STR"}).collation(caseInsensitiveCollation).itcount()); - - // Test that $jsonSchema can be used in a $match stage within a view. - coll.drop(); - let bulk = coll.initializeUnorderedBulkOp(); - bulk.insert({name: "Peter", age: 65}); - bulk.insert({name: "Paul", age: 105}); - bulk.insert({name: "Mary", age: 10}); - bulk.insert({name: "John", age: "unknown"}); - bulk.insert({name: "Mark"}); - bulk.insert({}); - assert.writeOK(bulk.execute()); - - assert.commandWorked(testDB.createView( - "seniorCitizens", coll.getName(), [{ - $match: { - $jsonSchema: { - required: ["name", "age"], - properties: - {name: {type: "string"}, age: {type: "number", minimum: 65}} - } + let bulk = coll.initializeUnorderedBulkOp(); + bulk.insert({name: "Peter", age: 65}); + bulk.insert({name: "Paul", age: 105}); + bulk.insert({name: "Mary", age: 10}); + bulk.insert({name: "John", age: "unknown"}); + bulk.insert({name: "Mark"}); + bulk.insert({}); + assert.writeOK(bulk.execute()); + + assert.commandWorked(testDB.createView( + "seniorCitizens", coll.getName(), [{ + $match: { + $jsonSchema: { + required: ["name", "age"], + properties: {name: {type: "string"}, age: {type: "number", minimum: 65}} } - }])); - assert.eq(2, testDB.seniorCitizens.find().itcount()); - } - - // Test that $jsonSchema can be used in the listCollections filter. - res = testDB.runCommand({ - listCollections: 1, - filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}} - }); - assert.commandWorked(res); - assert.eq(1, res.cursor.firstBatch.length); - - // Test that $jsonSchema can be used in the listDatabases filter. - res = testDB.adminCommand( - {listDatabases: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}}); - assert.commandWorked(res); - assert.eq(1, res.databases.length); - - // Test that $jsonSchema can be used in the filter of a $graphLookup stage. - const foreign = testDB.json_schema_foreign; - foreign.drop(); - coll.drop(); - for (let i = 0; i < 10; i++) { - assert.writeOK(foreign.insert({_id: i, n: [i - 1, i + 1]})); - } - assert.writeOK(coll.insert({starting: 0})); - - res = coll.aggregate({ + } + }])); + assert.eq(2, testDB.seniorCitizens.find().itcount()); +} + +// Test that $jsonSchema can be used in the listCollections filter. +res = testDB.runCommand( + {listCollections: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}}); +assert.commandWorked(res); +assert.eq(1, res.cursor.firstBatch.length); + +// Test that $jsonSchema can be used in the listDatabases filter. +res = testDB.adminCommand( + {listDatabases: 1, filter: {$jsonSchema: {properties: {name: {enum: [coll.getName()]}}}}}); +assert.commandWorked(res); +assert.eq(1, res.databases.length); + +// Test that $jsonSchema can be used in the filter of a $graphLookup stage. 
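// An illustrative sketch (not taken verbatim from the patch) of how a $jsonSchema predicate can
// ride along inside $graphLookup's optional 'restrictSearchWithMatch' filter. The "people" and
// "follows" collection names and the field names are hypothetical.
db.follows.drop();
db.people.drop();
for (let i = 0; i < 10; i++) {
    assert.writeOK(db.follows.insert({_id: i, n: [i - 1, i + 1]}));
}
assert.writeOK(db.people.insert({starting: 0}));
var linked = db.people
                 .aggregate([{
                     $graphLookup: {
                         from: "follows",
                         startWith: "$starting",
                         connectFromField: "n",
                         connectToField: "_id",
                         as: "reachable",
                         restrictSearchWithMatch: {$jsonSchema: {properties: {_id: {maximum: 4}}}}
                     }
                 }])
                 .toArray();
// The traversal stops once the schema filter rejects a node, so only _ids 0 through 4 appear.
assert.eq(5, linked[0].reachable.length);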
+const foreign = testDB.json_schema_foreign; +foreign.drop(); +coll.drop(); +for (let i = 0; i < 10; i++) { + assert.writeOK(foreign.insert({_id: i, n: [i - 1, i + 1]})); +} +assert.writeOK(coll.insert({starting: 0})); + +res = coll.aggregate({ $graphLookup: { from: foreign.getName(), startWith: "$starting", @@ -198,137 +201,136 @@ } }) .toArray(); - assert.eq(1, res.length); - assert.eq(res[0].integers.length, 5); - - // Test that $jsonSchema is legal in a delete command. - coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({a: "str"})); - assert.writeOK(coll.insert({a: [3]})); - - schema = {properties: {a: {type: "number", maximum: 2}}}; - - res = coll.deleteMany({$jsonSchema: schema}); - assert.eq(2, res.deletedCount); - assert.eq(0, coll.find({$jsonSchema: schema}).itcount()); - - // Test that $jsonSchema does not respect the collation specified in a delete command. - if (db.getMongo().writeMode() === "commands") { - res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}}, - {collation: caseInsensitiveCollation}); - assert.eq(0, res.deletedCount); - } else { - res = testDB.runCommand({ - delete: coll.getName(), - deletes: [{q: {$jsonSchema: {properties: {a: {enum: ["STR"]}}}}}], - collation: caseInsensitiveCollation, - }); - assert.eq(res.deletedCount); - } - - // Test that $jsonSchema is legal in an update command. - coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); - - res = coll.update({$jsonSchema: schema}, {$inc: {a: 1}}, {multi: true}); - assert.writeOK(res); - assert.eq(2, res.nMatched); - assert.eq(1, coll.find({$jsonSchema: schema}).itcount()); - - // Test that $jsonSchema is legal in a findAndModify command. - coll.drop(); - assert.writeOK(coll.insert({a: "long_string"})); - assert.writeOK(coll.insert({a: "short"})); +assert.eq(1, res.length); +assert.eq(res[0].integers.length, 5); + +// Test that $jsonSchema is legal in a delete command. +coll.drop(); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: "str"})); +assert.writeOK(coll.insert({a: [3]})); + +schema = { + properties: {a: {type: "number", maximum: 2}} +}; + +res = coll.deleteMany({$jsonSchema: schema}); +assert.eq(2, res.deletedCount); +assert.eq(0, coll.find({$jsonSchema: schema}).itcount()); + +// Test that $jsonSchema does not respect the collation specified in a delete command. +if (db.getMongo().writeMode() === "commands") { + res = coll.deleteMany({$jsonSchema: {properties: {a: {enum: ["STR"]}}}}, + {collation: caseInsensitiveCollation}); + assert.eq(0, res.deletedCount); +} else { + res = testDB.runCommand({ + delete: coll.getName(), + deletes: [{q: {$jsonSchema: {properties: {a: {enum: ["STR"]}}}}}], + collation: caseInsensitiveCollation, + }); + assert.eq(res.deletedCount); +} + +// Test that $jsonSchema is legal in an update command. +coll.drop(); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); + +res = coll.update({$jsonSchema: schema}, {$inc: {a: 1}}, {multi: true}); +assert.writeOK(res); +assert.eq(2, res.nMatched); +assert.eq(1, coll.find({$jsonSchema: schema}).itcount()); + +// Test that $jsonSchema is legal in a findAndModify command. 
+coll.drop(); +assert.writeOK(coll.insert({a: "long_string"})); +assert.writeOK(coll.insert({a: "short"})); + +schema = { + properties: {a: {type: "string", minLength: 6}} +}; +res = coll.findAndModify({query: {$jsonSchema: schema}, update: {$set: {a: "extra_long_string"}}}); +assert.eq("long_string", res.a); +assert.eq(1, coll.find({$jsonSchema: schema}).itcount()); + +// Test that $jsonSchema works correctly in the presence of a basic b-tree index. +coll.drop(); +assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); +assert.writeOK(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]})); +assert.writeOK(coll.insert({_id: 3, a: "temp text test"})); + +assert.commandWorked(coll.createIndex({a: 1})); +assert.eq(3, coll.find({$jsonSchema: {}}).itcount()); +assert.eq(2, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount()); +assert.eq(2, + coll.find({$jsonSchema: {required: ["a"], properties: {a: {type: "number"}}}}).itcount()); +assert.eq(2, coll.find({$or: [{$jsonSchema: {properties: {a: {minimum: 2}}}}, {b: 2}]}).itcount()); + +// Test that $jsonSchema works correctly in the presence of a geo index. +coll.dropIndexes(); +assert.commandWorked(coll.createIndex({point: "2dsphere"})); +assert.eq(1, coll.find({$jsonSchema: {required: ["point"]}}).itcount()); + +assert.eq(1, + coll.find({ + $jsonSchema: {properties: {point: {minItems: 2}}}, + point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}} + }) + .itcount()); - schema = {properties: {a: {type: "string", minLength: 6}}}; - res = coll.findAndModify( - {query: {$jsonSchema: schema}, update: {$set: {a: "extra_long_string"}}}); - assert.eq("long_string", res.a); - assert.eq(1, coll.find({$jsonSchema: schema}).itcount()); +coll.dropIndexes(); +assert.commandWorked(coll.createIndex({a: 1, point: "2dsphere"})); +assert.eq(1, coll.find({$jsonSchema: {required: ["a", "point"]}}).itcount()); - // Test that $jsonSchema works correctly in the presence of a basic b-tree index. +assert.eq(1, + coll.find({ + $jsonSchema: {required: ["a"], properties: {a: {minLength: 3}}}, + point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}} + }) + .itcount()); + +assert.eq(1, + coll.find({ + $and: [ + {$jsonSchema: {properties: {point: {maxItems: 2}}}}, + {point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}, a: 2} + ] + }) + .itcount()); + +// Test that $jsonSchema works correctly in the presence of a text index. 
+coll.dropIndexes(); +assert.commandWorked(coll.createIndex({a: "text"})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.eq(3, coll.find({$jsonSchema: {properties: {a: {minLength: 5}}}}).itcount()); +assert.eq(1, coll.find({$jsonSchema: {required: ["a"]}, $text: {$search: "test"}}).itcount()); +assert.eq( + 3, coll.find({$or: [{$jsonSchema: {required: ["a"]}}, {$text: {$search: "TEST"}}]}).itcount()); +assert.eq(1, coll.find({$and: [{$jsonSchema: {}}, {$text: {$search: "TEST"}}]}).itcount()); + +if (!isMongos) { coll.drop(); - assert.writeOK(coll.insert({_id: 1, a: 1, b: 1})); - assert.writeOK(coll.insert({_id: 2, a: 2, b: 2, point: [5, 5]})); - assert.writeOK(coll.insert({_id: 3, a: "temp text test"})); - - assert.commandWorked(coll.createIndex({a: 1})); - assert.eq(3, coll.find({$jsonSchema: {}}).itcount()); - assert.eq(2, coll.find({$jsonSchema: {properties: {a: {type: "number"}}}}).itcount()); - assert.eq( - 2, - coll.find({$jsonSchema: {required: ["a"], properties: {a: {type: "number"}}}}).itcount()); - assert.eq(2, - coll.find({$or: [{$jsonSchema: {properties: {a: {minimum: 2}}}}, {b: 2}]}).itcount()); - - // Test that $jsonSchema works correctly in the presence of a geo index. - coll.dropIndexes(); - assert.commandWorked(coll.createIndex({point: "2dsphere"})); - assert.eq(1, coll.find({$jsonSchema: {required: ["point"]}}).itcount()); - - assert.eq(1, - coll.find({ - $jsonSchema: {properties: {point: {minItems: 2}}}, - point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}} - }) - .itcount()); - - coll.dropIndexes(); - assert.commandWorked(coll.createIndex({a: 1, point: "2dsphere"})); - assert.eq(1, coll.find({$jsonSchema: {required: ["a", "point"]}}).itcount()); - - assert.eq(1, - coll.find({ - $jsonSchema: {required: ["a"], properties: {a: {minLength: 3}}}, - point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}} - }) - .itcount()); + assert.writeOK(coll.insert({_id: 0, a: true})); + + // Test $jsonSchema in the precondition checking for applyOps. + res = testDB.adminCommand({ + applyOps: [ + {op: "u", ns: coll.getFullName(), o2: {_id: 0}, o: {$set: {a: false}}}, + ], + preCondition: [{ + ns: coll.getFullName(), + q: {$jsonSchema: {properties: {a: {type: "boolean"}}}}, + res: {a: true} + }] + }); + assert.commandWorked(res); + assert.eq(1, res.applied); - assert.eq( - 1, - coll.find({ - $and: [ - {$jsonSchema: {properties: {point: {maxItems: 2}}}}, - {point: {$geoNear: {$geometry: {type: "Point", coordinates: [5, 5]}}}, a: 2} - ] - }) - .itcount()); - - // Test that $jsonSchema works correctly in the presence of a text index. - coll.dropIndexes(); - assert.commandWorked(coll.createIndex({a: "text"})); - assert.commandWorked(coll.createIndex({a: 1})); - assert.eq(3, coll.find({$jsonSchema: {properties: {a: {minLength: 5}}}}).itcount()); - assert.eq(1, coll.find({$jsonSchema: {required: ["a"]}, $text: {$search: "test"}}).itcount()); - assert.eq( - 3, - coll.find({$or: [{$jsonSchema: {required: ["a"]}}, {$text: {$search: "TEST"}}]}).itcount()); - assert.eq(1, coll.find({$and: [{$jsonSchema: {}}, {$text: {$search: "TEST"}}]}).itcount()); - - if (!isMongos) { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: true})); - - // Test $jsonSchema in the precondition checking for applyOps. 
- res = testDB.adminCommand({ - applyOps: [ - {op: "u", ns: coll.getFullName(), o2: {_id: 0}, o: {$set: {a: false}}}, - ], - preCondition: [{ - ns: coll.getFullName(), - q: {$jsonSchema: {properties: {a: {type: "boolean"}}}}, - res: {a: true} - }] - }); - assert.commandWorked(res); - assert.eq(1, res.applied); - - // Use majority write concern to clear the drop-pending that can cause lock conflicts with - // transactions. - coll.drop({writeConcern: {w: "majority"}}); - assert.writeOK(coll.insert({_id: 1, a: true})); - } + // Use majority write concern to clear the drop-pending that can cause lock conflicts with + // transactions. + coll.drop({writeConcern: {w: "majority"}}); + assert.writeOK(coll.insert({_id: 1, a: true})); +} }()); diff --git a/jstests/core/json_schema/pattern_properties.js b/jstests/core/json_schema/pattern_properties.js index b94987f2a4a..4c75b78f72a 100644 --- a/jstests/core/json_schema/pattern_properties.js +++ b/jstests/core/json_schema/pattern_properties.js @@ -4,87 +4,83 @@ * Tests for the JSON Schema 'patternProperties' keyword. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.schema_pattern_properties; +const coll = db.schema_pattern_properties; - // Test top-level patternProperties. - assertSchemaMatch( - coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {}, true); - assertSchemaMatch( - coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {c: 1}, true); - assertSchemaMatch(coll, - {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, - {ca: 1, cb: 1}, - true); - assertSchemaMatch(coll, - {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, - {a: "str", ca: 1, cb: 1}, - false); - assertSchemaMatch(coll, - {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, - {a: 1, b: 1, ca: 1, cb: 1}, - false); - assertSchemaMatch(coll, - {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, - {a: 1, b: "str", ca: 1, cb: 1}, - true); +// Test top-level patternProperties. +assertSchemaMatch( + coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {}, true); +assertSchemaMatch( + coll, {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, {c: 1}, true); +assertSchemaMatch(coll, + {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, + {ca: 1, cb: 1}, + true); +assertSchemaMatch(coll, + {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, + {a: "str", ca: 1, cb: 1}, + false); +assertSchemaMatch(coll, + {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, + {a: 1, b: 1, ca: 1, cb: 1}, + false); +assertSchemaMatch(coll, + {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}, + {a: 1, b: "str", ca: 1, cb: 1}, + true); - // Test patternProperties within a nested schema. 
- assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: 1}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: {}}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: {ca: 1, cb: 1}}, - true); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: {ac: "str", ca: 1, cb: 1}}, - false); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: {ac: 1, bc: 1, ca: 1, cb: 1}}, - false); - assertSchemaMatch( - coll, - {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, - {obj: {ac: 1, bc: "str", ca: 1, cb: 1}}, - true); +// Test patternProperties within a nested schema. +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: 1}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: {}}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: {ca: 1, cb: 1}}, + true); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: {ac: "str", ca: 1, cb: 1}}, + false); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: {ac: 1, bc: 1, ca: 1, cb: 1}}, + false); +assertSchemaMatch( + coll, + {properties: {obj: {patternProperties: {"^a": {type: "number"}, "^b": {type: "string"}}}}}, + {obj: {ac: 1, bc: "str", ca: 1, cb: 1}}, + true); - // Test that 'patternProperties' still applies, even if the field name also appears in - // 'properties'. - assertSchemaMatch( - coll, - {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}}, - {aa: 1}, - false); - assertSchemaMatch(coll, - { - properties: { - obj: { - properties: {aa: {type: "number"}}, - patternProperties: {"^a": {type: "string"}} - } - } - }, - {obj: {aa: 1}}, - false); +// Test that 'patternProperties' still applies, even if the field name also appears in +// 'properties'. +assertSchemaMatch(coll, + {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}}, + {aa: 1}, + false); +assertSchemaMatch( + coll, + { + properties: + {obj: {properties: {aa: {type: "number"}}, patternProperties: {"^a": {type: "string"}}}} + }, + {obj: {aa: 1}}, + false); }()); diff --git a/jstests/core/json_schema/required.js b/jstests/core/json_schema/required.js index 4ffc7438b48..a9a0cd67a48 100644 --- a/jstests/core/json_schema/required.js +++ b/jstests/core/json_schema/required.js @@ -4,24 +4,24 @@ * Tests for handling of the JSON Schema 'required' keyword. 
*/ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.jstests_schema_required; +const coll = db.jstests_schema_required; - assertSchemaMatch(coll, {required: ["a"]}, {a: 1}, true); - assertSchemaMatch(coll, {required: ["a"]}, {}, false); - assertSchemaMatch(coll, {required: ["a"]}, {b: 1}, false); - assertSchemaMatch(coll, {required: ["a"]}, {b: {a: 1}}, false); +assertSchemaMatch(coll, {required: ["a"]}, {a: 1}, true); +assertSchemaMatch(coll, {required: ["a"]}, {}, false); +assertSchemaMatch(coll, {required: ["a"]}, {b: 1}, false); +assertSchemaMatch(coll, {required: ["a"]}, {b: {a: 1}}, false); - assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, b: 1, c: 1}, true); - assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, c: 1}, false); - assertSchemaMatch(coll, {required: ["a", "b"]}, {b: 1, c: 1}, false); +assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, b: 1, c: 1}, true); +assertSchemaMatch(coll, {required: ["a", "b"]}, {a: 1, c: 1}, false); +assertSchemaMatch(coll, {required: ["a", "b"]}, {b: 1, c: 1}, false); - assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {}, true); - assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: 1}, true); - assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {b: 1}}, true); - assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {c: 1}}, false); - assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {}}, false); +assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {}, true); +assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: 1}, true); +assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {b: 1}}, true); +assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {c: 1}}, false); +assertSchemaMatch(coll, {properties: {a: {required: ["b"]}}}, {a: {}}, false); }()); diff --git a/jstests/core/json_schema/unique_items.js b/jstests/core/json_schema/unique_items.js index 4e558b5db73..955bae74e88 100644 --- a/jstests/core/json_schema/unique_items.js +++ b/jstests/core/json_schema/unique_items.js @@ -4,62 +4,68 @@ * Tests the JSON Schema "uniqueItems" keyword. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/assert_schema_match.js"); +load("jstests/libs/assert_schema_match.js"); - const coll = db.getCollection("json_schema_unique_items"); - coll.drop(); +const coll = db.getCollection("json_schema_unique_items"); +coll.drop(); - // Test that the JSON Schema fails to parse if "uniqueItems" is not a boolean. - assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1.0}}).itcount()); - assert.throws(() => coll.find({$jsonSchema: {uniqueItems: "true"}}).itcount()); +// Test that the JSON Schema fails to parse if "uniqueItems" is not a boolean. +assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {uniqueItems: 1.0}}).itcount()); +assert.throws(() => coll.find({$jsonSchema: {uniqueItems: "true"}}).itcount()); - // Test that "uniqueItems" has no effect at the top level (but still succeeds). - assertSchemaMatch(coll, {uniqueItems: true}, {}, true); - assertSchemaMatch(coll, {uniqueItems: false}, {}, true); +// Test that "uniqueItems" has no effect at the top level (but still succeeds). 
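// For context: "uniqueItems" only ever constrains array values, and a document is never an
// array, so a top-level "uniqueItems" is a no-op. A quick standalone check (the collection
// name below is illustrative):
var arrays = db.getCollection("json_schema_unique_items_example");
arrays.drop();
assert.writeOK(arrays.insert({a: [1, 1]}));
// Applied to the array-valued field, the keyword filters the document out ...
assert.eq(0, arrays.find({$jsonSchema: {properties: {a: {uniqueItems: true}}}}).itcount());
// ... but at the top level every document still matches.
assert.eq(1, arrays.find({$jsonSchema: {uniqueItems: true}}).itcount());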
+assertSchemaMatch(coll, {uniqueItems: true}, {}, true); +assertSchemaMatch(coll, {uniqueItems: false}, {}, true); - // Test that "uniqueItems" matches when the field is missing or not an array. - let schema = {properties: {a: {uniqueItems: true}}}; - assertSchemaMatch(coll, schema, {}, true); - assertSchemaMatch(coll, schema, {a: "foo"}, true); - assertSchemaMatch(coll, schema, {a: {foo: [1, 1], bar: [2, 2]}}, true); +// Test that "uniqueItems" matches when the field is missing or not an array. +let schema = {properties: {a: {uniqueItems: true}}}; +assertSchemaMatch(coll, schema, {}, true); +assertSchemaMatch(coll, schema, {a: "foo"}, true); +assertSchemaMatch(coll, schema, {a: {foo: [1, 1], bar: [2, 2]}}, true); - // Test that {uniqueItems: true} matches arrays whose items are all unique. - schema = {properties: {a: {uniqueItems: true}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1]}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true); - assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true); - assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true); +// Test that {uniqueItems: true} matches arrays whose items are all unique. +schema = { + properties: {a: {uniqueItems: true}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1]}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true); +assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true); +assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true); - // Test that {uniqueItems: true} rejects arrays with duplicates. - schema = {properties: {a: {uniqueItems: true}}}; - assertSchemaMatch(coll, schema, {a: [1, 1]}, false); - assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, false); - assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, false); - assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, false); - assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, false); - assertSchemaMatch(coll, schema, {a: [null, null]}, false); - assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, false); - assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, false); +// Test that {uniqueItems: true} rejects arrays with duplicates. +schema = { + properties: {a: {uniqueItems: true}} +}; +assertSchemaMatch(coll, schema, {a: [1, 1]}, false); +assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, false); +assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, false); +assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, false); +assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, false); +assertSchemaMatch(coll, schema, {a: [null, null]}, false); +assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, false); +assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, false); - // Test that {uniqueItems: false} has no effect. 
- schema = {properties: {a: {uniqueItems: false}}}; - assertSchemaMatch(coll, schema, {a: []}, true); - assertSchemaMatch(coll, schema, {a: [1]}, true); - assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); - assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true); - assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true); - assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true); - assertSchemaMatch(coll, schema, {a: [1, 1]}, true); - assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, true); - assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, true); - assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, true); - assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, true); - assertSchemaMatch(coll, schema, {a: [null, null]}, true); - assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, true); - assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, true); +// Test that {uniqueItems: false} has no effect. +schema = { + properties: {a: {uniqueItems: false}} +}; +assertSchemaMatch(coll, schema, {a: []}, true); +assertSchemaMatch(coll, schema, {a: [1]}, true); +assertSchemaMatch(coll, schema, {a: [1, 2, 3]}, true); +assertSchemaMatch(coll, schema, {a: ["foo", "FOO"]}, true); +assertSchemaMatch(coll, schema, {a: [{}, "", [], null]}, true); +assertSchemaMatch(coll, schema, {a: [[1, 2], [2, 1]]}, true); +assertSchemaMatch(coll, schema, {a: [1, 1]}, true); +assertSchemaMatch(coll, schema, {a: [NumberLong(1), NumberInt(1)]}, true); +assertSchemaMatch(coll, schema, {a: ["foo", "foo"]}, true); +assertSchemaMatch(coll, schema, {a: [{a: 1}, {a: 1}]}, true); +assertSchemaMatch(coll, schema, {a: [[1, 2], [1, 2]]}, true); +assertSchemaMatch(coll, schema, {a: [null, null]}, true); +assertSchemaMatch(coll, schema, {a: [{x: 1, y: 1}, {y: 1, x: 1}]}, true); +assertSchemaMatch(coll, schema, {a: [{x: [1, 2], y: "a"}, {y: "a", x: [1, 2]}]}, true); }()); diff --git a/jstests/core/jssymbol.js b/jstests/core/jssymbol.js index 6f216b52879..8a5e538aeeb 100644 --- a/jstests/core/jssymbol.js +++ b/jstests/core/jssymbol.js @@ -1,31 +1,31 @@ // Test Symbol.toPrimitive works for DB and BSON objects // (function() { - // Exercise Symbol.toPrimitive on DB objects - assert(`${db}` === 'test'); - assert(isNaN(+db)); +// Exercise Symbol.toPrimitive on DB objects +assert(`${db}` === 'test'); +assert(isNaN(+db)); - // Exercise the special Symbol methods and make sure DB.getProperty handles them - assert(db[Symbol.iterator] != 1); - assert(db[Symbol.match] != 1); - assert(db[Symbol.species] != 1); - assert(db[Symbol.toPrimitive] != 1); +// Exercise the special Symbol methods and make sure DB.getProperty handles them +assert(db[Symbol.iterator] != 1); +assert(db[Symbol.match] != 1); +assert(db[Symbol.species] != 1); +assert(db[Symbol.toPrimitive] != 1); - // Exercise Symbol.toPrimitive on BSON objects - col1 = db.jssymbol_col; - col1.insert({}); - a = db.getCollection("jssymbol_col").getIndexes()[0]; +// Exercise Symbol.toPrimitive on BSON objects +col1 = db.jssymbol_col; +col1.insert({}); +a = db.getCollection("jssymbol_col").getIndexes()[0]; - assert(isNaN(+a)); - assert(+a.v >= 1); - assert(`${a.v}` >= 1); - assert(`${a}` == '[object BSON]'); +assert(isNaN(+a)); +assert(+a.v >= 1); +assert(`${a.v}` >= 1); +assert(`${a}` == '[object BSON]'); - // Exercise the special Symbol methods and make sure BSON.resolve handles them - assert(db[Symbol.iterator] != 1); - assert(db[Symbol.match] != 1); - assert(db[Symbol.species] != 1); - 
assert(db[Symbol.toPrimitive] != 1); +// Exercise the special Symbol methods and make sure BSON.resolve handles them +assert(db[Symbol.iterator] != 1); +assert(db[Symbol.match] != 1); +assert(db[Symbol.species] != 1); +assert(db[Symbol.toPrimitive] != 1); - col1.drop(); +col1.drop(); })(); diff --git a/jstests/core/kill_cursors.js b/jstests/core/kill_cursors.js index 096d8962d2a..a65078028ed 100644 --- a/jstests/core/kill_cursors.js +++ b/jstests/core/kill_cursors.js @@ -10,74 +10,73 @@ // // Test the killCursors command. (function() { - 'use strict'; +'use strict'; - var cmdRes; - var cursor; - var cursorId; +var cmdRes; +var cursor; +var cursorId; - var coll = db.jstest_killcursors; - coll.drop(); +var coll = db.jstest_killcursors; +coll.drop(); - for (var i = 0; i < 10; i++) { - assert.writeOK(coll.insert({_id: i})); - } +for (var i = 0; i < 10; i++) { + assert.writeOK(coll.insert({_id: i})); +} - // killCursors command should fail if the collection name is not a string. - cmdRes = db.runCommand( - {killCursors: {foo: "bad collection param"}, cursors: [NumberLong(123), NumberLong(456)]}); - assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse); +// killCursors command should fail if the collection name is not a string. +cmdRes = db.runCommand( + {killCursors: {foo: "bad collection param"}, cursors: [NumberLong(123), NumberLong(456)]}); +assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse); - // killCursors command should fail if the cursors parameter is not an array. - cmdRes = db.runCommand( - {killCursors: coll.getName(), cursors: {a: NumberLong(123), b: NumberLong(456)}}); - assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse); +// killCursors command should fail if the cursors parameter is not an array. +cmdRes = + db.runCommand({killCursors: coll.getName(), cursors: {a: NumberLong(123), b: NumberLong(456)}}); +assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse); - // killCursors command should fail if the cursors parameter is an empty array. - cmdRes = db.runCommand({killCursors: coll.getName(), cursors: []}); - assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue); +// killCursors command should fail if the cursors parameter is an empty array. +cmdRes = db.runCommand({killCursors: coll.getName(), cursors: []}); +assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue); - // killCursors command should report cursors as not found if the collection does not exist. - cmdRes = db.runCommand( - {killCursors: "non-existent-collection", cursors: [NumberLong(123), NumberLong(456)]}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursorsKilled, []); - assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]); - assert.eq(cmdRes.cursorsAlive, []); - assert.eq(cmdRes.cursorsUnknown, []); +// killCursors command should report cursors as not found if the collection does not exist. +cmdRes = db.runCommand( + {killCursors: "non-existent-collection", cursors: [NumberLong(123), NumberLong(456)]}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursorsKilled, []); +assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]); +assert.eq(cmdRes.cursorsAlive, []); +assert.eq(cmdRes.cursorsUnknown, []); - // killCursors command should report non-existent cursors as "not found". 
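// For reference, the command shape under test, issued directly (the cursor id 123 is an
// arbitrary value that does not correspond to any live cursor):
var reply = db.runCommand({killCursors: "jstest_killcursors", cursors: [NumberLong(123)]});
// A well-formed request succeeds even when nothing matches; unknown ids are reported under
// 'cursorsNotFound' rather than 'cursorsKilled'.
assert.commandWorked(reply);
assert.eq([NumberLong(123)], reply.cursorsNotFound);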
- cmdRes = - db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), NumberLong(456)]}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursorsKilled, []); - assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]); - assert.eq(cmdRes.cursorsAlive, []); - assert.eq(cmdRes.cursorsUnknown, []); +// killCursors command should report non-existent cursors as "not found". +cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), NumberLong(456)]}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursorsKilled, []); +assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]); +assert.eq(cmdRes.cursorsAlive, []); +assert.eq(cmdRes.cursorsUnknown, []); - // Test a case where one cursors exists and is killed but the other does not exist. - cmdRes = db.runCommand({find: coll.getName(), batchSize: 2}); - assert.commandWorked(cmdRes); - cursorId = cmdRes.cursor.id; - assert.neq(cursorId, NumberLong(0)); +// Test a case where one cursors exists and is killed but the other does not exist. +cmdRes = db.runCommand({find: coll.getName(), batchSize: 2}); +assert.commandWorked(cmdRes); +cursorId = cmdRes.cursor.id; +assert.neq(cursorId, NumberLong(0)); - cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursorsKilled, [cursorId]); - assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]); - assert.eq(cmdRes.cursorsAlive, []); - assert.eq(cmdRes.cursorsUnknown, []); +cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursorsKilled, [cursorId]); +assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]); +assert.eq(cmdRes.cursorsAlive, []); +assert.eq(cmdRes.cursorsUnknown, []); - // Test killing a noTimeout cursor. - cmdRes = db.runCommand({find: coll.getName(), batchSize: 2, noCursorTimeout: true}); - assert.commandWorked(cmdRes); - cursorId = cmdRes.cursor.id; - assert.neq(cursorId, NumberLong(0)); +// Test killing a noTimeout cursor. +cmdRes = db.runCommand({find: coll.getName(), batchSize: 2, noCursorTimeout: true}); +assert.commandWorked(cmdRes); +cursorId = cmdRes.cursor.id; +assert.neq(cursorId, NumberLong(0)); - cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursorsKilled, [cursorId]); - assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]); - assert.eq(cmdRes.cursorsAlive, []); - assert.eq(cmdRes.cursorsUnknown, []); +cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursorsKilled, [cursorId]); +assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]); +assert.eq(cmdRes.cursorsAlive, []); +assert.eq(cmdRes.cursorsUnknown, []); })(); diff --git a/jstests/core/killop_drop_collection.js b/jstests/core/killop_drop_collection.js index 621b5c4ace9..b4efd13733b 100644 --- a/jstests/core/killop_drop_collection.js +++ b/jstests/core/killop_drop_collection.js @@ -11,64 +11,62 @@ * ] */ (function() { - "use strict"; +"use strict"; - var collectionName = "killop_drop"; - let collection = db.getCollection(collectionName); - collection.drop(); - for (let i = 0; i < 1000; i++) { - assert.writeOK(collection.insert({x: i})); - } - assert.writeOK(collection.createIndex({x: 1}, {background: true})); - - // Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it. 
- const storageEngine = jsTest.options().storageEngine; - let fsyncRes = db.fsyncLock(); - if (!fsyncRes.ok) { - assert.commandFailedWithCode(fsyncRes, ErrorCodes.CommandNotSupported); - jsTest.log("Skipping test on storage engine " + storageEngine + - ", which does not support fsyncLock."); - return; - } +var collectionName = "killop_drop"; +let collection = db.getCollection(collectionName); +collection.drop(); +for (let i = 0; i < 1000; i++) { + assert.writeOK(collection.insert({x: i})); +} +assert.writeOK(collection.createIndex({x: 1}, {background: true})); - // Kick off a drop on the collection. - const useDefaultPort = null; - const noConnect = false; - // The drop will occasionally, and legitimately be interrupted by killOp (and not succeed). - let awaitDropCommand = startParallelShell(function() { - let res = db.getSiblingDB("test").runCommand({drop: "killop_drop"}); - let collectionFound = db.getCollectionNames().includes("killop_drop"); - if (res.ok == 1) { - // Ensure that the collection has been dropped. - assert( - !collectionFound, - "Expected collection to not appear in listCollections output after being dropped"); - } else { - // Ensure that the collection hasn't been dropped. - assert(collectionFound, - "Expected collection to appear in listCollections output after drop failed"); - } - }, useDefaultPort, noConnect); +// Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it. +const storageEngine = jsTest.options().storageEngine; +let fsyncRes = db.fsyncLock(); +if (!fsyncRes.ok) { + assert.commandFailedWithCode(fsyncRes, ErrorCodes.CommandNotSupported); + jsTest.log("Skipping test on storage engine " + storageEngine + + ", which does not support fsyncLock."); + return; +} - // Wait for the drop operation to appear in the db.currentOp() output. - let dropCommandOpId = null; - assert.soon(function() { - let dropOpsInProgress = db.currentOp().inprog.filter( - op => op.command && op.command.drop === collection.getName()); - if (dropOpsInProgress.length > 0) { - dropCommandOpId = dropOpsInProgress[0].opid; - } - return dropCommandOpId; - }); +// Kick off a drop on the collection. +const useDefaultPort = null; +const noConnect = false; +// The drop will occasionally, and legitimately be interrupted by killOp (and not succeed). +let awaitDropCommand = startParallelShell(function() { + let res = db.getSiblingDB("test").runCommand({drop: "killop_drop"}); + let collectionFound = db.getCollectionNames().includes("killop_drop"); + if (res.ok == 1) { + // Ensure that the collection has been dropped. + assert(!collectionFound, + "Expected collection to not appear in listCollections output after being dropped"); + } else { + // Ensure that the collection hasn't been dropped. + assert(collectionFound, + "Expected collection to appear in listCollections output after drop failed"); + } +}, useDefaultPort, noConnect); - // Issue a killOp for the drop command, then unlock the server. We expect that the drop - // operation was *not* killed, and that the collection was dropped successfully. - assert.commandWorked(db.killOp(dropCommandOpId)); - let unlockRes = assert.commandWorked(db.fsyncUnlock()); - assert.eq(0, - unlockRes.lockCount, - "Expected the number of fsyncLocks to be zero after issuing fsyncUnlock"); +// Wait for the drop operation to appear in the db.currentOp() output. 
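// For orientation: a collection drop shows up in db.currentOp() with the target collection
// name in op.command.drop, which is exactly what the polling loop below keys on. The same
// check run standalone:
var dropOps = db.currentOp().inprog.filter(
    op => op.command && op.command.drop === "killop_drop");
printjson(dropOps.map(op => op.opid));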
+let dropCommandOpId = null; +assert.soon(function() { + let dropOpsInProgress = + db.currentOp().inprog.filter(op => op.command && op.command.drop === collection.getName()); + if (dropOpsInProgress.length > 0) { + dropCommandOpId = dropOpsInProgress[0].opid; + } + return dropCommandOpId; +}); - awaitDropCommand(); +// Issue a killOp for the drop command, then unlock the server. We expect that the drop +// operation was *not* killed, and that the collection was dropped successfully. +assert.commandWorked(db.killOp(dropCommandOpId)); +let unlockRes = assert.commandWorked(db.fsyncUnlock()); +assert.eq(0, + unlockRes.lockCount, + "Expected the number of fsyncLocks to be zero after issuing fsyncUnlock"); +awaitDropCommand(); }()); diff --git a/jstests/core/list_all_local_sessions.js b/jstests/core/list_all_local_sessions.js index 78189b1b324..72226dfbb08 100644 --- a/jstests/core/list_all_local_sessions.js +++ b/jstests/core/list_all_local_sessions.js @@ -11,34 +11,34 @@ // ] (function() { - 'use strict'; +'use strict'; - const admin = db.getSisterDB('admin'); +const admin = db.getSisterDB('admin'); - // Get current log level. - let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; +// Get current log level. +let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; - try { - const listAllLocalSessions = function() { - return admin.aggregate([{'$listLocalSessions': {allUsers: true}}]); - }; +try { + const listAllLocalSessions = function() { + return admin.aggregate([{'$listLocalSessions': {allUsers: true}}]); + }; - // Start a new session and capture its sessionId. - const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id; - assert(myid !== undefined); + // Start a new session and capture its sessionId. + const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id; + assert(myid !== undefined); - // Ensure that the cache now contains the session and is visible by admin. - const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray(); - assert.gte(resultArray.length, 1); - const resultArrayMine = resultArray - .map(function(sess) { - return sess._id.id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id}, {x: myid}); - }); - assert.eq(resultArrayMine.length, 1); - } finally { - admin.setLogLevel(originalLogLevel); - } + // Ensure that the cache now contains the session and is visible by admin. + const resultArray = assert.doesNotThrow(listAllLocalSessions).toArray(); + assert.gte(resultArray.length, 1); + const resultArrayMine = resultArray + .map(function(sess) { + return sess._id.id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id}, {x: myid}); + }); + assert.eq(resultArrayMine.length, 1); +} finally { + admin.setLogLevel(originalLogLevel); +} })(); diff --git a/jstests/core/list_all_sessions.js b/jstests/core/list_all_sessions.js index 88bd83da628..9d02b99167b 100644 --- a/jstests/core/list_all_sessions.js +++ b/jstests/core/list_all_sessions.js @@ -8,51 +8,51 @@ // Basic tests for the $listSessions {allUsers:true} aggregation stage. 
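// For orientation, the stage reads session records that have been flushed to
// config.system.sessions, so a standalone invocation typically forces a cache refresh first.
// A sketch, assuming sufficient privileges to list all users' sessions:
assert.commandWorked(db.adminCommand({startSession: 1}));
assert.commandWorked(db.adminCommand({refreshLogicalSessionCacheNow: 1}));
var allSessions = db.getSiblingDB("config")
                      .system.sessions.aggregate([{$listSessions: {allUsers: true}}])
                      .toArray();
assert.gte(allSessions.length, 1);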
(function() { - 'use strict'; - load('jstests/aggregation/extras/utils.js'); - - const admin = db.getSiblingDB("admin"); - const config = db.getSiblingDB("config"); - const pipeline = [{'$listSessions': {allUsers: true}}]; - function listSessions() { - return config.system.sessions.aggregate(pipeline); - } - function listSessionsWithFilter(filter) { - return config.system.sessions.aggregate( - [{'$listSessions': {allUsers: true}}, {$match: filter}]); - } - - // Get current log level. - let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; - - try { - // Start a new session and capture its sessionId. - const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id; - assert(myid !== undefined); - assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1})); - - // Ensure that the cache now contains the session and is visible by admin. - assert.soon(function() { - const resultArray = listSessions().toArray(); - if (resultArray.length < 1) { - return false; - } - const resultArrayMine = resultArray - .map(function(sess) { - return sess._id.id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id}, {x: myid}); - }); - return resultArrayMine.length == 1; - }, "Failed to locate session in collection"); - - const sessionList = listSessionsWithFilter({_id: "non_existent"}).toArray(); - assert.eq(0, sessionList.length, tojson(sessionList)); - - // Make sure pipelining other collections fail. - assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace); - } finally { - admin.setLogLevel(originalLogLevel); - } +'use strict'; +load('jstests/aggregation/extras/utils.js'); + +const admin = db.getSiblingDB("admin"); +const config = db.getSiblingDB("config"); +const pipeline = [{'$listSessions': {allUsers: true}}]; +function listSessions() { + return config.system.sessions.aggregate(pipeline); +} +function listSessionsWithFilter(filter) { + return config.system.sessions.aggregate( + [{'$listSessions': {allUsers: true}}, {$match: filter}]); +} + +// Get current log level. +let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; + +try { + // Start a new session and capture its sessionId. + const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id; + assert(myid !== undefined); + assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1})); + + // Ensure that the cache now contains the session and is visible by admin. + assert.soon(function() { + const resultArray = listSessions().toArray(); + if (resultArray.length < 1) { + return false; + } + const resultArrayMine = resultArray + .map(function(sess) { + return sess._id.id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id}, {x: myid}); + }); + return resultArrayMine.length == 1; + }, "Failed to locate session in collection"); + + const sessionList = listSessionsWithFilter({_id: "non_existent"}).toArray(); + assert.eq(0, sessionList.length, tojson(sessionList)); + + // Make sure pipelining other collections fail. + assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace); +} finally { + admin.setLogLevel(originalLogLevel); +} })(); diff --git a/jstests/core/list_collections1.js b/jstests/core/list_collections1.js index ddc563cf852..39445a3ff91 100644 --- a/jstests/core/list_collections1.js +++ b/jstests/core/list_collections1.js @@ -12,291 +12,290 @@ // listCollections output. 
(function() { - "use strict"; - - var mydb = db.getSiblingDB("list_collections1"); - var cursor; - var res; - var collObj; - - // - // Test basic command output. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - res = mydb.runCommand("listCollections"); - assert.commandWorked(res); - assert.eq('object', typeof(res.cursor)); - assert.eq(0, res.cursor.id); - assert.eq('string', typeof(res.cursor.ns)); - collObj = res.cursor.firstBatch.filter(function(c) { - return c.name === "foo"; - })[0]; - assert(collObj); - assert.eq('object', typeof(collObj.options)); - assert.eq('collection', collObj.type, tojson(collObj)); - assert.eq(false, collObj.info.readOnly, tojson(collObj)); - assert.eq("object", typeof(collObj.idIndex), tojson(collObj)); - assert(collObj.idIndex.hasOwnProperty("v"), tojson(collObj)); - - // - // Test basic command output for views. - // - - assert.commandWorked(mydb.createView("bar", "foo", [])); - res = mydb.runCommand("listCollections"); - assert.commandWorked(res); - collObj = res.cursor.firstBatch.filter(function(c) { - return c.name === "bar"; - })[0]; - assert(collObj); - assert.eq("object", typeof(collObj.options), tojson(collObj)); - assert.eq("foo", collObj.options.viewOn, tojson(collObj)); - assert.eq([], collObj.options.pipeline, tojson(collObj)); - assert.eq("view", collObj.type, tojson(collObj)); - assert.eq(true, collObj.info.readOnly, tojson(collObj)); - assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj)); - - // - // Test basic usage with DBCommandCursor. - // - - var getListCollectionsCursor = function(options, subsequentBatchSize) { - return new DBCommandCursor( - mydb, mydb.runCommand("listCollections", options), subsequentBatchSize); - }; - - var cursorCountMatching = function(cursor, pred) { - return cursor.toArray().filter(pred).length; - }; - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { - return c.name === "foo"; - })); - - // - // Test that the collection metadata object is returned correctly. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.commandWorked(mydb.runCommand( - {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]})); - assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { - return c.name === "foo" && c.options.temp === undefined; - })); - assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { - return c.name === "bar" && c.options.temp === true; - })); - - // - // Test basic usage of "filter" option. 
- // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.commandWorked(mydb.runCommand( - {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]})); - assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { - return c.name === "foo" || c.name === "bar"; - })); - assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); - assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); - assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); - mydb.foo.drop(); - assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { - return c.name === "foo" || c.name === "bar"; - })); - assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); - assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); - assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); - mydb.bar.drop(); - assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { - return c.name === "foo" || c.name === "bar"; - })); - assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); - assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); - assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); - - // - // Test for invalid values of "filter". - // - - assert.throws(function() { - getListCollectionsCursor({filter: {$invalid: 1}}); - }); - assert.throws(function() { - getListCollectionsCursor({filter: 0}); - }); - assert.throws(function() { - getListCollectionsCursor({filter: 'x'}); - }); - assert.throws(function() { - getListCollectionsCursor({filter: []}); - }); - - // - // Test basic usage of "cursor.batchSize" option. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.commandWorked(mydb.createCollection("bar")); - cursor = getListCollectionsCursor({cursor: {batchSize: 2}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - cursor = getListCollectionsCursor({cursor: {batchSize: 1}}); - assert.eq(1, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - cursor = getListCollectionsCursor({cursor: {batchSize: 0}}); - assert.eq(0, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - - cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - - // Test a large batch size, and assert that at least 2 results are returned in the initial - // batch. 
- cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}}); - assert.lte(2, cursor.objsLeftInBatch()); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - - // Ensure that the server accepts an empty object for "cursor". This is equivalent to not - // specifying "cursor" at all. - // - // We do not test for objsLeftInBatch() here, since the default batch size for this command - // is not specified. - cursor = getListCollectionsCursor({cursor: {}}); - assert.eq(2, cursorCountMatching(cursor, function(c) { - return c.name === "foo" || c.name === "bar"; - })); - - // - // Test for invalid values of "cursor" and "cursor.batchSize". - // - - assert.throws(function() { - getListCollectionsCursor({cursor: 0}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: 'x'}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: []}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: {foo: 1}}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: {batchSize: -1}}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: {batchSize: 'x'}}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: {batchSize: {}}}); - }); - assert.throws(function() { - getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}}); - }); - - // - // Test more than 2 batches of results. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.commandWorked(mydb.createCollection("bar")); - assert.commandWorked(mydb.createCollection("baz")); - assert.commandWorked(mydb.createCollection("quux")); - cursor = getListCollectionsCursor({cursor: {batchSize: 0}}, 2); - assert.eq(0, cursor.objsLeftInBatch()); - assert(cursor.hasNext()); - assert.eq(2, cursor.objsLeftInBatch()); - cursor.next(); - assert(cursor.hasNext()); - assert.eq(1, cursor.objsLeftInBatch()); - cursor.next(); - assert(cursor.hasNext()); - assert.eq(2, cursor.objsLeftInBatch()); - cursor.next(); - assert(cursor.hasNext()); - assert.eq(1, cursor.objsLeftInBatch()); - - // - // Test on non-existent database. - // - - assert.commandWorked(mydb.dropDatabase()); - cursor = getListCollectionsCursor(); - assert.eq(0, cursorCountMatching(cursor, function(c) { - return c.name === "foo"; - })); - - // - // Test on empty database. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - mydb.foo.drop(); - cursor = getListCollectionsCursor(); - assert.eq(0, cursorCountMatching(cursor, function(c) { - return c.name === "foo"; - })); - - // - // Test killCursors against a listCollections cursor. - // - - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - assert.commandWorked(mydb.createCollection("bar")); - assert.commandWorked(mydb.createCollection("baz")); - assert.commandWorked(mydb.createCollection("quux")); - - res = mydb.runCommand("listCollections", {cursor: {batchSize: 0}}); - cursor = new DBCommandCursor(mydb, res, 2); - cursor.close(); - cursor = new DBCommandCursor(mydb, res, 2); - assert.throws(function() { - cursor.hasNext(); - }); - - // - // Test parsing of the 'includePendingDrops' flag. If included, its argument must be of - // 'boolean' type. Functional testing of the 'includePendingDrops' flag is done in - // "jstests/replsets". - // - - // Bad argument types. 
- assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: {}}), - ErrorCodes.TypeMismatch); - assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: "s"}), - ErrorCodes.TypeMismatch); - - // Valid argument types. - assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: 1})); - assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true})); - assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: false})); +"use strict"; +var mydb = db.getSiblingDB("list_collections1"); +var cursor; +var res; +var collObj; + +// +// Test basic command output. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +res = mydb.runCommand("listCollections"); +assert.commandWorked(res); +assert.eq('object', typeof (res.cursor)); +assert.eq(0, res.cursor.id); +assert.eq('string', typeof (res.cursor.ns)); +collObj = res.cursor.firstBatch.filter(function(c) { + return c.name === "foo"; +})[0]; +assert(collObj); +assert.eq('object', typeof (collObj.options)); +assert.eq('collection', collObj.type, tojson(collObj)); +assert.eq(false, collObj.info.readOnly, tojson(collObj)); +assert.eq("object", typeof (collObj.idIndex), tojson(collObj)); +assert(collObj.idIndex.hasOwnProperty("v"), tojson(collObj)); + +// +// Test basic command output for views. +// + +assert.commandWorked(mydb.createView("bar", "foo", [])); +res = mydb.runCommand("listCollections"); +assert.commandWorked(res); +collObj = res.cursor.firstBatch.filter(function(c) { + return c.name === "bar"; +})[0]; +assert(collObj); +assert.eq("object", typeof (collObj.options), tojson(collObj)); +assert.eq("foo", collObj.options.viewOn, tojson(collObj)); +assert.eq([], collObj.options.pipeline, tojson(collObj)); +assert.eq("view", collObj.type, tojson(collObj)); +assert.eq(true, collObj.info.readOnly, tojson(collObj)); +assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj)); + +// +// Test basic usage with DBCommandCursor. +// + +var getListCollectionsCursor = function(options, subsequentBatchSize) { + return new DBCommandCursor( + mydb, mydb.runCommand("listCollections", options), subsequentBatchSize); +}; + +var cursorCountMatching = function(cursor, pred) { + return cursor.toArray().filter(pred).length; +}; + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { + return c.name === "foo"; + })); + +// +// Test that the collection metadata object is returned correctly. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.commandWorked(mydb.runCommand( + {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]})); +assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { + return c.name === "foo" && c.options.temp === undefined; + })); +assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) { + return c.name === "bar" && c.options.temp === true; + })); + +// +// Test basic usage of "filter" option. 
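// For reference, the filter is an ordinary match expression applied to each entry of the
// listCollections reply, so a direct invocation looks like this (using the database and
// collection names this test happens to create):
var reply = assert.commandWorked(db.getSiblingDB("list_collections1").runCommand({
    listCollections: 1,
    filter: {name: {$in: ["foo", "bar"]}, "options.temp": true}
}));
printjson(reply.cursor.firstBatch.map(c => c.name));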
+// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.commandWorked(mydb.runCommand( + {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "bar", temp: true}}]})); +assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { + return c.name === "foo" || c.name === "bar"; + })); +assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); +assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); +assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); +mydb.foo.drop(); +assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { + return c.name === "foo" || c.name === "bar"; + })); +assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); +assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); +assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); +mydb.bar.drop(); +assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) { + return c.name === "foo" || c.name === "bar"; + })); +assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount()); +assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount()); +assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount()); + +// +// Test for invalid values of "filter". +// + +assert.throws(function() { + getListCollectionsCursor({filter: {$invalid: 1}}); +}); +assert.throws(function() { + getListCollectionsCursor({filter: 0}); +}); +assert.throws(function() { + getListCollectionsCursor({filter: 'x'}); +}); +assert.throws(function() { + getListCollectionsCursor({filter: []}); +}); + +// +// Test basic usage of "cursor.batchSize" option. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.commandWorked(mydb.createCollection("bar")); +cursor = getListCollectionsCursor({cursor: {batchSize: 2}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); +cursor = getListCollectionsCursor({cursor: {batchSize: 1}}); +assert.eq(1, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); +cursor = getListCollectionsCursor({cursor: {batchSize: 0}}); +assert.eq(0, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); + +cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); +cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); + +// Test a large batch size, and assert that at least 2 results are returned in the initial +// batch. +cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}}); +assert.lte(2, cursor.objsLeftInBatch()); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); + +// Ensure that the server accepts an empty object for "cursor". 
This is equivalent to not +// specifying "cursor" at all. +// +// We do not test for objsLeftInBatch() here, since the default batch size for this command +// is not specified. +cursor = getListCollectionsCursor({cursor: {}}); +assert.eq(2, cursorCountMatching(cursor, function(c) { + return c.name === "foo" || c.name === "bar"; + })); + +// +// Test for invalid values of "cursor" and "cursor.batchSize". +// + +assert.throws(function() { + getListCollectionsCursor({cursor: 0}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: 'x'}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: []}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: {foo: 1}}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: {batchSize: -1}}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: {batchSize: 'x'}}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: {batchSize: {}}}); +}); +assert.throws(function() { + getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}}); +}); + +// +// Test more than 2 batches of results. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.commandWorked(mydb.createCollection("bar")); +assert.commandWorked(mydb.createCollection("baz")); +assert.commandWorked(mydb.createCollection("quux")); +cursor = getListCollectionsCursor({cursor: {batchSize: 0}}, 2); +assert.eq(0, cursor.objsLeftInBatch()); +assert(cursor.hasNext()); +assert.eq(2, cursor.objsLeftInBatch()); +cursor.next(); +assert(cursor.hasNext()); +assert.eq(1, cursor.objsLeftInBatch()); +cursor.next(); +assert(cursor.hasNext()); +assert.eq(2, cursor.objsLeftInBatch()); +cursor.next(); +assert(cursor.hasNext()); +assert.eq(1, cursor.objsLeftInBatch()); + +// +// Test on non-existent database. +// + +assert.commandWorked(mydb.dropDatabase()); +cursor = getListCollectionsCursor(); +assert.eq(0, cursorCountMatching(cursor, function(c) { + return c.name === "foo"; + })); + +// +// Test on empty database. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +mydb.foo.drop(); +cursor = getListCollectionsCursor(); +assert.eq(0, cursorCountMatching(cursor, function(c) { + return c.name === "foo"; + })); + +// +// Test killCursors against a listCollections cursor. +// + +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +assert.commandWorked(mydb.createCollection("bar")); +assert.commandWorked(mydb.createCollection("baz")); +assert.commandWorked(mydb.createCollection("quux")); + +res = mydb.runCommand("listCollections", {cursor: {batchSize: 0}}); +cursor = new DBCommandCursor(mydb, res, 2); +cursor.close(); +cursor = new DBCommandCursor(mydb, res, 2); +assert.throws(function() { + cursor.hasNext(); +}); + +// +// Test parsing of the 'includePendingDrops' flag. If included, its argument must be of +// 'boolean' type. Functional testing of the 'includePendingDrops' flag is done in +// "jstests/replsets". +// + +// Bad argument types. +assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: {}}), + ErrorCodes.TypeMismatch); +assert.commandFailedWithCode(mydb.runCommand("listCollections", {includePendingDrops: "s"}), + ErrorCodes.TypeMismatch); + +// Valid argument types. 
+assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: 1})); +assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true})); +assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: false})); }()); diff --git a/jstests/core/list_collections_filter.js b/jstests/core/list_collections_filter.js index fdd1c85429c..e2f93f84ead 100644 --- a/jstests/core/list_collections_filter.js +++ b/jstests/core/list_collections_filter.js @@ -1,115 +1,111 @@ // Test SERVER-18622 listCollections should special case filtering by name. // @tags: [requires_replication] (function() { - "use strict"; - var mydb = db.getSiblingDB("list_collections_filter"); - assert.commandWorked(mydb.dropDatabase()); +"use strict"; +var mydb = db.getSiblingDB("list_collections_filter"); +assert.commandWorked(mydb.dropDatabase()); - // Make some collections. - assert.commandWorked(mydb.createCollection("lists")); - assert.commandWorked(mydb.createCollection("ordered_sets")); - assert.commandWorked(mydb.createCollection("unordered_sets")); - assert.commandWorked(mydb.runCommand({ - applyOps: - [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "arrays_temp", temp: true}}] - })); +// Make some collections. +assert.commandWorked(mydb.createCollection("lists")); +assert.commandWorked(mydb.createCollection("ordered_sets")); +assert.commandWorked(mydb.createCollection("unordered_sets")); +assert.commandWorked(mydb.runCommand( + {applyOps: [{op: "c", ns: mydb.getName() + ".$cmd", o: {create: "arrays_temp", temp: true}}]})); - /** - * Asserts that the names of the collections returned from running the listCollections - * command with the given filter match the expected names. - */ - function testListCollections(filter, expectedNames) { - if (filter === undefined) { - filter = {}; - } +/** + * Asserts that the names of the collections returned from running the listCollections + * command with the given filter match the expected names. + */ +function testListCollections(filter, expectedNames) { + if (filter === undefined) { + filter = {}; + } - var cursor = - new DBCommandCursor(mydb, mydb.runCommand("listCollections", {filter: filter})); - function stripToName(result) { - return result.name; - } - var cursorResultNames = cursor.toArray().map(stripToName); + var cursor = new DBCommandCursor(mydb, mydb.runCommand("listCollections", {filter: filter})); + function stripToName(result) { + return result.name; + } + var cursorResultNames = cursor.toArray().map(stripToName); - assert.eq(cursorResultNames.sort(), expectedNames.sort()); + assert.eq(cursorResultNames.sort(), expectedNames.sort()); - // Assert the shell helper returns the same list, but in sorted order. - var shellResultNames = mydb.getCollectionInfos(filter).map(stripToName); - assert.eq(shellResultNames, expectedNames.sort()); - } + // Assert the shell helper returns the same list, but in sorted order. + var shellResultNames = mydb.getCollectionInfos(filter).map(stripToName); + assert.eq(shellResultNames, expectedNames.sort()); +} - // No filter. - testListCollections({}, ["lists", "ordered_sets", "unordered_sets", "arrays_temp"]); +// No filter. +testListCollections({}, ["lists", "ordered_sets", "unordered_sets", "arrays_temp"]); - // Filter without name. - testListCollections({options: {}}, ["lists", "ordered_sets", "unordered_sets"]); +// Filter without name. +testListCollections({options: {}}, ["lists", "ordered_sets", "unordered_sets"]); - // Filter with exact match on name. 
- testListCollections({name: "lists"}, ["lists"]); - testListCollections({name: "non-existent"}, []); - testListCollections({name: ""}, []); - testListCollections({name: 1234}, []); +// Filter with exact match on name. +testListCollections({name: "lists"}, ["lists"]); +testListCollections({name: "non-existent"}, []); +testListCollections({name: ""}, []); +testListCollections({name: 1234}, []); - // Filter with $in. - testListCollections({name: {$in: ["lists"]}}, ["lists"]); - testListCollections({name: {$in: []}}, []); - testListCollections({name: {$in: ["lists", "ordered_sets", "non-existent", "", 1234]}}, - ["lists", "ordered_sets"]); - // With a regex. - testListCollections({name: {$in: ["lists", /.*_sets$/, "non-existent", "", 1234]}}, - ["lists", "ordered_sets", "unordered_sets"]); +// Filter with $in. +testListCollections({name: {$in: ["lists"]}}, ["lists"]); +testListCollections({name: {$in: []}}, []); +testListCollections({name: {$in: ["lists", "ordered_sets", "non-existent", "", 1234]}}, + ["lists", "ordered_sets"]); +// With a regex. +testListCollections({name: {$in: ["lists", /.*_sets$/, "non-existent", "", 1234]}}, + ["lists", "ordered_sets", "unordered_sets"]); - // Filter with $and. - testListCollections({name: "lists", options: {}}, ["lists"]); - testListCollections({name: "lists", options: {temp: true}}, []); - testListCollections({$and: [{name: "lists"}, {options: {temp: true}}]}, []); - testListCollections({name: "arrays_temp", options: {temp: true}}, ["arrays_temp"]); +// Filter with $and. +testListCollections({name: "lists", options: {}}, ["lists"]); +testListCollections({name: "lists", options: {temp: true}}, []); +testListCollections({$and: [{name: "lists"}, {options: {temp: true}}]}, []); +testListCollections({name: "arrays_temp", options: {temp: true}}, ["arrays_temp"]); - // Filter with $and and $in. - testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}}, - ["lists", "ordered_sets", "unordered_sets"]); - testListCollections({ - $and: [ - {name: {$in: ["lists", /.*_sets$/]}}, - {name: "lists"}, - {options: {}}, - ] - }, - ["lists"]); - testListCollections({ - $and: [ - {name: {$in: ["lists", /.*_sets$/]}}, - {name: "non-existent"}, - {options: {}}, - ] - }, - []); +// Filter with $and and $in. +testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}}, + ["lists", "ordered_sets", "unordered_sets"]); +testListCollections({ + $and: [ + {name: {$in: ["lists", /.*_sets$/]}}, + {name: "lists"}, + {options: {}}, + ] +}, + ["lists"]); +testListCollections({ + $and: [ + {name: {$in: ["lists", /.*_sets$/]}}, + {name: "non-existent"}, + {options: {}}, + ] +}, + []); - // Filter with $expr. - testListCollections({$expr: {$eq: ["$name", "lists"]}}, ["lists"]); +// Filter with $expr. +testListCollections({$expr: {$eq: ["$name", "lists"]}}, ["lists"]); - // Filter with $expr with an unbound variable. - assert.throws(function() { - mydb.getCollectionInfos({$expr: {$eq: ["$name", "$$unbound"]}}); - }); +// Filter with $expr with an unbound variable. +assert.throws(function() { + mydb.getCollectionInfos({$expr: {$eq: ["$name", "$$unbound"]}}); +}); - // Filter with $expr with a runtime error. - assert.throws(function() { - mydb.getCollectionInfos({$expr: {$abs: "$name"}}); - }); +// Filter with $expr with a runtime error. +assert.throws(function() { + mydb.getCollectionInfos({$expr: {$abs: "$name"}}); +}); - // No extensions are allowed in filters. 
- assert.throws(function() { - mydb.getCollectionInfos({$text: {$search: "str"}}); - }); - assert.throws(function() { - mydb.getCollectionInfos({ - $where: function() { - return true; - } - }); - }); - assert.throws(function() { - mydb.getCollectionInfos( - {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}); +// No extensions are allowed in filters. +assert.throws(function() { + mydb.getCollectionInfos({$text: {$search: "str"}}); +}); +assert.throws(function() { + mydb.getCollectionInfos({ + $where: function() { + return true; + } }); +}); +assert.throws(function() { + mydb.getCollectionInfos({a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}}); +}); }()); diff --git a/jstests/core/list_collections_name_only.js b/jstests/core/list_collections_name_only.js index dd50398bcba..9a89fed9e20 100644 --- a/jstests/core/list_collections_name_only.js +++ b/jstests/core/list_collections_name_only.js @@ -1,34 +1,33 @@ // Test nameOnly option of listCollections (function() { - "use strict"; +"use strict"; - var mydb = db.getSiblingDB("list_collections_nameonly"); - var res; - var collObj; +var mydb = db.getSiblingDB("list_collections_nameonly"); +var res; +var collObj; - assert.commandWorked(mydb.dropDatabase()); - assert.commandWorked(mydb.createCollection("foo")); - res = mydb.runCommand({listCollections: 1, nameOnly: true}); - assert.commandWorked(res); - collObj = res.cursor.firstBatch[0]; - // collObj should only have name and type fields. - assert.eq('foo', collObj.name); - assert.eq('collection', collObj.type); - assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj)); - assert(!collObj.hasOwnProperty("options"), tojson(collObj)); - assert(!collObj.hasOwnProperty("info"), tojson(collObj)); - - // listCollections for views still works - assert.commandWorked(mydb.createView("bar", "foo", [])); - res = mydb.runCommand({listCollections: 1, nameOnly: true}); - assert.commandWorked(res); - print(tojson(res)); - collObj = res.cursor.firstBatch.filter(function(c) { - return c.name === "bar"; - })[0]; - assert.eq('bar', collObj.name); - assert.eq('view', collObj.type); - assert(!collObj.hasOwnProperty("options"), tojson(collObj)); - assert(!collObj.hasOwnProperty("info"), tojson(collObj)); +assert.commandWorked(mydb.dropDatabase()); +assert.commandWorked(mydb.createCollection("foo")); +res = mydb.runCommand({listCollections: 1, nameOnly: true}); +assert.commandWorked(res); +collObj = res.cursor.firstBatch[0]; +// collObj should only have name and type fields. 
+assert.eq('foo', collObj.name); +assert.eq('collection', collObj.type); +assert(!collObj.hasOwnProperty("idIndex"), tojson(collObj)); +assert(!collObj.hasOwnProperty("options"), tojson(collObj)); +assert(!collObj.hasOwnProperty("info"), tojson(collObj)); +// listCollections for views still works +assert.commandWorked(mydb.createView("bar", "foo", [])); +res = mydb.runCommand({listCollections: 1, nameOnly: true}); +assert.commandWorked(res); +print(tojson(res)); +collObj = res.cursor.firstBatch.filter(function(c) { + return c.name === "bar"; +})[0]; +assert.eq('bar', collObj.name); +assert.eq('view', collObj.type); +assert(!collObj.hasOwnProperty("options"), tojson(collObj)); +assert(!collObj.hasOwnProperty("info"), tojson(collObj)); }()); diff --git a/jstests/core/list_collections_no_views.js b/jstests/core/list_collections_no_views.js index 1b454eb7978..ed2c1b95d02 100644 --- a/jstests/core/list_collections_no_views.js +++ b/jstests/core/list_collections_no_views.js @@ -3,132 +3,132 @@ // assumes_superuser_permissions, // ] (function() { - 'use strict'; - let mydb = db.getSiblingDB('list_collections_no_views'); - - assert.commandWorked(mydb.createCollection('foo')); - assert.commandWorked(mydb.createView('bar', 'foo', [])); - - let all = mydb.runCommand({listCollections: 1}); - assert.commandWorked(all); - - let allExpected = [ - { - "name": "bar", - "type": "view", - }, - { - "name": "foo", - "type": "collection", - }, - { - "name": "system.views", - "type": "collection", - }, - ]; - - assert.eq(allExpected, - all.cursor.firstBatch - .map(function(c) { - return {name: c.name, type: c.type}; - }) - .sort(function(c1, c2) { - if (c1.name > c2.name) { - return 1; - } - - if (c1.name < c2.name) { - return -1; - } - - return 0; - })); - - // {type: {$exists: false}} is needed for versions <= 3.2 - let collOnlyCommand = { - listCollections: 1, - filter: {$or: [{type: 'collection'}, {type: {$exists: false}}]} - }; - - let collOnly = mydb.runCommand(collOnlyCommand); - assert.commandWorked(collOnly); - - let collOnlyExpected = [ - { - "name": "foo", - "type": "collection", - }, - { - "name": "system.views", - "type": "collection", - }, - ]; - - assert.eq(collOnlyExpected, - collOnly.cursor.firstBatch - .map(function(c) { - return {name: c.name, type: c.type}; - }) - .sort(function(c1, c2) { - if (c1.name > c2.name) { - return 1; - } - - if (c1.name < c2.name) { - return -1; - } - - return 0; - })); - - let viewOnly = mydb.runCommand({listCollections: 1, filter: {type: 'view'}}); - assert.commandWorked(viewOnly); - let viewOnlyExpected = [{ +'use strict'; +let mydb = db.getSiblingDB('list_collections_no_views'); + +assert.commandWorked(mydb.createCollection('foo')); +assert.commandWorked(mydb.createView('bar', 'foo', [])); + +let all = mydb.runCommand({listCollections: 1}); +assert.commandWorked(all); + +let allExpected = [ + { "name": "bar", "type": "view", - }]; - - assert.eq(viewOnlyExpected, - viewOnly.cursor.firstBatch - .map(function(c) { - return {name: c.name, type: c.type}; - }) - .sort(function(c1, c2) { - if (c1.name > c2.name) { - return 1; - } - - if (c1.name < c2.name) { - return -1; - } - - return 0; - })); - - let views = mydb.getCollection('system.views'); - views.insertOne({invalid: NumberLong(1000)}); - - let collOnlyInvalidView = mydb.runCommand(collOnlyCommand); - assert.eq(collOnlyExpected, - collOnlyInvalidView.cursor.firstBatch - .map(function(c) { - return {name: c.name, type: c.type}; - }) - .sort(function(c1, c2) { - if (c1.name > c2.name) { - return 1; - } - - if 
(c1.name < c2.name) { - return -1; - } - - return 0; - })); - - assert.commandFailed(mydb.runCommand({listCollections: 1})); - assert.commandFailed(mydb.runCommand({listCollections: 1, filter: {type: 'view'}})); - - // Fix database state for end of test validation and burn-in tests - mydb.dropDatabase(); + }, + { + "name": "foo", + "type": "collection", + }, + { + "name": "system.views", + "type": "collection", + }, +]; + +assert.eq(allExpected, + all.cursor.firstBatch + .map(function(c) { + return {name: c.name, type: c.type}; + }) + .sort(function(c1, c2) { + if (c1.name > c2.name) { + return 1; + } + + if (c1.name < c2.name) { + return -1; + } + + return 0; + })); + +// {type: {$exists: false}} is needed for versions <= 3.2 +let collOnlyCommand = { + listCollections: 1, + filter: {$or: [{type: 'collection'}, {type: {$exists: false}}]} +}; + +let collOnly = mydb.runCommand(collOnlyCommand); +assert.commandWorked(collOnly); + +let collOnlyExpected = [ + { + "name": "foo", + "type": "collection", + }, + { + "name": "system.views", + "type": "collection", + }, +]; + +assert.eq(collOnlyExpected, + collOnly.cursor.firstBatch + .map(function(c) { + return {name: c.name, type: c.type}; + }) + .sort(function(c1, c2) { + if (c1.name > c2.name) { + return 1; + } + + if (c1.name < c2.name) { + return -1; + } + + return 0; + })); + +let viewOnly = mydb.runCommand({listCollections: 1, filter: {type: 'view'}}); +assert.commandWorked(viewOnly); +let viewOnlyExpected = [{ + "name": "bar", + "type": "view", +}]; + +assert.eq(viewOnlyExpected, + viewOnly.cursor.firstBatch + .map(function(c) { + return {name: c.name, type: c.type}; + }) + .sort(function(c1, c2) { + if (c1.name > c2.name) { + return 1; + } + + if (c1.name < c2.name) { + return -1; + } + + return 0; + })); + +let views = mydb.getCollection('system.views'); +views.insertOne({invalid: NumberLong(1000)}); + +let collOnlyInvalidView = mydb.runCommand(collOnlyCommand); +assert.eq(collOnlyExpected, + collOnlyInvalidView.cursor.firstBatch + .map(function(c) { + return {name: c.name, type: c.type}; + }) + .sort(function(c1, c2) { + if (c1.name > c2.name) { + return 1; + } + + if (c1.name < c2.name) { + return -1; + } + + return 0; + })); + +assert.commandFailed(mydb.runCommand({listCollections: 1})); +assert.commandFailed(mydb.runCommand({listCollections: 1, filter: {type: 'view'}})); + +// Fix database state for end of test validation and burn-in tests +mydb.dropDatabase(); })(); diff --git a/jstests/core/list_commands.js b/jstests/core/list_commands.js index 0eadce58507..cfda8f0d91c 100644 --- a/jstests/core/list_commands.js +++ b/jstests/core/list_commands.js @@ -1,39 +1,39 @@ // Test for listCommands. (function() { - "use strict"; +"use strict"; - var commands = db.runCommand({listCommands: 1}); - assert.commandWorked(commands); +var commands = db.runCommand({listCommands: 1}); +assert.commandWorked(commands); - // Test that result is sorted. - function isSorted(obj) { - var previousProperty; - for (var property in obj["commands"]) { - if (previousProperty && (previousProperty > property)) { - return false; - } - previousProperty = property; +// Test that result is sorted. +function isSorted(obj) { + var previousProperty; + for (var property in obj["commands"]) { + if (previousProperty && (previousProperty > property)) { + return false; } - return true; + previousProperty = property; } - assert(isSorted(commands)); + return true; +} +assert(isSorted(commands)); - // Test that result contains basic commands. 
- assert(commands.hasOwnProperty("commands")); - assert(commands["commands"].hasOwnProperty("isMaster")); - assert(commands["commands"].hasOwnProperty("insert")); - assert(commands["commands"].hasOwnProperty("ping")); +// Test that result contains basic commands. +assert(commands.hasOwnProperty("commands")); +assert(commands["commands"].hasOwnProperty("isMaster")); +assert(commands["commands"].hasOwnProperty("insert")); +assert(commands["commands"].hasOwnProperty("ping")); - // Test that commands listed have required properties - const isMaster = commands["commands"]["isMaster"]; - assert(isMaster.hasOwnProperty("help")); - assert(isMaster.hasOwnProperty("slaveOk")); - assert(isMaster.hasOwnProperty("adminOnly")); - assert(isMaster.hasOwnProperty("requiresAuth")); +// Test that commands listed have required properties +const isMaster = commands["commands"]["isMaster"]; +assert(isMaster.hasOwnProperty("help")); +assert(isMaster.hasOwnProperty("slaveOk")); +assert(isMaster.hasOwnProperty("adminOnly")); +assert(isMaster.hasOwnProperty("requiresAuth")); - // Test that requiresAuth outputs correct value - const insert = commands["commands"]["insert"]; - assert(isMaster["requiresAuth"] === false); - assert(insert["requiresAuth"] === true); +// Test that requiresAuth outputs correct value +const insert = commands["commands"]["insert"]; +assert(isMaster["requiresAuth"] === false); +assert(insert["requiresAuth"] === true); })(); diff --git a/jstests/core/list_databases.js b/jstests/core/list_databases.js index 930e6f36322..2a1db9fc5d1 100644 --- a/jstests/core/list_databases.js +++ b/jstests/core/list_databases.js @@ -2,92 +2,91 @@ * Tests for the listDatabases command. */ (function() { - "use strict"; +"use strict"; - // Given the output from the listDatabases command, ensures that the total size reported is the - // sum of the individual db sizes. - function verifySizeSum(listDatabasesOut) { - assert(listDatabasesOut.hasOwnProperty("databases")); - const dbList = listDatabasesOut.databases; - let sizeSum = 0; - for (let i = 0; i < dbList.length; i++) { - sizeSum += dbList[i].sizeOnDisk; - } - assert.eq(sizeSum, listDatabasesOut.totalSize); +// Given the output from the listDatabases command, ensures that the total size reported is the +// sum of the individual db sizes. +function verifySizeSum(listDatabasesOut) { + assert(listDatabasesOut.hasOwnProperty("databases")); + const dbList = listDatabasesOut.databases; + let sizeSum = 0; + for (let i = 0; i < dbList.length; i++) { + sizeSum += dbList[i].sizeOnDisk; } + assert.eq(sizeSum, listDatabasesOut.totalSize); +} - function verifyNameOnly(listDatabasesOut) { - for (let field in listDatabasesOut) { - assert(['databases', 'nameOnly', 'ok', 'operationTime', '$clusterTime'].some((f) => f == - field), - 'unexpected field ' + field); - } - listDatabasesOut.databases.forEach((database) => { - for (let field in database) { - assert.eq(field, "name", "expected name only"); - } - }); +function verifyNameOnly(listDatabasesOut) { + for (let field in listDatabasesOut) { + assert(['databases', 'nameOnly', 'ok', 'operationTime', '$clusterTime'].some((f) => f == + field), + 'unexpected field ' + field); } + listDatabasesOut.databases.forEach((database) => { + for (let field in database) { + assert.eq(field, "name", "expected name only"); + } + }); +} - // Make 4 test databases. 
- db.getSiblingDB("jstest_list_databases_foo").coll.insert({}); - db.getSiblingDB("jstest_list_databases_bar").coll.insert({}); - db.getSiblingDB("jstest_list_databases_baz").coll.insert({}); - db.getSiblingDB("jstest_list_databases_zap").coll.insert({}); +// Make 4 test databases. +db.getSiblingDB("jstest_list_databases_foo").coll.insert({}); +db.getSiblingDB("jstest_list_databases_bar").coll.insert({}); +db.getSiblingDB("jstest_list_databases_baz").coll.insert({}); +db.getSiblingDB("jstest_list_databases_zap").coll.insert({}); - let cmdRes = assert.commandWorked( - db.adminCommand({listDatabases: 1, filter: {name: /jstest_list_databases/}})); - assert.eq(4, cmdRes.databases.length); - verifySizeSum(cmdRes); +let cmdRes = assert.commandWorked( + db.adminCommand({listDatabases: 1, filter: {name: /jstest_list_databases/}})); +assert.eq(4, cmdRes.databases.length); +verifySizeSum(cmdRes); - // Now only list databases starting with a particular prefix. - cmdRes = assert.commandWorked( - db.adminCommand({listDatabases: 1, filter: {name: /^jstest_list_databases_ba/}})); - assert.eq(2, cmdRes.databases.length); - verifySizeSum(cmdRes); +// Now only list databases starting with a particular prefix. +cmdRes = assert.commandWorked( + db.adminCommand({listDatabases: 1, filter: {name: /^jstest_list_databases_ba/}})); +assert.eq(2, cmdRes.databases.length); +verifySizeSum(cmdRes); - // Now return only the admin database. - cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, filter: {name: "admin"}})); - assert.eq(1, cmdRes.databases.length); - verifySizeSum(cmdRes); +// Now return only the admin database. +cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, filter: {name: "admin"}})); +assert.eq(1, cmdRes.databases.length); +verifySizeSum(cmdRes); - // Now return only the names. - cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true})); - assert.lte(4, cmdRes.databases.length, tojson(cmdRes)); - verifyNameOnly(cmdRes); +// Now return only the names. +cmdRes = assert.commandWorked(db.adminCommand({listDatabases: 1, nameOnly: true})); +assert.lte(4, cmdRes.databases.length, tojson(cmdRes)); +verifyNameOnly(cmdRes); - // Now return only the name of the zap database. - cmdRes = assert.commandWorked( - db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: /zap/}})); - assert.eq(1, cmdRes.databases.length, tojson(cmdRes)); - verifyNameOnly(cmdRes); +// Now return only the name of the zap database. +cmdRes = assert.commandWorked( + db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: /zap/}})); +assert.eq(1, cmdRes.databases.length, tojson(cmdRes)); +verifyNameOnly(cmdRes); - // $expr in filter. - cmdRes = assert.commandWorked(db.adminCommand( - {listDatabases: 1, filter: {$expr: {$eq: ["$name", "jstest_list_databases_zap"]}}})); - assert.eq(1, cmdRes.databases.length, tojson(cmdRes)); - assert.eq("jstest_list_databases_zap", cmdRes.databases[0].name, tojson(cmdRes)); +// $expr in filter. +cmdRes = assert.commandWorked(db.adminCommand( + {listDatabases: 1, filter: {$expr: {$eq: ["$name", "jstest_list_databases_zap"]}}})); +assert.eq(1, cmdRes.databases.length, tojson(cmdRes)); +assert.eq("jstest_list_databases_zap", cmdRes.databases[0].name, tojson(cmdRes)); - // $expr with an unbound variable in filter. - assert.commandFailed( - db.adminCommand({listDatabases: 1, filter: {$expr: {$eq: ["$name", "$$unbound"]}}})); +// $expr with an unbound variable in filter. 
+assert.commandFailed( + db.adminCommand({listDatabases: 1, filter: {$expr: {$eq: ["$name", "$$unbound"]}}})); - // $expr with a filter that throws at runtime. - assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$expr: {$abs: "$name"}}})); +// $expr with a filter that throws at runtime. +assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$expr: {$abs: "$name"}}})); - // No extensions are allowed in filters. - assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$text: {$search: "str"}}})); - assert.commandFailed(db.adminCommand({ - listDatabases: 1, - filter: { - $where: function() { - return true; - } +// No extensions are allowed in filters. +assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$text: {$search: "str"}}})); +assert.commandFailed(db.adminCommand({ + listDatabases: 1, + filter: { + $where: function() { + return true; } - })); - assert.commandFailed(db.adminCommand({ - listDatabases: 1, - filter: {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}} - })); - + } +})); +assert.commandFailed(db.adminCommand({ + listDatabases: 1, + filter: {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}} +})); }()); diff --git a/jstests/core/list_indexes.js b/jstests/core/list_indexes.js index f32ff128dbf..b5c3ab4f8b9 100644 --- a/jstests/core/list_indexes.js +++ b/jstests/core/list_indexes.js @@ -7,177 +7,177 @@ load("jstests/libs/fixture_helpers.js"); (function() { - "use strict"; - - var coll = db.list_indexes1; - var cursor; - var res; - var specs; - - // - // Test basic command output. - // - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - res = coll.runCommand("listIndexes"); - assert.commandWorked(res); - assert.eq("object", typeof(res.cursor)); - assert.eq(0, res.cursor.id); - assert.eq("string", typeof(res.cursor.ns)); - assert.eq(1, res.cursor.firstBatch.length); - assert.eq("_id_", res.cursor.firstBatch[0].name); - - // - // Test basic usage with DBCommandCursor. - // - - var getListIndexesCursor = function(coll, options, subsequentBatchSize) { - return new DBCommandCursor( - coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize); - }; - - var cursorGetIndexSpecs = function(cursor) { - return cursor.toArray().sort(function(a, b) { - return a.name > b.name; - }); - }; - - var cursorGetIndexNames = function(cursor) { - return cursorGetIndexSpecs(cursor).map(function(spec) { - return spec.name; - }); - }; - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - assert.eq(["_id_"], cursorGetIndexNames(getListIndexesCursor(coll))); - - // - // Test that the index metadata object is returned correctly. - // - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); - specs = cursorGetIndexSpecs(getListIndexesCursor(coll)); - assert.eq(2, specs.length); - assert.eq("_id_", specs[0].name); - assert.eq(coll.getFullName(), specs[0].ns); - assert.eq({_id: 1}, specs[0].key); - assert(!specs[0].hasOwnProperty("unique")); - assert.eq("a_1", specs[1].name); - assert.eq(coll.getFullName(), specs[1].ns); - assert.eq({a: 1}, specs[1].key); - assert.eq(true, specs[1].unique); - - // - // Test that the command does not accept invalid values for the collection. 
- // - - assert.commandFailed(coll.getDB().runCommand({listIndexes: ""})); - assert.commandFailed(coll.getDB().runCommand({listIndexes: 1})); - assert.commandFailed(coll.getDB().runCommand({listIndexes: {}})); - assert.commandFailed(coll.getDB().runCommand({listIndexes: []})); - - // - // Test basic usage of "cursor.batchSize" option. - // - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: 2}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: 1}}); - assert.eq(1, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}); - assert.eq(0, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberInt(2)}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberLong(2)}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: Math.pow(2, 62)}}); - assert.eq(2, cursor.objsLeftInBatch()); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - // Ensure that the server accepts an empty object for "cursor". This is equivalent to not - // specifying "cursor" at all. - // - // We do not test for objsLeftInBatch() here, since the default batch size for this command is - // not specified. - cursor = getListIndexesCursor(coll, {cursor: {}}); - assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); - - // - // Test more than 2 batches of results. - // - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); - assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true})); - assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true})); - - cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}, 2); - assert.eq(0, cursor.objsLeftInBatch()); - assert(cursor.hasNext()); - assert.eq(2, cursor.objsLeftInBatch()); - - cursor.next(); - assert(cursor.hasNext()); - assert.eq(1, cursor.objsLeftInBatch()); - - cursor.next(); - assert(cursor.hasNext()); - assert.eq(2, cursor.objsLeftInBatch()); - - cursor.next(); - assert(cursor.hasNext()); - assert.eq(1, cursor.objsLeftInBatch()); - - cursor.next(); - assert(!cursor.hasNext()); - - // - // Test on collection with no indexes. The local database is not accessible via mongos. - // - - if (!FixtureHelpers.isMongos(db)) { - let localColl = db.getSiblingDB("local").getCollection("list_indexes1"); - localColl.drop(); - assert.commandWorked( - localColl.getDB().createCollection(localColl.getName(), {autoIndexId: false})); - assert.eq([], cursorGetIndexNames(getListIndexesCursor(localColl))); - localColl.drop(); - } - - // - // Test killCursors against a listCollections cursor. 
- // - - coll.drop(); - assert.commandWorked(coll.getDB().createCollection(coll.getName())); - assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); - assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true})); - assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true})); - - res = coll.runCommand("listIndexes", {cursor: {batchSize: 0}}); - cursor = new DBCommandCursor(coll.getDB(), res, 2); - cursor.close(); - cursor = new DBCommandCursor(coll.getDB(), res, 2); - assert.throws(function() { - cursor.hasNext(); +"use strict"; + +var coll = db.list_indexes1; +var cursor; +var res; +var specs; + +// +// Test basic command output. +// + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +res = coll.runCommand("listIndexes"); +assert.commandWorked(res); +assert.eq("object", typeof (res.cursor)); +assert.eq(0, res.cursor.id); +assert.eq("string", typeof (res.cursor.ns)); +assert.eq(1, res.cursor.firstBatch.length); +assert.eq("_id_", res.cursor.firstBatch[0].name); + +// +// Test basic usage with DBCommandCursor. +// + +var getListIndexesCursor = function(coll, options, subsequentBatchSize) { + return new DBCommandCursor( + coll.getDB(), coll.runCommand("listIndexes", options), subsequentBatchSize); +}; + +var cursorGetIndexSpecs = function(cursor) { + return cursor.toArray().sort(function(a, b) { + return a.name > b.name; }); +}; + +var cursorGetIndexNames = function(cursor) { + return cursorGetIndexSpecs(cursor).map(function(spec) { + return spec.name; + }); +}; + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +assert.eq(["_id_"], cursorGetIndexNames(getListIndexesCursor(coll))); + +// +// Test that the index metadata object is returned correctly. +// + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); +specs = cursorGetIndexSpecs(getListIndexesCursor(coll)); +assert.eq(2, specs.length); +assert.eq("_id_", specs[0].name); +assert.eq(coll.getFullName(), specs[0].ns); +assert.eq({_id: 1}, specs[0].key); +assert(!specs[0].hasOwnProperty("unique")); +assert.eq("a_1", specs[1].name); +assert.eq(coll.getFullName(), specs[1].ns); +assert.eq({a: 1}, specs[1].key); +assert.eq(true, specs[1].unique); + +// +// Test that the command does not accept invalid values for the collection. +// + +assert.commandFailed(coll.getDB().runCommand({listIndexes: ""})); +assert.commandFailed(coll.getDB().runCommand({listIndexes: 1})); +assert.commandFailed(coll.getDB().runCommand({listIndexes: {}})); +assert.commandFailed(coll.getDB().runCommand({listIndexes: []})); + +// +// Test basic usage of "cursor.batchSize" option. 
+// + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: 2}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: 1}}); +assert.eq(1, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}); +assert.eq(0, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberInt(2)}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: NumberLong(2)}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: Math.pow(2, 62)}}); +assert.eq(2, cursor.objsLeftInBatch()); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +// Ensure that the server accepts an empty object for "cursor". This is equivalent to not +// specifying "cursor" at all. +// +// We do not test for objsLeftInBatch() here, since the default batch size for this command is +// not specified. +cursor = getListIndexesCursor(coll, {cursor: {}}); +assert.eq(["_id_", "a_1"], cursorGetIndexNames(cursor)); + +// +// Test more than 2 batches of results. +// + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); +assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true})); +assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true})); + +cursor = getListIndexesCursor(coll, {cursor: {batchSize: 0}}, 2); +assert.eq(0, cursor.objsLeftInBatch()); +assert(cursor.hasNext()); +assert.eq(2, cursor.objsLeftInBatch()); + +cursor.next(); +assert(cursor.hasNext()); +assert.eq(1, cursor.objsLeftInBatch()); + +cursor.next(); +assert(cursor.hasNext()); +assert.eq(2, cursor.objsLeftInBatch()); + +cursor.next(); +assert(cursor.hasNext()); +assert.eq(1, cursor.objsLeftInBatch()); + +cursor.next(); +assert(!cursor.hasNext()); + +// +// Test on collection with no indexes. The local database is not accessible via mongos. +// + +if (!FixtureHelpers.isMongos(db)) { + let localColl = db.getSiblingDB("local").getCollection("list_indexes1"); + localColl.drop(); + assert.commandWorked( + localColl.getDB().createCollection(localColl.getName(), {autoIndexId: false})); + assert.eq([], cursorGetIndexNames(getListIndexesCursor(localColl))); + localColl.drop(); +} + +// +// Test killCursors against a listCollections cursor. 
+// + +coll.drop(); +assert.commandWorked(coll.getDB().createCollection(coll.getName())); +assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true})); +assert.commandWorked(coll.ensureIndex({b: 1}, {unique: true})); +assert.commandWorked(coll.ensureIndex({c: 1}, {unique: true})); + +res = coll.runCommand("listIndexes", {cursor: {batchSize: 0}}); +cursor = new DBCommandCursor(coll.getDB(), res, 2); +cursor.close(); +cursor = new DBCommandCursor(coll.getDB(), res, 2); +assert.throws(function() { + cursor.hasNext(); +}); }()); diff --git a/jstests/core/list_indexes_invalidation.js b/jstests/core/list_indexes_invalidation.js index 38a70ce4005..85ab71eec42 100644 --- a/jstests/core/list_indexes_invalidation.js +++ b/jstests/core/list_indexes_invalidation.js @@ -3,38 +3,37 @@ // @tags: [assumes_unsharded_collection, requires_non_retryable_commands, requires_fastcount] (function() { - 'use strict'; - let collName = 'system_indexes_invalidations'; - let collNameRenamed = 'renamed_collection'; - let coll = db[collName]; - let collRenamed = db[collNameRenamed]; +'use strict'; +let collName = 'system_indexes_invalidations'; +let collNameRenamed = 'renamed_collection'; +let coll = db[collName]; +let collRenamed = db[collNameRenamed]; - function testIndexInvalidation(isRename) { - coll.drop(); - collRenamed.drop(); - assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}, {c: 1}])); +function testIndexInvalidation(isRename) { + coll.drop(); + collRenamed.drop(); + assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}, {c: 1}])); - // Get the first two indexes. - let cmd = {listIndexes: collName}; - Object.extend(cmd, {batchSize: 2}); - let res = db.runCommand(cmd); - assert.commandWorked(res, 'could not run ' + tojson(cmd)); - printjson(res); + // Get the first two indexes. + let cmd = {listIndexes: collName}; + Object.extend(cmd, {batchSize: 2}); + let res = db.runCommand(cmd); + assert.commandWorked(res, 'could not run ' + tojson(cmd)); + printjson(res); - // Ensure the cursor has data, rename or drop the collection, and exhaust the cursor. - let cursor = new DBCommandCursor(db, res); - let errMsg = - 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); - assert(cursor.hasNext(), errMsg); - if (isRename) { - assert.commandWorked(coll.renameCollection(collNameRenamed)); - } else { - assert(coll.drop()); - } - assert.gt(cursor.itcount(), 0, errMsg); + // Ensure the cursor has data, rename or drop the collection, and exhaust the cursor. + let cursor = new DBCommandCursor(db, res); + let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); + assert(cursor.hasNext(), errMsg); + if (isRename) { + assert.commandWorked(coll.renameCollection(collNameRenamed)); + } else { + assert(coll.drop()); } + assert.gt(cursor.itcount(), 0, errMsg); +} - // Test that we invalidate indexes for both collection drops and renames. - testIndexInvalidation(false); - testIndexInvalidation(true); +// Test that we invalidate indexes for both collection drops and renames. +testIndexInvalidation(false); +testIndexInvalidation(true); }()); diff --git a/jstests/core/list_indexes_non_existent_ns.js b/jstests/core/list_indexes_non_existent_ns.js index 0e134862c3a..11a82746b06 100644 --- a/jstests/core/list_indexes_non_existent_ns.js +++ b/jstests/core/list_indexes_non_existent_ns.js @@ -1,18 +1,18 @@ // Test the listIndexes command on non-existent collection.
(function() {
- var dbTest = db.getSiblingDB("list_indexes_non_existent_db");
- assert.commandWorked(dbTest.dropDatabase());
+var dbTest = db.getSiblingDB("list_indexes_non_existent_db");
+assert.commandWorked(dbTest.dropDatabase());
- var coll;
+var coll;
- // Non-existent database
- coll = dbTest.getCollection("list_indexes_non_existent_db");
- assert.commandFailed(coll.runCommand("listIndexes"));
+// Non-existent database
+coll = dbTest.getCollection("list_indexes_non_existent_db");
+assert.commandFailed(coll.runCommand("listIndexes"));
- // Creates the actual database that did not exist till now
- coll.insert({});
+// Inserting a document implicitly creates the database, which did not exist until now
+coll.insert({});
- // Non-existent collection
- coll = dbTest.getCollection("list_indexes_non_existent_collection");
- assert.commandFailed(coll.runCommand("listIndexes"));
+// Non-existent collection
+coll = dbTest.getCollection("list_indexes_non_existent_collection");
+assert.commandFailed(coll.runCommand("listIndexes"));
}());
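For reference, the failure asserted by list_indexes_non_existent_ns.js can be reproduced interactively from the mongo shell. The sketch below uses hypothetical database and collection names ("no_such_db", "no_such_coll") and, like the test, only assumes the command fails; the specific error reported is not asserted by the test itself.

    // Sketch only: run listIndexes against a namespace that does not exist.
    // "no_such_db" and "no_such_coll" are hypothetical names.
    var missingColl = db.getSiblingDB("no_such_db").getCollection("no_such_coll");
    var reply = missingColl.runCommand("listIndexes");
    assert.commandFailed(reply);  // the server typically reports NamespaceNotFound
    printjson(reply);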
diff --git a/jstests/core/list_local_sessions.js b/jstests/core/list_local_sessions.js index 3943ee66c73..c1ba5799c96 100644 --- a/jstests/core/list_local_sessions.js +++ b/jstests/core/list_local_sessions.js @@ -11,73 +11,73 @@ // ] (function() { - 'use strict'; +'use strict'; - const admin = db.getSisterDB('admin'); - function listLocalSessions() { - return admin.aggregate([{'$listLocalSessions': {allUsers: false}}]); - } +const admin = db.getSisterDB('admin'); +function listLocalSessions() { + return admin.aggregate([{'$listLocalSessions': {allUsers: false}}]); +} - // Get current log level. - let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; +// Get current log level. +let originalLogLevel = assert.commandWorked(admin.setLogLevel(1)).was.verbosity; - try { - // Start a new session and capture its sessionId. - const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id; - assert(myid !== undefined); +try { + // Start a new session and capture its sessionId. + const myid = assert.commandWorked(db.runCommand({startSession: 1})).id.id; + assert(myid !== undefined); - // Ensure that the cache now contains the session and is visible. - const resultArray = assert.doesNotThrow(listLocalSessions).toArray(); - assert.gte(resultArray.length, 1); - const resultArrayMine = resultArray - .map(function(sess) { - return sess._id.id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id}, {x: myid}); - }); - assert.eq(resultArrayMine.length, 1); + // Ensure that the cache now contains the session and is visible. + const resultArray = assert.doesNotThrow(listLocalSessions).toArray(); + assert.gte(resultArray.length, 1); + const resultArrayMine = resultArray + .map(function(sess) { + return sess._id.id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id}, {x: myid}); + }); + assert.eq(resultArrayMine.length, 1); - // Try asking for the session by username. - const myusername = (function() { - if (0 == bsonWoCompare({x: resultArray[0]._id.uid}, {x: computeSHA256Block("")})) { - // Code for "we're running in no-auth mode" - return {user: "", db: ""}; - } - const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1})); - const authUsers = connstats.authInfo.authenticatedUsers; - assert(authUsers !== undefined); - assert.eq(authUsers.length, 1); - assert(authUsers[0].user !== undefined); - assert(authUsers[0].db !== undefined); - return {user: authUsers[0].user, db: authUsers[0].db}; - })(); + // Try asking for the session by username. 
+ const myusername = (function() { + if (0 == bsonWoCompare({x: resultArray[0]._id.uid}, {x: computeSHA256Block("")})) { + // Code for "we're running in no-auth mode" + return {user: "", db: ""}; + } + const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1})); + const authUsers = connstats.authInfo.authenticatedUsers; + assert(authUsers !== undefined); + assert.eq(authUsers.length, 1); + assert(authUsers[0].user !== undefined); + assert(authUsers[0].db !== undefined); + return {user: authUsers[0].user, db: authUsers[0].db}; + })(); - const listMyLocalSessions = function() { - return admin.aggregate([{'$listLocalSessions': {users: [myusername]}}]); - }; + const listMyLocalSessions = function() { + return admin.aggregate([{'$listLocalSessions': {users: [myusername]}}]); + }; - const myArray = assert.doesNotThrow(listMyLocalSessions) - .toArray() - .map(function(sess) { - return sess._id.id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id}, {x: myid}); - }); - assert.eq(myArray.length, 1); + const myArray = assert.doesNotThrow(listMyLocalSessions) + .toArray() + .map(function(sess) { + return sess._id.id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id}, {x: myid}); + }); + assert.eq(myArray.length, 1); - print("sessions returned from $listLocalSessions filtered by user: [ " + myArray + - " ]"); - print("sessions returned from un-filtered $listLocalSessions for this user: [ " + - resultArrayMine + " ]"); + print("sessions returned from $listLocalSessions filtered by user: [ " + myArray + + " ]"); + print("sessions returned from un-filtered $listLocalSessions for this user: [ " + + resultArrayMine + " ]"); - assert.eq( - 0, - bsonWoCompare(myArray, resultArrayMine), - "set of listed sessions for user contains different sessions from prior $listLocalSessions run"); + assert.eq( + 0, + bsonWoCompare(myArray, resultArrayMine), + "set of listed sessions for user contains different sessions from prior $listLocalSessions run"); - } finally { - admin.setLogLevel(originalLogLevel); - } +} finally { + admin.setLogLevel(originalLogLevel); +} })(); diff --git a/jstests/core/list_namespaces_invalidation.js b/jstests/core/list_namespaces_invalidation.js index ebd5dd82542..4bfbdffd4e6 100644 --- a/jstests/core/list_namespaces_invalidation.js +++ b/jstests/core/list_namespaces_invalidation.js @@ -1,71 +1,70 @@ // @tags: [requires_non_retryable_commands, requires_fastcount] (function() { - 'use strict'; - let dbInvalidName = 'system_namespaces_invalidations'; - let dbInvalid = db.getSiblingDB(dbInvalidName); - let num_collections = 3; - let DROP = 1; - let RENAME = 2; - let MOVE = 3; - function testNamespaceInvalidation(namespaceAction, batchSize) { - dbInvalid.dropDatabase(); +'use strict'; +let dbInvalidName = 'system_namespaces_invalidations'; +let dbInvalid = db.getSiblingDB(dbInvalidName); +let num_collections = 3; +let DROP = 1; +let RENAME = 2; +let MOVE = 3; +function testNamespaceInvalidation(namespaceAction, batchSize) { + dbInvalid.dropDatabase(); - // Create enough collections to necessitate multiple cursor batches. - for (let i = 0; i < num_collections; i++) { - assert.commandWorked(dbInvalid.createCollection('coll' + i.toString())); - } + // Create enough collections to necessitate multiple cursor batches. + for (let i = 0; i < num_collections; i++) { + assert.commandWorked(dbInvalid.createCollection('coll' + i.toString())); + } - // Get the first two namespaces using listCollections. 
- let cmd = {listCollections: dbInvalidName}; - Object.extend(cmd, {batchSize: batchSize}); - let res = dbInvalid.runCommand(cmd); - assert.commandWorked(res, 'could not run ' + tojson(cmd)); - printjson(res); + // Get the first two namespaces using listCollections. + let cmd = {listCollections: dbInvalidName}; + Object.extend(cmd, {batchSize: batchSize}); + let res = dbInvalid.runCommand(cmd); + assert.commandWorked(res, 'could not run ' + tojson(cmd)); + printjson(res); - // Ensure the cursor has data, invalidate the namespace, and exhaust the cursor. - let cursor = new DBCommandCursor(dbInvalid, res); - let errMsg = - 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); - assert(cursor.hasNext(), errMsg); - if (namespaceAction == RENAME) { - // Rename the collection to something that does not fit in the previously allocated - // memory for the record. - assert.commandWorked( - dbInvalid['coll1'].renameCollection('coll1' + - 'lkdsahflaksjdhfsdkljhfskladhfkahfsakfla' + - 'skfjhaslfaslfkhasklfjhsakljhdsjksahkldjslh')); - } else if (namespaceAction == DROP) { - assert(dbInvalid['coll1'].drop()); - } else if (namespaceAction == MOVE) { - let modCmd = { - collMod: 'coll1', - validator: { - $or: [ - {phone: {$type: "string"}}, - {email: {$regex: /@mongodb\.com$/}}, - {status: {$in: ["Unknown", "Incomplete"]}}, - {address: {$type: "string"}}, - {ssn: {$type: "string"}}, - {favoriteBook: {$type: "string"}}, - {favoriteColor: {$type: "string"}}, - {favoriteBeverage: {$type: "string"}}, - {favoriteDay: {$type: "string"}}, - {favoriteFood: {$type: "string"}}, - {favoriteSport: {$type: "string"}}, - {favoriteMovie: {$type: "string"}}, - {favoriteShow: {$type: "string"}} - ] - } - }; - assert.commandWorked(dbInvalid.runCommand(modCmd)); - } - assert.gt(cursor.itcount(), 0, errMsg); - } - // Test that we invalidate the old namespace record ID when we remove, rename, or move a - // namespace record. - for (let j = 2; j < 7; j++) { - testNamespaceInvalidation(DROP, j); - testNamespaceInvalidation(RENAME, j); - testNamespaceInvalidation(MOVE, j); + // Ensure the cursor has data, invalidate the namespace, and exhaust the cursor. + let cursor = new DBCommandCursor(dbInvalid, res); + let errMsg = 'expected more data from command ' + tojson(cmd) + ', with result ' + tojson(res); + assert(cursor.hasNext(), errMsg); + if (namespaceAction == RENAME) { + // Rename the collection to something that does not fit in the previously allocated + // memory for the record. 
+ assert.commandWorked( + dbInvalid['coll1'].renameCollection('coll1' + + 'lkdsahflaksjdhfsdkljhfskladhfkahfsakfla' + + 'skfjhaslfaslfkhasklfjhsakljhdsjksahkldjslh')); + } else if (namespaceAction == DROP) { + assert(dbInvalid['coll1'].drop()); + } else if (namespaceAction == MOVE) { + let modCmd = { + collMod: 'coll1', + validator: { + $or: [ + {phone: {$type: "string"}}, + {email: {$regex: /@mongodb\.com$/}}, + {status: {$in: ["Unknown", "Incomplete"]}}, + {address: {$type: "string"}}, + {ssn: {$type: "string"}}, + {favoriteBook: {$type: "string"}}, + {favoriteColor: {$type: "string"}}, + {favoriteBeverage: {$type: "string"}}, + {favoriteDay: {$type: "string"}}, + {favoriteFood: {$type: "string"}}, + {favoriteSport: {$type: "string"}}, + {favoriteMovie: {$type: "string"}}, + {favoriteShow: {$type: "string"}} + ] + } + }; + assert.commandWorked(dbInvalid.runCommand(modCmd)); } + assert.gt(cursor.itcount(), 0, errMsg); +} +// Test that we invalidate the old namespace record ID when we remove, rename, or move a +// namespace record. +for (let j = 2; j < 7; j++) { + testNamespaceInvalidation(DROP, j); + testNamespaceInvalidation(RENAME, j); + testNamespaceInvalidation(MOVE, j); +} }()); diff --git a/jstests/core/list_sessions.js b/jstests/core/list_sessions.js index 9b04d3c1aa5..65345ca7c01 100644 --- a/jstests/core/list_sessions.js +++ b/jstests/core/list_sessions.js @@ -8,65 +8,65 @@ // Basic tests for the $listSessions aggregation stage. (function() { - 'use strict'; - load('jstests/aggregation/extras/utils.js'); +'use strict'; +load('jstests/aggregation/extras/utils.js'); - const admin = db.getSiblingDB('admin'); - const config = db.getSiblingDB('config'); - const pipeline = [{'$listSessions': {}}]; - function listSessions() { - return config.system.sessions.aggregate(pipeline); - } +const admin = db.getSiblingDB('admin'); +const config = db.getSiblingDB('config'); +const pipeline = [{'$listSessions': {}}]; +function listSessions() { + return config.system.sessions.aggregate(pipeline); +} - // Start a new session and capture its sessionId. - const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id; - assert(myid !== undefined); +// Start a new session and capture its sessionId. +const myid = assert.commandWorked(admin.runCommand({startSession: 1})).id.id; +assert(myid !== undefined); - // Sync cache to collection and ensure it arrived. - assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1})); - var resultArrayMine; - assert.soon(function() { - const resultArray = listSessions().toArray(); - if (resultArray.length < 1) { - return false; - } - resultArrayMine = resultArray - .map(function(sess) { - return sess._id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id.id}, {x: myid}); - }); - return resultArrayMine.length == 1; - }, "Failed to locate session in collection"); +// Sync cache to collection and ensure it arrived. +assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1})); +var resultArrayMine; +assert.soon(function() { + const resultArray = listSessions().toArray(); + if (resultArray.length < 1) { + return false; + } + resultArrayMine = resultArray + .map(function(sess) { + return sess._id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id.id}, {x: myid}); + }); + return resultArrayMine.length == 1; +}, "Failed to locate session in collection"); - // Try asking for the session by username. 
- const myusername = (function() { - if (0 == bsonWoCompare({x: resultArrayMine[0].uid}, {x: computeSHA256Block("")})) { - // Code for "we're running in no-auth mode" - return {user: "", db: ""}; - } - const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1})); - const authUsers = connstats.authInfo.authenticatedUsers; - assert(authUsers !== undefined); - assert.eq(authUsers.length, 1); - assert(authUsers[0].user !== undefined); - assert(authUsers[0].db !== undefined); - return {user: authUsers[0].user, db: authUsers[0].db}; - })(); - function listMySessions() { - return config.system.sessions.aggregate([{'$listSessions': {users: [myusername]}}]); +// Try asking for the session by username. +const myusername = (function() { + if (0 == bsonWoCompare({x: resultArrayMine[0].uid}, {x: computeSHA256Block("")})) { + // Code for "we're running in no-auth mode" + return {user: "", db: ""}; } - const myArray = listMySessions() - .toArray() - .map(function(sess) { - return sess._id; - }) - .filter(function(id) { - return 0 == bsonWoCompare({x: id.id}, {x: myid}); - }); - assert.eq(0, bsonWoCompare(myArray, resultArrayMine)); + const connstats = assert.commandWorked(db.runCommand({connectionStatus: 1})); + const authUsers = connstats.authInfo.authenticatedUsers; + assert(authUsers !== undefined); + assert.eq(authUsers.length, 1); + assert(authUsers[0].user !== undefined); + assert(authUsers[0].db !== undefined); + return {user: authUsers[0].user, db: authUsers[0].db}; +})(); +function listMySessions() { + return config.system.sessions.aggregate([{'$listSessions': {users: [myusername]}}]); +} +const myArray = listMySessions() + .toArray() + .map(function(sess) { + return sess._id; + }) + .filter(function(id) { + return 0 == bsonWoCompare({x: id.id}, {x: myid}); + }); +assert.eq(0, bsonWoCompare(myArray, resultArrayMine)); - // Make sure pipelining other collections fail. - assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace); +// Make sure pipelining other collections fail. +assertErrorCode(admin.system.collections, pipeline, ErrorCodes.InvalidNamespace); })(); diff --git a/jstests/core/long_index_rename.js b/jstests/core/long_index_rename.js index 41e89825570..df304b777ad 100644 --- a/jstests/core/long_index_rename.js +++ b/jstests/core/long_index_rename.js @@ -3,20 +3,20 @@ // @tags: [requires_non_retryable_commands, assumes_unsharded_collection] (function() { - 'use strict'; +'use strict'; - const coll = db.long_index_rename; - coll.drop(); +const coll = db.long_index_rename; +coll.drop(); - for (let i = 1; i < 10; i++) { - coll.save({a: i}); - } +for (let i = 1; i < 10; i++) { + coll.save({a: i}); +} - // Beginning with 4.2, index namespaces longer than 127 characters are acceptable. - assert.commandWorked(coll.createIndex({b: 1}, {name: 'a'.repeat(8192)})); +// Beginning with 4.2, index namespaces longer than 127 characters are acceptable. +assert.commandWorked(coll.createIndex({b: 1}, {name: 'a'.repeat(8192)})); - // Before 4.2, index namespace lengths were checked while renaming collections. - const dest = db.long_index_rename2; - dest.drop(); - assert.commandWorked(coll.renameCollection(dest.getName())); +// Before 4.2, index namespace lengths were checked while renaming collections. 
+const dest = db.long_index_rename2; +dest.drop(); +assert.commandWorked(coll.renameCollection(dest.getName())); })(); diff --git a/jstests/core/max_doc_size.js b/jstests/core/max_doc_size.js index 775121a5c9f..859896c17f6 100644 --- a/jstests/core/max_doc_size.js +++ b/jstests/core/max_doc_size.js @@ -7,66 +7,66 @@ * - Documents over the maximum BSON size limit cannot be written. */ (function() { - 'use strict'; +'use strict'; - const maxBsonObjectSize = db.isMaster().maxBsonObjectSize; - const docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''}); - const maxStrSize = maxBsonObjectSize - docOverhead; - const maxStr = 'a'.repeat(maxStrSize); - const coll = db.max_doc_size; +const maxBsonObjectSize = db.isMaster().maxBsonObjectSize; +const docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''}); +const maxStrSize = maxBsonObjectSize - docOverhead; +const maxStr = 'a'.repeat(maxStrSize); +const coll = db.max_doc_size; - // - // Test that documents at the size limit can be written and read back. - // - coll.drop(); - assert.commandWorked( - db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]})); - assert.eq(coll.find({}).itcount(), 1); +// +// Test that documents at the size limit can be written and read back. +// +coll.drop(); +assert.commandWorked( + db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]})); +assert.eq(coll.find({}).itcount(), 1); - coll.drop(); - const objectId = new ObjectId(); - assert.commandWorked(db.runCommand({ - update: coll.getName(), - ordered: true, - updates: [{q: {_id: objectId}, u: {_id: objectId, x: maxStr}, upsert: true}] - })); - assert.eq(coll.find({}).itcount(), 1); +coll.drop(); +const objectId = new ObjectId(); +assert.commandWorked(db.runCommand({ + update: coll.getName(), + ordered: true, + updates: [{q: {_id: objectId}, u: {_id: objectId, x: maxStr}, upsert: true}] +})); +assert.eq(coll.find({}).itcount(), 1); - coll.drop(); +coll.drop(); - assert.commandWorked(coll.insert({_id: objectId})); - assert.commandWorked(db.runCommand({ - update: coll.getName(), - ordered: true, - updates: [{q: {_id: objectId}, u: {$set: {x: maxStr}}}] - })); - assert.eq(coll.find({}).itcount(), 1); +assert.commandWorked(coll.insert({_id: objectId})); +assert.commandWorked(db.runCommand({ + update: coll.getName(), + ordered: true, + updates: [{q: {_id: objectId}, u: {$set: {x: maxStr}}}] +})); +assert.eq(coll.find({}).itcount(), 1); - // - // Test that documents over the size limit cannot be written. - // - const largerThanMaxString = maxStr + 'a'; +// +// Test that documents over the size limit cannot be written. 
+// +const largerThanMaxString = maxStr + 'a'; - coll.drop(); - assert.commandFailedWithCode( - db.runCommand( - {insert: coll.getName(), documents: [{_id: new ObjectId(), x: largerThanMaxString}]}), - 2); +coll.drop(); +assert.commandFailedWithCode( + db.runCommand( + {insert: coll.getName(), documents: [{_id: new ObjectId(), x: largerThanMaxString}]}), + 2); - coll.drop(); - assert.commandFailedWithCode(db.runCommand({ - update: coll.getName(), - ordered: true, - updates: [{q: {_id: objectId}, u: {_id: objectId, x: largerThanMaxString}, upsert: true}] - }), - 17420); +coll.drop(); +assert.commandFailedWithCode(db.runCommand({ + update: coll.getName(), + ordered: true, + updates: [{q: {_id: objectId}, u: {_id: objectId, x: largerThanMaxString}, upsert: true}] +}), + 17420); - coll.drop(); - assert.commandWorked(coll.insert({_id: objectId})); - assert.commandFailedWithCode(db.runCommand({ - update: coll.getName(), - ordered: true, - updates: [{q: {_id: objectId}, u: {$set: {x: largerThanMaxString}}}] - }), - 17419); +coll.drop(); +assert.commandWorked(coll.insert({_id: objectId})); +assert.commandFailedWithCode(db.runCommand({ + update: coll.getName(), + ordered: true, + updates: [{q: {_id: objectId}, u: {$set: {x: largerThanMaxString}}}] +}), + 17419); })(); diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js index 8a539cc5493..470d281b080 100644 --- a/jstests/core/max_time_ms.js +++ b/jstests/core/max_time_ms.js @@ -230,14 +230,14 @@ assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(0)}).ok); assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(0)}).ok); assert.throws.automsg(function() { - t.find().maxTimeMS(-1).itcount(); -}); + t.find().maxTimeMS(-1).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(NumberInt(-1)).itcount(); -}); + t.find().maxTimeMS(NumberInt(-1)).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(NumberLong(-1)).itcount(); -}); + t.find().maxTimeMS(NumberLong(-1)).itcount(); + }); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -1}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(-1)}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(-1)}).ok); @@ -260,37 +260,37 @@ assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue)}).ok) assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue)}).ok); assert.throws.automsg(function() { - t.find().maxTimeMS(maxValue + 1).itcount(); -}); + t.find().maxTimeMS(maxValue + 1).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(NumberInt(maxValue + 1)).itcount(); -}); + t.find().maxTimeMS(NumberInt(maxValue + 1)).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(NumberLong(maxValue + 1)).itcount(); -}); + t.find().maxTimeMS(NumberLong(maxValue + 1)).itcount(); + }); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: maxValue + 1}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue + 1)}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue + 1)}).ok); // Verify invalid values are rejected. 
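// (For contrast, a minimal sketch of the accepted forms, reusing the same collection `t`
// from above: maxTimeMS takes a non-negative integral value, supplied as a plain number,
// NumberInt, or NumberLong.)
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: 100}).ok);
assert.doesNotThrow(function() {
    t.find().maxTimeMS(NumberInt(60 * 1000)).itcount();
});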
assert.throws.automsg(function() { - t.find().maxTimeMS(0.1).itcount(); -}); + t.find().maxTimeMS(0.1).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(-0.1).itcount(); -}); + t.find().maxTimeMS(-0.1).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS().itcount(); -}); + t.find().maxTimeMS().itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS("").itcount(); -}); + t.find().maxTimeMS("").itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS(true).itcount(); -}); + t.find().maxTimeMS(true).itcount(); + }); assert.throws.automsg(function() { - t.find().maxTimeMS({}).itcount(); -}); + t.find().maxTimeMS({}).itcount(); + }); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: 0.1}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -0.1}).ok); assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: undefined}).ok); @@ -323,8 +323,8 @@ assert.eq( 1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok); res = t.getDB().runCommand({ping: 1, maxTimeMS: 10 * 1000}); assert(res.ok == 0 && res.code == ErrorCodes.MaxTimeMSExpired, - "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + ", code=" + - res.code); + "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + + ", code=" + res.code); assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok); // maxTimeNeverTimeOut positive test for command. @@ -333,8 +333,8 @@ assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok); res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 100}); assert(res.ok == 1, - "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok + ", code=" + - res.code); + "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok + + ", code=" + res.code); assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok); // maxTimeAlwaysTimeOut positive test for query. diff --git a/jstests/core/min_max_bounds.js b/jstests/core/min_max_bounds.js index 41e20157985..c2171df23fb 100644 --- a/jstests/core/min_max_bounds.js +++ b/jstests/core/min_max_bounds.js @@ -3,74 +3,74 @@ * @tags: [assumes_balancer_off] */ (function() { - 'use strict'; +'use strict'; - load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. - load('jstests/aggregation/extras/utils.js'); // For resultsEq. +load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. +load('jstests/aggregation/extras/utils.js'); // For resultsEq. 
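// A minimal sketch of the min()/max() contract exercised below, using a hypothetical scratch
// collection: min() sets an inclusive lower index bound, max() sets an exclusive upper bound,
// and both must be paired with hint() on the index that supplies the bounds.
const sketchColl = db.query_bound_inclusion_sketch;
sketchColl.drop();
assert.commandWorked(sketchColl.createIndex({a: 1}));
assert.writeOK(sketchColl.insert([{a: 1}, {a: 2}, {a: 3}]));
// Returns {a: 1} and {a: 2}; {a: 3} is excluded because the max() bound is exclusive.
assert.eq(2, sketchColl.find().hint({a: 1}).min({a: 1}).max({a: 3}).itcount());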
- var coll = db.query_bound_inclusion; - coll.drop(); - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 2, b: 2})); - assert.writeOK(coll.insert({a: 3, b: 3})); +var coll = db.query_bound_inclusion; +coll.drop(); +assert.writeOK(coll.insert({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 2, b: 2})); +assert.writeOK(coll.insert({a: 3, b: 3})); - assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({a: 1})); - var res = coll.find().sort({a: 1}).toArray(); - assert.eq(res.length, 3); +var res = coll.find().sort({a: 1}).toArray(); +assert.eq(res.length, 3); +assert.eq(res[0].a, 1); +assert.eq(res[1].a, 2); +assert.eq(res[2].a, 3); + +res = coll.find().sort({a: -1}).toArray(); +assert.eq(res.length, 3); +assert.eq(res[0].a, 3); +assert.eq(res[1].a, 2); +assert.eq(res[2].a, 1); + +res = coll.find().min({a: 1}).max({a: 3}).hint({a: 1}).toArray(); +assert.eq(res.length, 2); +if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { assert.eq(res[0].a, 1); assert.eq(res[1].a, 2); - assert.eq(res[2].a, 3); +} else { + // With more than one shard, we cannot assume the results will come back in order, since we + // did not request a sort. + assert(resultsEq(res.map((result) => result.a), [1, 2])); +} - res = coll.find().sort({a: -1}).toArray(); - assert.eq(res.length, 3); - assert.eq(res[0].a, 3); - assert.eq(res[1].a, 2); - assert.eq(res[2].a, 1); +res = coll.find().min({a: 1}).max({a: 3}).sort({a: -1}).hint({a: 1}).toArray(); +assert.eq(res.length, 2); +assert.eq(res[0].a, 2); +assert.eq(res[1].a, 1); - res = coll.find().min({a: 1}).max({a: 3}).hint({a: 1}).toArray(); - assert.eq(res.length, 2); - if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { - assert.eq(res[0].a, 1); - assert.eq(res[1].a, 2); - } else { - // With more than one shard, we cannot assume the results will come back in order, since we - // did not request a sort. - assert(resultsEq(res.map((result) => result.a), [1, 2])); - } +assert.commandWorked(coll.createIndex({b: -1})); - res = coll.find().min({a: 1}).max({a: 3}).sort({a: -1}).hint({a: 1}).toArray(); - assert.eq(res.length, 2); - assert.eq(res[0].a, 2); - assert.eq(res[1].a, 1); +res = coll.find().sort({b: -1}).toArray(); +assert.eq(res.length, 3); +assert.eq(res[0].b, 3); +assert.eq(res[1].b, 2); +assert.eq(res[2].b, 1); - assert.commandWorked(coll.createIndex({b: -1})); +res = coll.find().sort({b: 1}).toArray(); +assert.eq(res.length, 3); +assert.eq(res[0].b, 1); +assert.eq(res[1].b, 2); +assert.eq(res[2].b, 3); - res = coll.find().sort({b: -1}).toArray(); - assert.eq(res.length, 3); +res = coll.find().min({b: 3}).max({b: 1}).hint({b: -1}).toArray(); +assert.eq(res.length, 2); +if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { assert.eq(res[0].b, 3); assert.eq(res[1].b, 2); - assert.eq(res[2].b, 1); - - res = coll.find().sort({b: 1}).toArray(); - assert.eq(res.length, 3); - assert.eq(res[0].b, 1); - assert.eq(res[1].b, 2); - assert.eq(res[2].b, 3); - - res = coll.find().min({b: 3}).max({b: 1}).hint({b: -1}).toArray(); - assert.eq(res.length, 2); - if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { - assert.eq(res[0].b, 3); - assert.eq(res[1].b, 2); - } else { - // With more than one shard, we cannot assume the results will come back in order, since we - // did not request a sort. - assert(resultsEq(res.map((result) => result.b), [3, 2])); - } +} else { + // With more than one shard, we cannot assume the results will come back in order, since we + // did not request a sort. 
+ assert(resultsEq(res.map((result) => result.b), [3, 2])); +} - res = coll.find().min({b: 3}).max({b: 1}).sort({b: 1}).hint({b: -1}).toArray(); - assert.eq(res.length, 2); - assert.eq(res[0].b, 2); - assert.eq(res[1].b, 3); +res = coll.find().min({b: 3}).max({b: 1}).sort({b: 1}).hint({b: -1}).toArray(); +assert.eq(res.length, 2); +assert.eq(res[0].b, 2); +assert.eq(res[1].b, 3); })(); diff --git a/jstests/core/min_max_hashed_index.js b/jstests/core/min_max_hashed_index.js index aacd1987f44..511f5a9ae62 100644 --- a/jstests/core/min_max_hashed_index.js +++ b/jstests/core/min_max_hashed_index.js @@ -2,17 +2,17 @@ * Check that min() and max() work with a hashed index. */ (function() { - "use strict"; +"use strict"; - const coll = db.min_max_hashed_index; - coll.drop(); - assert.commandWorked(coll.insert({a: "test"})); - assert.commandWorked(coll.createIndex({a: 1})); - const minWithNormalIndex = coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: 1}).toArray(); - assert.eq(minWithNormalIndex, [{a: "test"}]); +const coll = db.min_max_hashed_index; +coll.drop(); +assert.commandWorked(coll.insert({a: "test"})); +assert.commandWorked(coll.createIndex({a: 1})); +const minWithNormalIndex = coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: 1}).toArray(); +assert.eq(minWithNormalIndex, [{a: "test"}]); - assert.commandWorked(coll.createIndex({a: "hashed"})); - const minWithHashedIndex = - coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: "hashed"}).toArray(); - assert.eq(minWithHashedIndex, [{a: "test"}]); +assert.commandWorked(coll.createIndex({a: "hashed"})); +const minWithHashedIndex = + coll.find({}, {_id: 0}).min({a: -Infinity}).hint({a: "hashed"}).toArray(); +assert.eq(minWithHashedIndex, [{a: "test"}]); })(); diff --git a/jstests/core/min_max_key.js b/jstests/core/min_max_key.js index d65d68292fa..e14a7ba4fda 100644 --- a/jstests/core/min_max_key.js +++ b/jstests/core/min_max_key.js @@ -1,98 +1,98 @@ // Tests the behavior of queries using MinKey and MaxKey (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. +load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. 
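// The expectations below follow from MinKey and MaxKey being the extremes of the BSON
// comparison order: MinKey compares below every value of every type, and MaxKey above every
// value. A quick illustration with the shell's bsonWoCompare() helper:
assert.lt(bsonWoCompare({x: MinKey}, {x: null}), 0);
assert.lt(bsonWoCompare({x: MinKey}, {x: 0}), 0);
assert.gt(bsonWoCompare({x: MaxKey}, {x: "zzz"}), 0);
assert.gt(bsonWoCompare({x: MaxKey}, {x: {b: "hi"}}), 0);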
- const coll = db.test_min_max; - coll.drop(); +const coll = db.test_min_max; +coll.drop(); - const allElements = [ +const allElements = [ + {_id: "a_max_key", a: MaxKey}, + {_id: "a_min_key", a: MinKey}, + {_id: "a_null", a: null}, + {_id: "a_number", a: 4}, + {_id: "a_subobject", a: {b: "hi"}}, + {_id: "a_undefined", a: undefined}, + {_id: "a_string", a: "hello"} +]; + +assert.writeOK(coll.insert(allElements)); + +function testQueriesWithMinOrMaxKey() { + const eqMinRes = coll.find({a: {$eq: MinKey}}).toArray(); + const expectedEqMin = [{_id: "a_min_key", a: MinKey}]; + assert(resultsEq(expectedEqMin, eqMinRes), tojson(eqMinRes)); + + const gtMinRes = coll.find({a: {$gt: MinKey}}).toArray(); + const expectedGtMin = [ {_id: "a_max_key", a: MaxKey}, - {_id: "a_min_key", a: MinKey}, {_id: "a_null", a: null}, {_id: "a_number", a: 4}, {_id: "a_subobject", a: {b: "hi"}}, {_id: "a_undefined", a: undefined}, {_id: "a_string", a: "hello"} ]; + assert(resultsEq(expectedGtMin, gtMinRes), tojson(gtMinRes)); + + const gteMinRes = coll.find({a: {$gte: MinKey}}).toArray(); + assert(resultsEq(allElements, gteMinRes), tojson(gteMinRes)); + + const ltMinRes = coll.find({a: {$lt: MinKey}}).toArray(); + assert(resultsEq([], ltMinRes), tojson(ltMinRes)); + + const lteMinRes = coll.find({a: {$lte: MinKey}}).toArray(); + assert(resultsEq(expectedEqMin, lteMinRes), tojson(lteMinRes)); + + const eqMaxRes = coll.find({a: {$eq: MaxKey}}).toArray(); + const expectedEqMax = [{_id: "a_max_key", a: MaxKey}]; + assert(resultsEq(expectedEqMax, eqMaxRes), tojson(eqMaxRes)); + + const gtMaxRes = coll.find({a: {$gt: MaxKey}}).toArray(); + assert(resultsEq([], gtMaxRes), tojson(gtMaxRes)); - assert.writeOK(coll.insert(allElements)); - - function testQueriesWithMinOrMaxKey() { - const eqMinRes = coll.find({a: {$eq: MinKey}}).toArray(); - const expectedEqMin = [{_id: "a_min_key", a: MinKey}]; - assert(resultsEq(expectedEqMin, eqMinRes), tojson(eqMinRes)); - - const gtMinRes = coll.find({a: {$gt: MinKey}}).toArray(); - const expectedGtMin = [ - {_id: "a_max_key", a: MaxKey}, - {_id: "a_null", a: null}, - {_id: "a_number", a: 4}, - {_id: "a_subobject", a: {b: "hi"}}, - {_id: "a_undefined", a: undefined}, - {_id: "a_string", a: "hello"} - ]; - assert(resultsEq(expectedGtMin, gtMinRes), tojson(gtMinRes)); - - const gteMinRes = coll.find({a: {$gte: MinKey}}).toArray(); - assert(resultsEq(allElements, gteMinRes), tojson(gteMinRes)); - - const ltMinRes = coll.find({a: {$lt: MinKey}}).toArray(); - assert(resultsEq([], ltMinRes), tojson(ltMinRes)); - - const lteMinRes = coll.find({a: {$lte: MinKey}}).toArray(); - assert(resultsEq(expectedEqMin, lteMinRes), tojson(lteMinRes)); - - const eqMaxRes = coll.find({a: {$eq: MaxKey}}).toArray(); - const expectedEqMax = [{_id: "a_max_key", a: MaxKey}]; - assert(resultsEq(expectedEqMax, eqMaxRes), tojson(eqMaxRes)); - - const gtMaxRes = coll.find({a: {$gt: MaxKey}}).toArray(); - assert(resultsEq([], gtMaxRes), tojson(gtMaxRes)); - - const gteMaxRes = coll.find({a: {$gte: MaxKey}}).toArray(); - assert(resultsEq(expectedEqMax, gteMaxRes), tojson(gteMaxRes)); - - const ltMaxRes = coll.find({a: {$lt: MaxKey}}).toArray(); - const expectedLtMax = [ - {_id: "a_min_key", a: MinKey}, - {_id: "a_null", a: null}, - {_id: "a_number", a: 4}, - {_id: "a_subobject", a: {b: "hi"}}, - {_id: "a_undefined", a: undefined}, - {_id: "a_string", a: "hello"} - ]; - assert(resultsEq(expectedLtMax, ltMaxRes), tojson(ltMaxRes)); - - const lteMaxRes = coll.find({a: {$lte: MaxKey}}).toArray(); - 
assert(resultsEq(allElements, lteMaxRes), tojson(lteMaxRes)); - } - - function testTypeBracketedQueries() { - // Queries that do not involve MinKey or MaxKey follow type bracketing and thus do not - // return MinKey or MaxKey as results. These queries are being run to test this - // functionality. - const numRes = coll.find({a: {$gt: 3}}).toArray(); - const expectedNum = [{_id: "a_number", a: 4}]; - assert(resultsEq(expectedNum, numRes), tojson(numRes)); - const noNum = coll.find({a: {$lt: 3}}).toArray(); - assert(resultsEq([], noNum), tojson(noNum)); - - const stringRes = coll.find({a: {$gt: "best"}}).toArray(); - const expectedString = [{_id: "a_string", a: "hello"}]; - assert(resultsEq(expectedString, stringRes), tojson(stringRes)); - } - - testQueriesWithMinOrMaxKey(); - testTypeBracketedQueries(); - - assert.commandWorked(coll.createIndex({a: 1})); - // TODO: SERVER-35921 The results of the queries above should not change based on the - // presence of an index - assert.commandWorked(coll.dropIndexes()); - - testQueriesWithMinOrMaxKey(); - testTypeBracketedQueries(); + const gteMaxRes = coll.find({a: {$gte: MaxKey}}).toArray(); + assert(resultsEq(expectedEqMax, gteMaxRes), tojson(gteMaxRes)); + + const ltMaxRes = coll.find({a: {$lt: MaxKey}}).toArray(); + const expectedLtMax = [ + {_id: "a_min_key", a: MinKey}, + {_id: "a_null", a: null}, + {_id: "a_number", a: 4}, + {_id: "a_subobject", a: {b: "hi"}}, + {_id: "a_undefined", a: undefined}, + {_id: "a_string", a: "hello"} + ]; + assert(resultsEq(expectedLtMax, ltMaxRes), tojson(ltMaxRes)); + + const lteMaxRes = coll.find({a: {$lte: MaxKey}}).toArray(); + assert(resultsEq(allElements, lteMaxRes), tojson(lteMaxRes)); +} + +function testTypeBracketedQueries() { + // Queries that do not involve MinKey or MaxKey follow type bracketing and thus do not + // return MinKey or MaxKey as results. These queries are being run to test this + // functionality. + const numRes = coll.find({a: {$gt: 3}}).toArray(); + const expectedNum = [{_id: "a_number", a: 4}]; + assert(resultsEq(expectedNum, numRes), tojson(numRes)); + const noNum = coll.find({a: {$lt: 3}}).toArray(); + assert(resultsEq([], noNum), tojson(noNum)); + + const stringRes = coll.find({a: {$gt: "best"}}).toArray(); + const expectedString = [{_id: "a_string", a: "hello"}]; + assert(resultsEq(expectedString, stringRes), tojson(stringRes)); +} + +testQueriesWithMinOrMaxKey(); +testTypeBracketedQueries(); + +assert.commandWorked(coll.createIndex({a: 1})); +// TODO: SERVER-35921 The results of the queries above should not change based on the +// presence of an index +assert.commandWorked(coll.dropIndexes()); + +testQueriesWithMinOrMaxKey(); +testTypeBracketedQueries(); }()); diff --git a/jstests/core/minmax.js b/jstests/core/minmax.js index 1387d0adb4e..1a32fe9d059 100644 --- a/jstests/core/minmax.js +++ b/jstests/core/minmax.js @@ -1,159 +1,153 @@ // Test min / max query parameters. // @tags: [assumes_balancer_off] (function() { - "use strict"; - - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - load("jstests/aggregation/extras/utils.js"); // For resultsEq. 
- - const coll = db.jstests_minmax; - coll.drop(); - - function addData() { - assert.commandWorked(coll.save({a: 1, b: 1})); - assert.commandWorked(coll.save({a: 1, b: 2})); - assert.commandWorked(coll.save({a: 2, b: 1})); - assert.commandWorked(coll.save({a: 2, b: 2})); - } - - assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); - addData(); - - assert.eq(1, - coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1}).toArray().length); - assert.eq( - 2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1.5}).toArray().length); - assert.eq(2, - coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 2}).toArray().length); - - // Single bound. - assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).toArray().length); - assert.eq(3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).toArray().length); - assert.eq(3, - coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).hint({a: 1, b: 1}).toArray().length); - assert.eq( - 3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).hint({a: 1, b: 1}).toArray().length); - - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1, b: -1})); - addData(); - assert.eq(4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).toArray().length); - assert.eq(4, coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).toArray().length); - assert.eq(1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).toArray().length); - assert.eq(1, coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).toArray().length); - assert.eq( - 4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).hint({a: 1, b: -1}).toArray().length); - assert.eq( - 4, - coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).hint({a: 1, b: -1}).toArray().length); - assert.eq( - 1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).hint({a: 1, b: -1}).toArray().length); - assert.eq( - 1, - coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).hint({a: 1, b: -1}).toArray().length); - - // Check that min/max requires a hint. - let error = assert.throws(() => coll.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray()); - assert.eq(error.code, 51173); - - // Hint doesn't match. - error = assert.throws(function() { - coll.find().min({a: 1}).hint({a: 1, b: -1}).toArray(); - }); - assert.eq(error.code, 51174, error); - - error = assert.throws(function() { - coll.find().min({a: 1, b: 1}).max({a: 1}).hint({a: 1, b: -1}).toArray(); - }); - assert.eq(error.code, 51176, error); - - error = assert.throws(function() { - coll.find().min({b: 1}).max({a: 1, b: 2}).hint({a: 1, b: -1}).toArray(); - }); - assert.eq(error.code, 51176, error); - - // No query solutions. - error = assert.throws(function() { - coll.find().min({a: 1}).hint({$natural: 1}).toArray(); - }); - assert.eq(error.code, ErrorCodes.BadValue, error); - - error = assert.throws(function() { - coll.find().max({a: 1}).hint({$natural: 1}).toArray(); - }); - assert.eq(error.code, ErrorCodes.BadValue); - - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); - for (let i = 0; i < 10; ++i) { - assert.commandWorked(coll.save({_id: i, a: i})); - } - - // Reverse direction scan of the a:1 index between a:6 (inclusive) and a:3 (exclusive) is - // expected to fail, as max must be > min. - error = assert.throws(function() { - coll.find().hint({a: 1}).min({a: 6}).max({a: 3}).sort({a: -1}).toArray(); - }); - assert.eq(error.code, 51175); - - // A find with identical min and max values is expected to fail, as max is exclusive. 
- error = assert.throws(function() { - coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).toArray(); - }); - assert.eq(error.code, 51175); - - error = assert.throws(function() { - coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).sort({a: -1}).toArray(); - }); - assert.eq(error.code, 51175); - - coll.drop(); - addData(); - assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); - - error = assert.throws(function() { - coll.find().min({a: 1, b: 2}).max({a: 1, b: 2}).hint({a: 1, b: 1}).toArray(); - }); - assert.eq(error.code, 51175); - - // Test ascending index. - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); - assert.commandWorked(coll.insert({a: 3})); - assert.commandWorked(coll.insert({a: 4})); - assert.commandWorked(coll.insert({a: 5})); - - let cursor = coll.find().hint({a: 1}).min({a: 4}); - if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { - assert.eq(4, cursor.next().a); - assert.eq(5, cursor.next().a); - } else { - // With more than one shard, we cannot assume the results will come back in order, since we - // did not request a sort. - assert(resultsEq([cursor.next().a, cursor.next().a], [4, 5])); - } - assert(!cursor.hasNext()); - - cursor = coll.find().hint({a: 1}).max({a: 4}); - assert.eq(3, cursor.next()["a"]); - assert(!cursor.hasNext()); - - // Test descending index. - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.ensureIndex({a: -1})); - - cursor = coll.find().hint({a: -1}).min({a: 4}); - if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { - assert.eq(4, cursor.next().a); - assert.eq(3, cursor.next().a); - } else { - // With more than one shard, we cannot assume the results will come back in order, since we - // did not request a sort. - assert(resultsEq([cursor.next().a, cursor.next().a], [4, 3])); - } - assert(!cursor.hasNext()); - - cursor = coll.find().hint({a: -1}).max({a: 4}); - assert.eq(5, cursor.next()["a"]); - assert(!cursor.hasNext()); +"use strict"; + +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +load("jstests/aggregation/extras/utils.js"); // For resultsEq. + +const coll = db.jstests_minmax; +coll.drop(); + +function addData() { + assert.commandWorked(coll.save({a: 1, b: 1})); + assert.commandWorked(coll.save({a: 1, b: 2})); + assert.commandWorked(coll.save({a: 2, b: 1})); + assert.commandWorked(coll.save({a: 2, b: 2})); +} + +assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); +addData(); + +assert.eq(1, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1}).toArray().length); +assert.eq(2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 1.5}).toArray().length); +assert.eq(2, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).max({a: 2, b: 2}).toArray().length); + +// Single bound. 
+assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).toArray().length); +assert.eq(3, coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).toArray().length); +assert.eq(3, coll.find().hint({a: 1, b: 1}).min({a: 1, b: 2}).hint({a: 1, b: 1}).toArray().length); +assert.eq(3, + coll.find().hint({a: 1, b: 1}).max({a: 2, b: 1.5}).hint({a: 1, b: 1}).toArray().length); + +coll.drop(); +assert.commandWorked(coll.ensureIndex({a: 1, b: -1})); +addData(); +assert.eq(4, coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).toArray().length); +assert.eq(4, coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).toArray().length); +assert.eq(1, coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).toArray().length); +assert.eq(1, coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).toArray().length); +assert.eq(4, + coll.find().hint({a: 1, b: -1}).min({a: 1, b: 2}).hint({a: 1, b: -1}).toArray().length); +assert.eq(4, + coll.find().hint({a: 1, b: -1}).max({a: 2, b: 0.5}).hint({a: 1, b: -1}).toArray().length); +assert.eq(1, + coll.find().hint({a: 1, b: -1}).min({a: 2, b: 1}).hint({a: 1, b: -1}).toArray().length); +assert.eq(1, + coll.find().hint({a: 1, b: -1}).max({a: 1, b: 1.5}).hint({a: 1, b: -1}).toArray().length); + +// Check that min/max requires a hint. +let error = assert.throws(() => coll.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray()); +assert.eq(error.code, 51173); + +// Hint doesn't match. +error = assert.throws(function() { + coll.find().min({a: 1}).hint({a: 1, b: -1}).toArray(); +}); +assert.eq(error.code, 51174, error); + +error = assert.throws(function() { + coll.find().min({a: 1, b: 1}).max({a: 1}).hint({a: 1, b: -1}).toArray(); +}); +assert.eq(error.code, 51176, error); + +error = assert.throws(function() { + coll.find().min({b: 1}).max({a: 1, b: 2}).hint({a: 1, b: -1}).toArray(); +}); +assert.eq(error.code, 51176, error); + +// No query solutions. +error = assert.throws(function() { + coll.find().min({a: 1}).hint({$natural: 1}).toArray(); +}); +assert.eq(error.code, ErrorCodes.BadValue, error); + +error = assert.throws(function() { + coll.find().max({a: 1}).hint({$natural: 1}).toArray(); +}); +assert.eq(error.code, ErrorCodes.BadValue); + +coll.drop(); +assert.commandWorked(coll.ensureIndex({a: 1})); +for (let i = 0; i < 10; ++i) { + assert.commandWorked(coll.save({_id: i, a: i})); +} + +// Reverse direction scan of the a:1 index between a:6 (inclusive) and a:3 (exclusive) is +// expected to fail, as max must be > min. +error = assert.throws(function() { + coll.find().hint({a: 1}).min({a: 6}).max({a: 3}).sort({a: -1}).toArray(); +}); +assert.eq(error.code, 51175); + +// A find with identical min and max values is expected to fail, as max is exclusive. +error = assert.throws(function() { + coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).toArray(); +}); +assert.eq(error.code, 51175); + +error = assert.throws(function() { + coll.find().hint({a: 1}).min({a: 2}).max({a: 2}).sort({a: -1}).toArray(); +}); +assert.eq(error.code, 51175); + +coll.drop(); +addData(); +assert.commandWorked(coll.ensureIndex({a: 1, b: 1})); + +error = assert.throws(function() { + coll.find().min({a: 1, b: 2}).max({a: 1, b: 2}).hint({a: 1, b: 1}).toArray(); +}); +assert.eq(error.code, 51175); + +// Test ascending index. 
+coll.drop(); +assert.commandWorked(coll.ensureIndex({a: 1})); +assert.commandWorked(coll.insert({a: 3})); +assert.commandWorked(coll.insert({a: 4})); +assert.commandWorked(coll.insert({a: 5})); + +let cursor = coll.find().hint({a: 1}).min({a: 4}); +if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { + assert.eq(4, cursor.next().a); + assert.eq(5, cursor.next().a); +} else { + // With more than one shard, we cannot assume the results will come back in order, since we + // did not request a sort. + assert(resultsEq([cursor.next().a, cursor.next().a], [4, 5])); +} +assert(!cursor.hasNext()); + +cursor = coll.find().hint({a: 1}).max({a: 4}); +assert.eq(3, cursor.next()["a"]); +assert(!cursor.hasNext()); + +// Test descending index. +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.ensureIndex({a: -1})); + +cursor = coll.find().hint({a: -1}).min({a: 4}); +if (FixtureHelpers.numberOfShardsForCollection(coll) === 1) { + assert.eq(4, cursor.next().a); + assert.eq(3, cursor.next().a); +} else { + // With more than one shard, we cannot assume the results will come back in order, since we + // did not request a sort. + assert(resultsEq([cursor.next().a, cursor.next().a], [4, 3])); +} +assert(!cursor.hasNext()); + +cursor = coll.find().hint({a: -1}).max({a: 4}); +assert.eq(5, cursor.next()["a"]); +assert(!cursor.hasNext()); }()); diff --git a/jstests/core/minmax_edge.js b/jstests/core/minmax_edge.js index cf75edaef80..081af7a347e 100644 --- a/jstests/core/minmax_edge.js +++ b/jstests/core/minmax_edge.js @@ -3,230 +3,242 @@ * Other edge cases are covered by C++ unit tests. */ (function() { - const t = db.minmax_edge; - - /* - * Function to verify that the results of a query match the expected results. - * Results is the cursor toArray, expectedIds is a list of _ids - */ - function verifyResultIds(results, expectedIds) { - // check they are the same length - assert.eq(results.length, expectedIds.length); - - function compare(a, b) { - if (a._id < b._id) - return -1; - if (a._id > b._id) - return 1; - return 0; - } - - results.sort(compare); - expectedIds.sort(); - - for (var i = 0; i < results.length; i++) { - assert.eq(results._id, expectedIds._ids); - } - } +const t = db.minmax_edge; - /* - * Shortcut to drop the collection and insert these 3 test docs. Used to change the indices - * regardless of any previous indices. - */ - function reset(t) { - t.drop(); - assert.writeOK(t.insert({_id: 0, a: 1, b: 1})); - assert.writeOK(t.insert({_id: 1, a: 1, b: 2})); - assert.writeOK(t.insert({_id: 2, a: 1, b: 3})); - - assert.writeOK(t.insert({_id: 3, a: 2, b: 1})); - assert.writeOK(t.insert({_id: 4, a: 2, b: 2})); - assert.writeOK(t.insert({_id: 5, a: 2, b: 3})); - - assert.writeOK(t.insert({_id: 6, a: 3, b: 1})); - assert.writeOK(t.insert({_id: 7, a: 3, b: 2})); - assert.writeOK(t.insert({_id: 8, a: 3, b: 3})); +/* + * Function to verify that the results of a query match the expected results. 
+ * Results is the cursor toArray, expectedIds is a list of _ids + */ +function verifyResultIds(results, expectedIds) { + // check they are the same length + assert.eq(results.length, expectedIds.length); + + function compare(a, b) { + if (a._id < b._id) + return -1; + if (a._id > b._id) + return 1; + return 0; } - // Two helpers to save typing - function verifyMin(minDoc, idx, expectedIds) { - verifyResultIds(t.find().min(minDoc).hint(idx).toArray(), expectedIds); - } + results.sort(compare); + expectedIds.sort(); - function verifyMax(minDoc, idx, expectedIds) { - verifyResultIds(t.find().max(minDoc).hint(idx).toArray(), expectedIds); + for (var i = 0; i < results.length; i++) { + assert.eq(results._id, expectedIds._ids); } +} - // Basic ascending index. - reset(t); - let indexSpec = {a: 1}; - assert.commandWorked(t.createIndex(indexSpec)); - - verifyMin({a: Infinity}, indexSpec, []); - verifyMax({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: -Infinity}, indexSpec, []); - - // NaN < all ints. - verifyMin({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: NaN}, indexSpec, []); - - // {a: 1} > all ints. - verifyMin({a: {a: 1}}, indexSpec, []); - verifyMax({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - // 'a' > all ints. - verifyMin({a: 'a'}, indexSpec, []); - verifyMax({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - // Now with a compound index. - reset(t); - indexSpec = {a: 1, b: -1}; - - assert.commandWorked(t.createIndex(indexSpec)); - - // Same as single-key index assertions, with b field present. - verifyMin({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: NaN, b: 1}, indexSpec, []); - - verifyMin({a: Infinity, b: 1}, indexSpec, []); - verifyMax({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: -Infinity, b: 1}, indexSpec, []); - - verifyMin({a: {a: 1}, b: 1}, indexSpec, []); - verifyMax({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: 'a', b: 1}, indexSpec, []); - verifyMax({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - // Edge cases on b values - verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMin({a: 2, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMin({a: 3, b: Infinity}, indexSpec, [6, 7, 8]); - verifyMax({a: 1, b: Infinity}, indexSpec, []); - verifyMax({a: 2, b: Infinity}, indexSpec, [0, 1, 2]); - verifyMax({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); - - verifyMin({a: 1, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMin({a: 2, b: -Infinity}, indexSpec, [6, 7, 8]); - verifyMin({a: 3, b: -Infinity}, indexSpec, []); - verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2]); - verifyMax({a: 2, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); - verifyMax({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: 2, b: NaN}, indexSpec, [6, 7, 8]); - verifyMax({a: 2, b: NaN}, indexSpec, [0, 1, 2, 3, 4, 5]); - - verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]); - - verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]); - - // Test descending index. 
- reset(t); - indexSpec = {a: -1}; - assert.commandWorked(t.createIndex(indexSpec)); - - verifyMin({a: NaN}, indexSpec, []); - verifyMax({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: Infinity}, indexSpec, []); - - verifyMin({a: -Infinity}, indexSpec, []); - verifyMax({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: {a: 1}}, indexSpec, []); - - verifyMin({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: 'a'}, indexSpec, []); - - // Now with a compound index. - reset(t); - indexSpec = {a: -1, b: -1}; - assert.commandWorked(t.createIndex(indexSpec)); - - // Same as single-key index assertions, with b field present. - verifyMin({a: NaN, b: 1}, indexSpec, []); - verifyMax({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: Infinity, b: 1}, indexSpec, []); - - verifyMin({a: -Infinity, b: 1}, indexSpec, []); - verifyMax({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - verifyMin({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: {a: 1}, b: 1}, indexSpec, []); - - verifyMin({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: 'a', b: 1}, indexSpec, []); - - // Edge cases on b values. - verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2]); - verifyMin({a: 2, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); - verifyMin({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: 1, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: Infinity}, indexSpec, [6, 7, 8]); - verifyMax({a: 3, b: Infinity}, indexSpec, []); - - verifyMin({a: 1, b: -Infinity}, indexSpec, []); - verifyMin({a: 2, b: -Infinity}, indexSpec, [0, 1, 2]); - verifyMin({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); - verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 3, b: -Infinity}, indexSpec, [6, 7, 8]); +/* + * Shortcut to drop the collection and insert these 3 test docs. Used to change the indices + * regardless of any previous indices. + */ +function reset(t) { + t.drop(); + assert.writeOK(t.insert({_id: 0, a: 1, b: 1})); + assert.writeOK(t.insert({_id: 1, a: 1, b: 2})); + assert.writeOK(t.insert({_id: 2, a: 1, b: 3})); + + assert.writeOK(t.insert({_id: 3, a: 2, b: 1})); + assert.writeOK(t.insert({_id: 4, a: 2, b: 2})); + assert.writeOK(t.insert({_id: 5, a: 2, b: 3})); + + assert.writeOK(t.insert({_id: 6, a: 3, b: 1})); + assert.writeOK(t.insert({_id: 7, a: 3, b: 2})); + assert.writeOK(t.insert({_id: 8, a: 3, b: 3})); +} - verifyMin({a: 2, b: NaN}, indexSpec, [0, 1, 2]); - verifyMax({a: 2, b: NaN}, indexSpec, [3, 4, 5, 6, 7, 8]); +// Two helpers to save typing +function verifyMin(minDoc, idx, expectedIds) { + verifyResultIds(t.find().min(minDoc).hint(idx).toArray(), expectedIds); +} - verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]); +function verifyMax(minDoc, idx, expectedIds) { + verifyResultIds(t.find().max(minDoc).hint(idx).toArray(), expectedIds); +} + +// Basic ascending index. 
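// The cross-type expectations in this block rely on the BSON comparison order used for index
// keys (roughly MinKey < null < numbers (NaN lowest) < strings < objects < ... < MaxKey).
// A quick sanity sketch with bsonWoCompare():
assert.lt(bsonWoCompare({k: NaN}, {k: 1}), 0);       // NaN sorts below every other number
assert.lt(bsonWoCompare({k: 1}, {k: 'a'}), 0);       // numbers sort below strings
assert.lt(bsonWoCompare({k: 'a'}, {k: {a: 1}}), 0);  // strings sort below objects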
+reset(t); +let indexSpec = {a: 1}; +assert.commandWorked(t.createIndex(indexSpec)); + +verifyMin({a: Infinity}, indexSpec, []); +verifyMax({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: -Infinity}, indexSpec, []); + +// NaN < all ints. +verifyMin({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: NaN}, indexSpec, []); - verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]); - verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]); +// {a: 1} > all ints. +verifyMin({a: {a: 1}}, indexSpec, []); +verifyMax({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); - // Now a couple cases with an extra compound index. - t.drop(); - indexSpec = {a: 1, b: -1, c: 1}; - assert.commandWorked(t.createIndex(indexSpec)); - // The following documents are in order according to the index. - t.insert({_id: 0, a: 1, b: 'b', c: 1}); - t.insert({_id: 1, a: 1, b: 'b', c: 2}); - t.insert({_id: 2, a: 1, b: 'a', c: 1}); - t.insert({_id: 3, a: 1, b: 'a', c: 2}); - t.insert({_id: 4, a: 2, b: 'b', c: 1}); - t.insert({_id: 5, a: 2, b: 'b', c: 2}); - t.insert({_id: 6, a: 2, b: 'a', c: 1}); - t.insert({_id: 7, a: 2, b: 'a', c: 2}); - - verifyMin({a: 1, b: 'a', c: 1}, indexSpec, [2, 3, 4, 5, 6, 7]); - verifyMin({a: 2, b: 'a', c: 2}, indexSpec, [7]); - verifyMax({a: 1, b: 'a', c: 1}, indexSpec, [0, 1]); - verifyMax({a: 2, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6]); - - verifyMin({a: Infinity, b: 'a', c: 2}, indexSpec, []); - verifyMax({a: Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]); - - verifyMin({a: -Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]); - verifyMax({a: -Infinity, b: 'a', c: 2}, indexSpec, []); - - // 'a' > Infinity, actually. - verifyMin({a: 1, b: Infinity, c: 2}, indexSpec, [4, 5, 6, 7]); - verifyMax({a: 1, b: Infinity, c: 2}, indexSpec, [0, 1, 2, 3]); - - // Also, 'a' > -Infinity. - verifyMin({a: 1, b: -Infinity, c: 2}, indexSpec, [4, 5, 6, 7]); - verifyMax({a: 1, b: -Infinity, c: 2}, indexSpec, [0, 1, 2, 3]); - - verifyMin({a: 1, b: 'a', c: Infinity}, indexSpec, [4, 5, 6, 7]); - verifyMax({a: 1, b: 'a', c: Infinity}, indexSpec, [0, 1, 2, 3]); - - verifyMin({a: 1, b: 'a', c: -Infinity}, indexSpec, [2, 3, 4, 5, 6, 7]); - verifyMax({a: 1, b: 'a', c: -Infinity}, indexSpec, [0, 1]); +// 'a' > all ints. +verifyMin({a: 'a'}, indexSpec, []); +verifyMax({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +// Now with a compound index. +reset(t); +indexSpec = { + a: 1, + b: -1 +}; + +assert.commandWorked(t.createIndex(indexSpec)); + +// Same as single-key index assertions, with b field present. 
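// With a compound index bound, the full key is compared field-by-field in index order, so
// under {a: 1, b: -1} the a component is decisive and the b component only discriminates
// among keys that tie on a; that is why the extreme b values below only shift results within
// a single a group.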
+verifyMin({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: NaN, b: 1}, indexSpec, []); + +verifyMin({a: Infinity, b: 1}, indexSpec, []); +verifyMax({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: -Infinity, b: 1}, indexSpec, []); + +verifyMin({a: {a: 1}, b: 1}, indexSpec, []); +verifyMax({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: 'a', b: 1}, indexSpec, []); +verifyMax({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +// Edge cases on b values +verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMin({a: 2, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMin({a: 3, b: Infinity}, indexSpec, [6, 7, 8]); +verifyMax({a: 1, b: Infinity}, indexSpec, []); +verifyMax({a: 2, b: Infinity}, indexSpec, [0, 1, 2]); +verifyMax({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); + +verifyMin({a: 1, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMin({a: 2, b: -Infinity}, indexSpec, [6, 7, 8]); +verifyMin({a: 3, b: -Infinity}, indexSpec, []); +verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2]); +verifyMax({a: 2, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); +verifyMax({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: 2, b: NaN}, indexSpec, [6, 7, 8]); +verifyMax({a: 2, b: NaN}, indexSpec, [0, 1, 2, 3, 4, 5]); + +verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]); + +verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]); + +// Test descending index. +reset(t); +indexSpec = { + a: -1 +}; +assert.commandWorked(t.createIndex(indexSpec)); + +verifyMin({a: NaN}, indexSpec, []); +verifyMax({a: NaN}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: Infinity}, indexSpec, []); + +verifyMin({a: -Infinity}, indexSpec, []); +verifyMax({a: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: {a: 1}}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: {a: 1}}, indexSpec, []); + +verifyMin({a: 'a'}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: 'a'}, indexSpec, []); + +// Now with a compound index. +reset(t); +indexSpec = { + a: -1, + b: -1 +}; +assert.commandWorked(t.createIndex(indexSpec)); + +// Same as single-key index assertions, with b field present. +verifyMin({a: NaN, b: 1}, indexSpec, []); +verifyMax({a: NaN, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: Infinity, b: 1}, indexSpec, []); + +verifyMin({a: -Infinity, b: 1}, indexSpec, []); +verifyMax({a: -Infinity, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); + +verifyMin({a: {a: 1}, b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: {a: 1}, b: 1}, indexSpec, []); + +verifyMin({a: 'a', b: 1}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: 'a', b: 1}, indexSpec, []); + +// Edge cases on b values. 
+verifyMin({a: 1, b: Infinity}, indexSpec, [0, 1, 2]); +verifyMin({a: 2, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); +verifyMin({a: 3, b: Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: 1, b: Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: Infinity}, indexSpec, [6, 7, 8]); +verifyMax({a: 3, b: Infinity}, indexSpec, []); + +verifyMin({a: 1, b: -Infinity}, indexSpec, []); +verifyMin({a: 2, b: -Infinity}, indexSpec, [0, 1, 2]); +verifyMin({a: 3, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5]); +verifyMax({a: 1, b: -Infinity}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: -Infinity}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 3, b: -Infinity}, indexSpec, [6, 7, 8]); + +verifyMin({a: 2, b: NaN}, indexSpec, [0, 1, 2]); +verifyMax({a: 2, b: NaN}, indexSpec, [3, 4, 5, 6, 7, 8]); + +verifyMin({a: 2, b: {b: 1}}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: {b: 1}}, indexSpec, [0, 1, 2]); + +verifyMin({a: 2, b: 'b'}, indexSpec, [3, 4, 5, 6, 7, 8]); +verifyMax({a: 2, b: 'b'}, indexSpec, [0, 1, 2]); + +// Now a couple cases with an extra compound index. +t.drop(); +indexSpec = { + a: 1, + b: -1, + c: 1 +}; +assert.commandWorked(t.createIndex(indexSpec)); +// The following documents are in order according to the index. +t.insert({_id: 0, a: 1, b: 'b', c: 1}); +t.insert({_id: 1, a: 1, b: 'b', c: 2}); +t.insert({_id: 2, a: 1, b: 'a', c: 1}); +t.insert({_id: 3, a: 1, b: 'a', c: 2}); +t.insert({_id: 4, a: 2, b: 'b', c: 1}); +t.insert({_id: 5, a: 2, b: 'b', c: 2}); +t.insert({_id: 6, a: 2, b: 'a', c: 1}); +t.insert({_id: 7, a: 2, b: 'a', c: 2}); + +verifyMin({a: 1, b: 'a', c: 1}, indexSpec, [2, 3, 4, 5, 6, 7]); +verifyMin({a: 2, b: 'a', c: 2}, indexSpec, [7]); +verifyMax({a: 1, b: 'a', c: 1}, indexSpec, [0, 1]); +verifyMax({a: 2, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6]); + +verifyMin({a: Infinity, b: 'a', c: 2}, indexSpec, []); +verifyMax({a: Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]); + +verifyMin({a: -Infinity, b: 'a', c: 2}, indexSpec, [0, 1, 2, 3, 4, 5, 6, 7]); +verifyMax({a: -Infinity, b: 'a', c: 2}, indexSpec, []); + +// 'a' > Infinity, actually. +verifyMin({a: 1, b: Infinity, c: 2}, indexSpec, [4, 5, 6, 7]); +verifyMax({a: 1, b: Infinity, c: 2}, indexSpec, [0, 1, 2, 3]); + +// Also, 'a' > -Infinity. +verifyMin({a: 1, b: -Infinity, c: 2}, indexSpec, [4, 5, 6, 7]); +verifyMax({a: 1, b: -Infinity, c: 2}, indexSpec, [0, 1, 2, 3]); + +verifyMin({a: 1, b: 'a', c: Infinity}, indexSpec, [4, 5, 6, 7]); +verifyMax({a: 1, b: 'a', c: Infinity}, indexSpec, [0, 1, 2, 3]); + +verifyMin({a: 1, b: 'a', c: -Infinity}, indexSpec, [2, 3, 4, 5, 6, 7]); +verifyMax({a: 1, b: 'a', c: -Infinity}, indexSpec, [0, 1]); })(); diff --git a/jstests/core/mr5.js b/jstests/core/mr5.js index c78ce1d8f4e..1858eaa57a5 100644 --- a/jstests/core/mr5.js +++ b/jstests/core/mr5.js @@ -5,63 +5,63 @@ // ] (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For resultsEq. +load("jstests/aggregation/extras/utils.js"); // For resultsEq. 
- const t = db.mr5; - t.drop(); +const t = db.mr5; +t.drop(); - assert.writeOK(t.insert({"partner": 1, "visits": 9})); - assert.writeOK(t.insert({"partner": 2, "visits": 9})); - assert.writeOK(t.insert({"partner": 1, "visits": 11})); - assert.writeOK(t.insert({"partner": 1, "visits": 30})); - assert.writeOK(t.insert({"partner": 2, "visits": 41})); - assert.writeOK(t.insert({"partner": 2, "visits": 41})); +assert.writeOK(t.insert({"partner": 1, "visits": 9})); +assert.writeOK(t.insert({"partner": 2, "visits": 9})); +assert.writeOK(t.insert({"partner": 1, "visits": 11})); +assert.writeOK(t.insert({"partner": 1, "visits": 30})); +assert.writeOK(t.insert({"partner": 2, "visits": 41})); +assert.writeOK(t.insert({"partner": 2, "visits": 41})); - let mapper = function() { - emit(this.partner, {stats: [this.visits]}); - }; +let mapper = function() { + emit(this.partner, {stats: [this.visits]}); +}; - const reducer = function(k, v) { - var stats = []; - var total = 0; - for (var i = 0; i < v.length; i++) { - for (var j in v[i].stats) { - stats.push(v[i].stats[j]); - total += v[i].stats[j]; - } +const reducer = function(k, v) { + var stats = []; + var total = 0; + for (var i = 0; i < v.length; i++) { + for (var j in v[i].stats) { + stats.push(v[i].stats[j]); + total += v[i].stats[j]; } - return {stats: stats, total: total}; - }; + } + return {stats: stats, total: total}; +}; - let res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}}); +let res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}}); - let resultAsObj = res.convertToSingleObject(); - assert.eq(2, - Object.keySet(resultAsObj).length, - `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`); - // Use resultsEq() to avoid any assumptions about order. - assert(resultsEq([9, 11, 30], resultAsObj["1"].stats)); - assert(resultsEq([9, 41, 41], resultAsObj["2"].stats)); +let resultAsObj = res.convertToSingleObject(); +assert.eq(2, + Object.keySet(resultAsObj).length, + `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`); +// Use resultsEq() to avoid any assumptions about order. +assert(resultsEq([9, 11, 30], resultAsObj["1"].stats)); +assert(resultsEq([9, 41, 41], resultAsObj["2"].stats)); - res.drop(); +res.drop(); - mapper = function() { - var x = "partner"; - var y = "visits"; - emit(this[x], {stats: [this[y]]}); - }; +mapper = function() { + var x = "partner"; + var y = "visits"; + emit(this[x], {stats: [this[y]]}); +}; - res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}}); +res = t.mapReduce(mapper, reducer, {out: "mr5_out", scope: {xx: 1}}); - resultAsObj = res.convertToSingleObject(); - assert.eq(2, - Object.keySet(resultAsObj).length, - `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`); - // Use resultsEq() to avoid any assumptions about order. - assert(resultsEq([9, 11, 30], resultAsObj["1"].stats)); - assert(resultsEq([9, 41, 41], resultAsObj["2"].stats)); +resultAsObj = res.convertToSingleObject(); +assert.eq(2, + Object.keySet(resultAsObj).length, + `Expected 2 keys ("1" and "2") in object ${tojson(resultAsObj)}`); +// Use resultsEq() to avoid any assumptions about order. 
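// (resultsEq(), from the utils.js file loaded above, compares results as unordered
// multisets, e.g.:)
assert(resultsEq([1, 2, 3], [3, 1, 2]));  // order-insensitive
assert(!resultsEq([1, 2, 3], [1, 2]));    // but every element must still be accounted for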
+assert(resultsEq([9, 11, 30], resultAsObj["1"].stats)); +assert(resultsEq([9, 41, 41], resultAsObj["2"].stats)); - res.drop(); +res.drop(); }()); diff --git a/jstests/core/mr_bigobject.js b/jstests/core/mr_bigobject.js index 513d48d25a2..92865a04f0a 100644 --- a/jstests/core/mr_bigobject.js +++ b/jstests/core/mr_bigobject.js @@ -39,7 +39,7 @@ r = function(k, v) { total = 0; for (var i = 0; i < v.length; i++) { var x = v[i]; - if (typeof(x) == "number") + if (typeof (x) == "number") total += x; else total += x.length; diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js index 3c32e6de8af..c02ee7f1fac 100644 --- a/jstests/core/mr_bigobject_replace.js +++ b/jstests/core/mr_bigobject_replace.js @@ -13,58 +13,58 @@ * "replace" action for the out collection. */ (function() { - function mapper() { - // Emit multiple values to ensure that the reducer gets called. - emit(this._id, 1); - emit(this._id, 1); - } - - function createBigDocument() { - // Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified - // 'targetSize' in bytes. - function makeDocWithSize(targetSize) { - var doc = {_id: new ObjectId(), value: ''}; +function mapper() { + // Emit multiple values to ensure that the reducer gets called. + emit(this._id, 1); + emit(this._id, 1); +} - var size = Object.bsonsize(doc); - assert.gte(targetSize, size); +function createBigDocument() { + // Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified + // 'targetSize' in bytes. + function makeDocWithSize(targetSize) { + var doc = {_id: new ObjectId(), value: ''}; - // Set 'value' as a string with enough characters to make the whole document 'size' - // bytes long. - doc.value = new Array(targetSize - size + 1).join('x'); - assert.eq(targetSize, Object.bsonsize(doc)); + var size = Object.bsonsize(doc); + assert.gte(targetSize, size); - return doc; - } + // Set 'value' as a string with enough characters to make the whole document 'size' + // bytes long. + doc.value = new Array(targetSize - size + 1).join('x'); + assert.eq(targetSize, Object.bsonsize(doc)); - var maxDocSize = 16 * 1024 * 1024; - return makeDocWithSize(maxDocSize + 1).value; + return doc; } - function runTest(testOptions) { - db.input.drop(); - db.mr_bigobject_replace.drop(); + var maxDocSize = 16 * 1024 * 1024; + return makeDocWithSize(maxDocSize + 1).value; +} - // Insert a document so the mapper gets run. - assert.writeOK(db.input.insert({})); +function runTest(testOptions) { + db.input.drop(); + db.mr_bigobject_replace.drop(); - var res = db.runCommand(Object.extend({ - mapReduce: "input", - map: mapper, - out: {replace: "mr_bigobject_replace"}, - }, - testOptions)); + // Insert a document so the mapper gets run. 
+    assert.writeOK(db.input.insert({}));

-        assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
-        assert.lte(0,
-                   res.errmsg.indexOf("object to insert too large"),
-                   "map-reduce command failed for a reason other than inserting a large document");
-    }
+    var res = db.runCommand(Object.extend({
+        mapReduce: "input",
+        map: mapper,
+        out: {replace: "mr_bigobject_replace"},
+    },
+                                          testOptions));
+
+    assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
+    assert.lte(0,
+               res.errmsg.indexOf("object to insert too large"),
+               "map-reduce command failed for a reason other than inserting a large document");
+}

-    runTest({reduce: createBigDocument});
-    runTest({
-        reduce: function() {
-            return 1;
-        },
-        finalize: createBigDocument
-    });
+runTest({reduce: createBigDocument});
+runTest({
+    reduce: function() {
+        return 1;
+    },
+    finalize: createBigDocument
+});
 })();
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 56a025dc4b5..168b54a5f6d 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -60,17 +60,17 @@ function op(childLoop) {
 }

 /**
-* Run one map reduce with the specified parameters in a parallel shell, kill the
-* map reduce op or its child op with killOp, and wait for the map reduce op to
-* terminate.
-* @param childLoop - if true, a distinct $where op is killed rather than the map reduce op.
-* This is necessary for a child distinct $where of a map reduce op because child
-* ops currently mask parent ops in currentOp.
-*/
+ * Run one map reduce with the specified parameters in a parallel shell, kill the
+ * map reduce op or its child op with killOp, and wait for the map reduce op to
+ * terminate.
+ * @param childLoop - if true, a distinct $where op is killed rather than the map reduce op.
+ * This is necessary for a child distinct $where of a map reduce op because child
+ * ops currently mask parent ops in currentOp.
+ */
 function testOne(map, reduce, finalize, scope, childLoop, wait) {
-    debug("testOne - map = " + tojson(map) + "; reduce = " + tojson(reduce) + "; finalize = " +
-          tojson(finalize) + "; scope = " + tojson(scope) + "; childLoop = " + childLoop +
-          "; wait = " + wait);
+    debug("testOne - map = " + tojson(map) + "; reduce = " + tojson(reduce) +
+          "; finalize = " + tojson(finalize) + "; scope = " + tojson(scope) +
+          "; childLoop = " + childLoop + "; wait = " + wait);

     t.drop();
     t2.drop();
diff --git a/jstests/core/mr_stored.js b/jstests/core/mr_stored.js
index e2c8d1450ec..c1c38253727 100644
--- a/jstests/core/mr_stored.js
+++ b/jstests/core/mr_stored.js
@@ -8,81 +8,81 @@
 //   requires_non_retryable_writes,
 // ]
 (function() {
-    "use strict";
+"use strict";

-    // Use a unique database name to avoid conflicts with other tests that directly modify
-    // system.js.
-    const testDB = db.getSiblingDB("mr_stored");
-    const coll = testDB.test;
-    coll.drop();
+// Use a unique database name to avoid conflicts with other tests that directly modify
+// system.js.
+const testDB = db.getSiblingDB("mr_stored"); +const coll = testDB.test; +coll.drop(); - assert.commandWorked(coll.insert({"partner": 1, "visits": 9})); - assert.commandWorked(coll.insert({"partner": 2, "visits": 9})); - assert.commandWorked(coll.insert({"partner": 1, "visits": 11})); - assert.commandWorked(coll.insert({"partner": 1, "visits": 30})); - assert.commandWorked(coll.insert({"partner": 2, "visits": 41})); - assert.commandWorked(coll.insert({"partner": 2, "visits": 41})); +assert.commandWorked(coll.insert({"partner": 1, "visits": 9})); +assert.commandWorked(coll.insert({"partner": 2, "visits": 9})); +assert.commandWorked(coll.insert({"partner": 1, "visits": 11})); +assert.commandWorked(coll.insert({"partner": 1, "visits": 30})); +assert.commandWorked(coll.insert({"partner": 2, "visits": 41})); +assert.commandWorked(coll.insert({"partner": 2, "visits": 41})); - let map = function(obj) { - emit(obj.partner, {stats: [obj.visits]}); - }; +let map = function(obj) { + emit(obj.partner, {stats: [obj.visits]}); +}; - let reduce = function(k, v) { - var stats = []; - var total = 0; - for (var i = 0; i < v.length; i++) { - for (var j in v[i].stats) { - stats.push(v[i].stats[j]); - total += v[i].stats[j]; - } +let reduce = function(k, v) { + var stats = []; + var total = 0; + for (var i = 0; i < v.length; i++) { + for (var j in v[i].stats) { + stats.push(v[i].stats[j]); + total += v[i].stats[j]; } - return {stats: stats, total: total}; - }; + } + return {stats: stats, total: total}; +}; - // Test that map reduce works with stored javascript - assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_map", value: map})); - assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_reduce", value: reduce})); +// Test that map reduce works with stored javascript +assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_map", value: map})); +assert.commandWorked(testDB.system.js.insert({_id: "mr_stored_reduce", value: reduce})); - let res = coll.mapReduce( - function() { - mr_stored_map(this); - }, - function(k, v) { - return mr_stored_reduce(k, v); - }, - {out: "mr_stored_out", scope: {xx: 1}}); +let res = coll.mapReduce( + function() { + mr_stored_map(this); + }, + function(k, v) { + return mr_stored_reduce(k, v); + }, + {out: "mr_stored_out", scope: {xx: 1}}); - let z = res.convertToSingleObject(); - assert.eq(2, Object.keySet(z).length); - assert.eq([9, 11, 30], z["1"].stats); - assert.eq([9, 41, 41], z["2"].stats); +let z = res.convertToSingleObject(); +assert.eq(2, Object.keySet(z).length); +assert.eq([9, 11, 30], z["1"].stats); +assert.eq([9, 41, 41], z["2"].stats); - res.drop(); +res.drop(); - map = function(obj) { - var x = "partner"; - var y = "visits"; - emit(obj[x], {stats: [obj[y]]}); - }; +map = function(obj) { + var x = "partner"; + var y = "visits"; + emit(obj[x], {stats: [obj[y]]}); +}; - assert.commandWorked(testDB.system.js.save({_id: "mr_stored_map", value: map})); +assert.commandWorked(testDB.system.js.save({_id: "mr_stored_map", value: map})); - res = coll.mapReduce( - function() { - mr_stored_map(this); - }, - function(k, v) { - return mr_stored_reduce(k, v); - }, - {out: "mr_stored_out", scope: {xx: 1}}); +res = coll.mapReduce( + function() { + mr_stored_map(this); + }, + function(k, v) { + return mr_stored_reduce(k, v); + }, + {out: "mr_stored_out", scope: {xx: 1}}); - z = res.convertToSingleObject(); - assert.eq(2, Object.keySet(z).length); - assert.eq([9, 11, 30], z["1"].stats); - assert.eq([9, 41, 41], z["2"].stats); +z = res.convertToSingleObject(); 
+assert.eq(2, Object.keySet(z).length); +assert.eq([9, 11, 30], z["1"].stats); +assert.eq([9, 41, 41], z["2"].stats); - assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_map"})); - assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_reduce"})); +assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_map"})); +assert.commandWorked(testDB.system.js.remove({_id: "mr_stored_reduce"})); - res.drop(); +res.drop(); }()); diff --git a/jstests/core/mr_tolerates_js_exception.js b/jstests/core/mr_tolerates_js_exception.js index 29de4cf795d..2689bce8433 100644 --- a/jstests/core/mr_tolerates_js_exception.js +++ b/jstests/core/mr_tolerates_js_exception.js @@ -10,63 +10,61 @@ * ] */ (function() { - "use strict"; +"use strict"; - let coll = db.mr_tolerates_js_exception; - coll.drop(); - for (let i = 0; i < 100; i++) { - assert.writeOK(coll.insert({_id: i, a: 1})); - } +let coll = db.mr_tolerates_js_exception; +coll.drop(); +for (let i = 0; i < 100; i++) { + assert.writeOK(coll.insert({_id: i, a: 1})); +} - // Test that the command fails with a JS interpreter failure error when the reduce function - // throws. - let cmdOutput = db.runCommand({ - mapReduce: coll.getName(), - map: function() { - emit(this.a, 1); - }, - reduce: function(key, value) { - (function myFunction() { - throw new Error("Intentionally thrown inside reduce function"); - })(); - }, - out: {inline: 1} - }); - assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput)); - assert(/Intentionally thrown inside reduce function/.test(cmdOutput.errmsg), - () => "mapReduce didn't include the message from the exception thrown: " + - tojson(cmdOutput)); - assert(/myFunction@/.test(cmdOutput.errmsg), - () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput)); - assert( - !cmdOutput.hasOwnProperty("stack"), - () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput)); - assert(!cmdOutput.hasOwnProperty("originalError"), - () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput)); +// Test that the command fails with a JS interpreter failure error when the reduce function +// throws. +let cmdOutput = db.runCommand({ + mapReduce: coll.getName(), + map: function() { + emit(this.a, 1); + }, + reduce: function(key, value) { + (function myFunction() { + throw new Error("Intentionally thrown inside reduce function"); + })(); + }, + out: {inline: 1} +}); +assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput)); +assert( + /Intentionally thrown inside reduce function/.test(cmdOutput.errmsg), + () => "mapReduce didn't include the message from the exception thrown: " + tojson(cmdOutput)); +assert(/myFunction@/.test(cmdOutput.errmsg), + () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput)); +assert(!cmdOutput.hasOwnProperty("stack"), + () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput)); +assert(!cmdOutput.hasOwnProperty("originalError"), + () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput)); - // Test that the command fails with a JS interpreter failure error when the map function - // throws. 
- cmdOutput = db.runCommand({ - mapReduce: coll.getName(), - map: function() { - (function myFunction() { - throw new Error("Intentionally thrown inside map function"); - })(); - }, - reduce: function(key, value) { - return Array.sum(value); - }, - out: {inline: 1} - }); - assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput)); - assert(/Intentionally thrown inside map function/.test(cmdOutput.errmsg), - () => "mapReduce didn't include the message from the exception thrown: " + - tojson(cmdOutput)); - assert(/myFunction@/.test(cmdOutput.errmsg), - () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput)); - assert( - !cmdOutput.hasOwnProperty("stack"), - () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput)); - assert(!cmdOutput.hasOwnProperty("originalError"), - () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput)); +// Test that the command fails with a JS interpreter failure error when the map function +// throws. +cmdOutput = db.runCommand({ + mapReduce: coll.getName(), + map: function() { + (function myFunction() { + throw new Error("Intentionally thrown inside map function"); + })(); + }, + reduce: function(key, value) { + return Array.sum(value); + }, + out: {inline: 1} +}); +assert.commandFailedWithCode(cmdOutput, ErrorCodes.JSInterpreterFailure, tojson(cmdOutput)); +assert( + /Intentionally thrown inside map function/.test(cmdOutput.errmsg), + () => "mapReduce didn't include the message from the exception thrown: " + tojson(cmdOutput)); +assert(/myFunction@/.test(cmdOutput.errmsg), + () => "mapReduce didn't return the JavaScript stacktrace: " + tojson(cmdOutput)); +assert(!cmdOutput.hasOwnProperty("stack"), + () => "mapReduce shouldn't return JavaScript stacktrace separately: " + tojson(cmdOutput)); +assert(!cmdOutput.hasOwnProperty("originalError"), + () => "mapReduce shouldn't return wrapped version of the error: " + tojson(cmdOutput)); }()); diff --git a/jstests/core/nan.js b/jstests/core/nan.js index 1b34a53e64d..4cca00c4b66 100644 --- a/jstests/core/nan.js +++ b/jstests/core/nan.js @@ -2,59 +2,59 @@ * Tests basic NaN handling. Note that WiredTiger indexes handle -NaN and NaN differently. */ (function() { - "use strict"; - - const coll = db.jstests_nan; - coll.drop(); - - assert.writeOK(coll.insert({_id: 0, a: -Infinity})); - assert.writeOK(coll.insert({_id: 1, a: -3})); - assert.writeOK(coll.insert({_id: 2, a: 0})); - assert.writeOK(coll.insert({_id: 3, a: 3})); - assert.writeOK(coll.insert({_id: 4, a: Infinity})); - assert.writeOK(coll.insert({_id: 5, a: NaN})); - assert.writeOK(coll.insert({_id: 6, a: -NaN})); - assert.writeOK(coll.insert({_id: 7, a: undefined})); - assert.writeOK(coll.insert({_id: 8, a: null})); - assert.writeOK(coll.insert({_id: 9, a: []})); - assert.writeOK(coll.insert({_id: 10, a: {b: 1}})); - assert.writeOK(coll.insert({_id: 11, a: {b: 1}})); - - /** - * Ensures correct results for EQ, LT, LTE, GT, and GTE cases. 
- */ - var testNaNComparisons = function() { - // EQ - let cursor = coll.find({a: NaN}).sort({_id: 1}); - assert.eq(5, cursor.next()["_id"]); - assert.eq(6, cursor.next()["_id"]); - assert(!cursor.hasNext()); - - // LT - cursor = coll.find({a: {$lt: NaN}}); - assert(!cursor.hasNext()); - - // LTE - cursor = coll.find({a: {$lte: NaN}}).sort({_id: 1}); - assert.eq(5, cursor.next()["_id"]); - assert.eq(6, cursor.next()["_id"]); - assert(!cursor.hasNext()); - - // GT - cursor = coll.find({a: {$gt: NaN}}); - assert(!cursor.hasNext()); - - // GTE - cursor = coll.find({a: {$gte: NaN}}).sort({_id: 1}); - assert.eq(5, cursor.next()["_id"]); - assert.eq(6, cursor.next()["_id"]); - assert(!cursor.hasNext()); - }; - - // Unindexed. - testNaNComparisons(); - - // Indexed. - assert.commandWorked(coll.createIndex({a: 1})); - testNaNComparisons(); +"use strict"; + +const coll = db.jstests_nan; +coll.drop(); + +assert.writeOK(coll.insert({_id: 0, a: -Infinity})); +assert.writeOK(coll.insert({_id: 1, a: -3})); +assert.writeOK(coll.insert({_id: 2, a: 0})); +assert.writeOK(coll.insert({_id: 3, a: 3})); +assert.writeOK(coll.insert({_id: 4, a: Infinity})); +assert.writeOK(coll.insert({_id: 5, a: NaN})); +assert.writeOK(coll.insert({_id: 6, a: -NaN})); +assert.writeOK(coll.insert({_id: 7, a: undefined})); +assert.writeOK(coll.insert({_id: 8, a: null})); +assert.writeOK(coll.insert({_id: 9, a: []})); +assert.writeOK(coll.insert({_id: 10, a: {b: 1}})); +assert.writeOK(coll.insert({_id: 11, a: {b: 1}})); + +/** + * Ensures correct results for EQ, LT, LTE, GT, and GTE cases. + */ +var testNaNComparisons = function() { + // EQ + let cursor = coll.find({a: NaN}).sort({_id: 1}); + assert.eq(5, cursor.next()["_id"]); + assert.eq(6, cursor.next()["_id"]); + assert(!cursor.hasNext()); + + // LT + cursor = coll.find({a: {$lt: NaN}}); + assert(!cursor.hasNext()); + + // LTE + cursor = coll.find({a: {$lte: NaN}}).sort({_id: 1}); + assert.eq(5, cursor.next()["_id"]); + assert.eq(6, cursor.next()["_id"]); + assert(!cursor.hasNext()); + + // GT + cursor = coll.find({a: {$gt: NaN}}); + assert(!cursor.hasNext()); + + // GTE + cursor = coll.find({a: {$gte: NaN}}).sort({_id: 1}); + assert.eq(5, cursor.next()["_id"]); + assert.eq(6, cursor.next()["_id"]); + assert(!cursor.hasNext()); +}; + +// Unindexed. +testNaNComparisons(); + +// Indexed. +assert.commandWorked(coll.createIndex({a: 1})); +testNaNComparisons(); }()); diff --git a/jstests/core/natural.js b/jstests/core/natural.js index d972be22839..2471e2be495 100644 --- a/jstests/core/natural.js +++ b/jstests/core/natural.js @@ -1,26 +1,26 @@ // Tests for $natural sort and $natural hint. (function() { - 'use strict'; +'use strict'; - var results; +var results; - var coll = db.jstests_natural; - coll.drop(); +var coll = db.jstests_natural; +coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); - assert.writeOK(coll.insert({_id: 1, a: 3})); - assert.writeOK(coll.insert({_id: 2, a: 2})); - assert.writeOK(coll.insert({_id: 3, a: 1})); +assert.commandWorked(coll.ensureIndex({a: 1})); +assert.writeOK(coll.insert({_id: 1, a: 3})); +assert.writeOK(coll.insert({_id: 2, a: 2})); +assert.writeOK(coll.insert({_id: 3, a: 1})); - // Regression test for SERVER-20660. Ensures that documents returned with $natural don't have - // any extraneous fields. - results = coll.find({a: 2}).sort({$natural: 1}).toArray(); - assert.eq(results.length, 1); - assert.eq(results[0], {_id: 2, a: 2}); +// Regression test for SERVER-20660. 
Ensures that documents returned with $natural don't have +// any extraneous fields. +results = coll.find({a: 2}).sort({$natural: 1}).toArray(); +assert.eq(results.length, 1); +assert.eq(results[0], {_id: 2, a: 2}); - // Regression test for SERVER-20660. Ensures that documents returned with $natural don't have - // any extraneous fields. - results = coll.find({a: 2}).hint({$natural: -1}).toArray(); - assert.eq(results.length, 1); - assert.eq(results[0], {_id: 2, a: 2}); +// Regression test for SERVER-20660. Ensures that documents returned with $natural don't have +// any extraneous fields. +results = coll.find({a: 2}).hint({$natural: -1}).toArray(); +assert.eq(results.length, 1); +assert.eq(results[0], {_id: 2, a: 2}); })(); diff --git a/jstests/core/ne_array.js b/jstests/core/ne_array.js index 5e5b8c860dd..e703d4b13a4 100644 --- a/jstests/core/ne_array.js +++ b/jstests/core/ne_array.js @@ -4,62 +4,57 @@ // returned for this type of query when an index is present. // @tags: [requires_non_retryable_writes] (function() { - const coll = db.ne_array; - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - - assert.commandWorked(coll.insert({_id: 0, a: [1]})); - assert.commandWorked(coll.insert({_id: 1, a: [1, 3]})); - - assert.eq(coll.find({a: {$ne: [1, 3]}}, {_id: 1}).toArray(), [{_id: 0}]); - assert.eq(coll.find({a: {$ne: [1]}}, {_id: 1}).toArray(), [{_id: 1}]); - - assert.eq(coll.find({a: {$not: {$in: [[1]]}}}, {_id: 1}).toArray(), [{_id: 1}]); - assert.eq(coll.find({a: {$not: {$in: [[1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]); - assert.eq(coll.find({a: {$not: {$in: [[1], [1, 3]]}}}, {_id: 1}).toArray(), []); - assert.eq(coll.find({a: {$not: {$in: ["scalar value", [1, 3]]}}}, {_id: 1}).toArray(), - [{_id: 0}]); - - // Insert some documents which have nested arrays so we can test $elemMatch value. - assert.commandWorked(coll.remove({})); - assert.commandWorked(coll.insert({_id: 0, a: [[123]]})); - assert.commandWorked(coll.insert({_id: 1, a: [4, 5, [123]]})); - assert.commandWorked(coll.insert({_id: 2, a: [7, 8]})); - - // sort by _id in case we run on a sharded cluster which puts the documents on different - // shards (and thus, might return them in any order). - assert.eq(coll.find({a: {$elemMatch: {$not: {$eq: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), - [{_id: 1}, {_id: 2}]); - - assert.eq( - coll.find({a: {$elemMatch: {$not: {$in: [[123]]}}}}, {_id: 1}).sort({_id: 1}).toArray(), - [{_id: 1}, {_id: 2}]); - - assert.eq(coll.find({a: {$not: {$elemMatch: {$eq: [123]}}}}, {_id: 1}).toArray(), [{_id: 2}]); - assert.eq(coll.find({a: {$not: {$elemMatch: {$in: [[123]]}}}}, {_id: 1}).toArray(), [{_id: 2}]); - - // Test $elemMatch object. - assert.commandWorked(coll.remove({})); - coll.dropIndexes(); - assert.commandWorked(coll.createIndex({"a.b": 1})); - assert.commandWorked(coll.insert({_id: 0, a: [[123]]})); - assert.commandWorked(coll.insert({_id: 1, a: [{b: 123}]})); - assert.commandWorked(coll.insert({_id: 2, a: [{b: [4, [123]]}]})); - assert.commandWorked(coll.insert({_id: 3, a: [{b: [[123]]}]})); - - // Remember that $ne with an array will match arrays where _none_ of the elements match. 
- assert.eq(coll.find({a: {$elemMatch: {b: {$ne: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), - [{_id: 0}, {_id: 1}]); - assert.eq(coll.find({a: {$elemMatch: {b: {$not: {$in: [[123]]}}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray(), - [{_id: 0}, {_id: 1}]); - - assert.eq(coll.find({a: {$not: {$elemMatch: {b: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), - [{_id: 0}, {_id: 1}]); - assert.eq(coll.find({a: {$not: {$elemMatch: {b: {$in: [[123]]}}}}}, {_id: 1}) - .sort({_id: 1}) - .toArray(), - [{_id: 0}, {_id: 1}]); - +const coll = db.ne_array; +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); + +assert.commandWorked(coll.insert({_id: 0, a: [1]})); +assert.commandWorked(coll.insert({_id: 1, a: [1, 3]})); + +assert.eq(coll.find({a: {$ne: [1, 3]}}, {_id: 1}).toArray(), [{_id: 0}]); +assert.eq(coll.find({a: {$ne: [1]}}, {_id: 1}).toArray(), [{_id: 1}]); + +assert.eq(coll.find({a: {$not: {$in: [[1]]}}}, {_id: 1}).toArray(), [{_id: 1}]); +assert.eq(coll.find({a: {$not: {$in: [[1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]); +assert.eq(coll.find({a: {$not: {$in: [[1], [1, 3]]}}}, {_id: 1}).toArray(), []); +assert.eq(coll.find({a: {$not: {$in: ["scalar value", [1, 3]]}}}, {_id: 1}).toArray(), [{_id: 0}]); + +// Insert some documents which have nested arrays so we can test $elemMatch value. +assert.commandWorked(coll.remove({})); +assert.commandWorked(coll.insert({_id: 0, a: [[123]]})); +assert.commandWorked(coll.insert({_id: 1, a: [4, 5, [123]]})); +assert.commandWorked(coll.insert({_id: 2, a: [7, 8]})); + +// sort by _id in case we run on a sharded cluster which puts the documents on different +// shards (and thus, might return them in any order). +assert.eq(coll.find({a: {$elemMatch: {$not: {$eq: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 1}, {_id: 2}]); + +assert.eq(coll.find({a: {$elemMatch: {$not: {$in: [[123]]}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 1}, {_id: 2}]); + +assert.eq(coll.find({a: {$not: {$elemMatch: {$eq: [123]}}}}, {_id: 1}).toArray(), [{_id: 2}]); +assert.eq(coll.find({a: {$not: {$elemMatch: {$in: [[123]]}}}}, {_id: 1}).toArray(), [{_id: 2}]); + +// Test $elemMatch object. +assert.commandWorked(coll.remove({})); +coll.dropIndexes(); +assert.commandWorked(coll.createIndex({"a.b": 1})); +assert.commandWorked(coll.insert({_id: 0, a: [[123]]})); +assert.commandWorked(coll.insert({_id: 1, a: [{b: 123}]})); +assert.commandWorked(coll.insert({_id: 2, a: [{b: [4, [123]]}]})); +assert.commandWorked(coll.insert({_id: 3, a: [{b: [[123]]}]})); + +// Remember that $ne with an array will match arrays where _none_ of the elements match. +assert.eq(coll.find({a: {$elemMatch: {b: {$ne: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 0}, {_id: 1}]); +assert.eq( + coll.find({a: {$elemMatch: {b: {$not: {$in: [[123]]}}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 0}, {_id: 1}]); + +assert.eq(coll.find({a: {$not: {$elemMatch: {b: [123]}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 0}, {_id: 1}]); +assert.eq( + coll.find({a: {$not: {$elemMatch: {b: {$in: [[123]]}}}}}, {_id: 1}).sort({_id: 1}).toArray(), + [{_id: 0}, {_id: 1}]); })(); diff --git a/jstests/core/nestedarr1.js b/jstests/core/nestedarr1.js index 9fc8ef3c582..7edef9db512 100644 --- a/jstests/core/nestedarr1.js +++ b/jstests/core/nestedarr1.js @@ -6,42 +6,41 @@ * supported BSON nesting depth, as well as maintaining index consistency. 
*/ (function() { - "use strict"; +"use strict"; - function makeNestArr(depth) { - if (depth == 1) { - return {a: 1}; - } else if (depth == 2) { - return {a: [1]}; - } else { - return {a: [makeNestArr(depth - 2)]}; - } +function makeNestArr(depth) { + if (depth == 1) { + return {a: 1}; + } else if (depth == 2) { + return {a: [1]}; + } else { + return {a: [makeNestArr(depth - 2)]}; } +} - let collection = db.arrNestTest; - collection.drop(); +let collection = db.arrNestTest; +collection.drop(); - assert.commandWorked(collection.ensureIndex({a: 1})); +assert.commandWorked(collection.ensureIndex({a: 1})); - const kMaxDocumentDepthSoftLimit = 100; - const kJavaScriptMaxDepthLimit = 150; +const kMaxDocumentDepthSoftLimit = 100; +const kJavaScriptMaxDepthLimit = 150; - let level; - for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) { - let res = db.runCommand({insert: collection.getName(), documents: [makeNestArr(level)]}); - if (!res.ok) { - assert.commandFailedWithCode( - res, 17280, "Expected insertion to fail only because key is too large to index"); - break; - } +let level; +for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) { + let res = db.runCommand({insert: collection.getName(), documents: [makeNestArr(level)]}); + if (!res.ok) { + assert.commandFailedWithCode( + res, 17280, "Expected insertion to fail only because key is too large to index"); + break; } +} - assert.gt(level, - kMaxDocumentDepthSoftLimit, - "Unable to insert a document nested with " + level + - " levels, which is less than the supported limit of " + - kMaxDocumentDepthSoftLimit); - assert.eq(collection.count(), - collection.find().hint({a: 1}).itcount(), - "Number of documents in collection does not match number of entries in index"); +assert.gt(level, + kMaxDocumentDepthSoftLimit, + "Unable to insert a document nested with " + level + + " levels, which is less than the supported limit of " + kMaxDocumentDepthSoftLimit); +assert.eq(collection.count(), + collection.find().hint({a: 1}).itcount(), + "Number of documents in collection does not match number of entries in index"); }()); diff --git a/jstests/core/nestedobj1.js b/jstests/core/nestedobj1.js index ea1984e7954..44fdd4599d7 100644 --- a/jstests/core/nestedobj1.js +++ b/jstests/core/nestedobj1.js @@ -6,41 +6,40 @@ * supported BSON nesting depth, as well as maintaining index consistency. 
*/ (function() { - "use strict"; +"use strict"; - function makeNestObj(depth) { - if (depth == 1) { - return {a: 1}; - } else { - return {a: makeNestObj(depth - 1)}; - } +function makeNestObj(depth) { + if (depth == 1) { + return {a: 1}; + } else { + return {a: makeNestObj(depth - 1)}; } +} - let collection = db.objNestTest; - collection.drop(); +let collection = db.objNestTest; +collection.drop(); - assert.commandWorked(collection.ensureIndex({a: 1})); +assert.commandWorked(collection.ensureIndex({a: 1})); - const kMaxDocumentDepthSoftLimit = 100; - const kJavaScriptMaxDepthLimit = 150; +const kMaxDocumentDepthSoftLimit = 100; +const kJavaScriptMaxDepthLimit = 150; - let level; - for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) { - let object = makeNestObj(level); - let res = db.runCommand({insert: collection.getName(), documents: [makeNestObj(level)]}); - if (!res.ok) { - assert.commandFailedWithCode( - res, 17280, "Expected insertion to fail only because key is too large to index"); - break; - } +let level; +for (level = 1; level < kJavaScriptMaxDepthLimit - 3; level++) { + let object = makeNestObj(level); + let res = db.runCommand({insert: collection.getName(), documents: [makeNestObj(level)]}); + if (!res.ok) { + assert.commandFailedWithCode( + res, 17280, "Expected insertion to fail only because key is too large to index"); + break; } +} - assert.gt(level, - kMaxDocumentDepthSoftLimit, - "Unable to insert a document nested with " + level + - " levels, which is less than the supported limit of " + - kMaxDocumentDepthSoftLimit); - assert.eq(collection.count(), - collection.find().hint({a: 1}).itcount(), - "Number of documents in collection does not match number of entries in index"); +assert.gt(level, + kMaxDocumentDepthSoftLimit, + "Unable to insert a document nested with " + level + + " levels, which is less than the supported limit of " + kMaxDocumentDepthSoftLimit); +assert.eq(collection.count(), + collection.find().hint({a: 1}).itcount(), + "Number of documents in collection does not match number of entries in index"); }()); diff --git a/jstests/core/nin.js b/jstests/core/nin.js index 36ed47550e0..d8a254357ba 100644 --- a/jstests/core/nin.js +++ b/jstests/core/nin.js @@ -12,12 +12,11 @@ function checkEqual(name, key, value) { assert.eq(t.find().count(), i + n, - "checkEqual " + name + " $in + $nin != total | " + i + " + " + n + " != " + - t.find().count()); + "checkEqual " + name + " $in + $nin != total | " + i + " + " + n + + " != " + t.find().count()); } doTest = function(n) { - t.save({a: [1, 2, 3]}); t.save({a: [1, 2, 4]}); t.save({a: [1, 8, 5]}); diff --git a/jstests/core/no_db_created.js b/jstests/core/no_db_created.js index 231e8ffe581..e563a7cd468 100644 --- a/jstests/core/no_db_created.js +++ b/jstests/core/no_db_created.js @@ -3,35 +3,35 @@ // checks that operations do not create a database (function() { - "use strict"; - var adminDB = db.getSiblingDB("admin"); - var noDB = function(db) { - var dbName = db.getName(); - var dbsRes = assert.commandWorked(adminDB.runCommand("listDatabases")); - dbsRes.databases.forEach(function(e) { - assert.neq( - dbName, e.name, "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes)); - }); - }; - var mydb = db.getSiblingDB("neverCreated"); - mydb.dropDatabase(); - noDB(mydb); +"use strict"; +var adminDB = db.getSiblingDB("admin"); +var noDB = function(db) { + var dbName = db.getName(); + var dbsRes = assert.commandWorked(adminDB.runCommand("listDatabases")); + dbsRes.databases.forEach(function(e) { + assert.neq( 
+ dbName, e.name, "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes)); + }); +}; +var mydb = db.getSiblingDB("neverCreated"); +mydb.dropDatabase(); +noDB(mydb); - var coll = mydb.fake; +var coll = mydb.fake; - // force:true is for replset passthroughs - assert.commandFailed(coll.runCommand("compact", {force: true})); - noDB(mydb); - assert.writeOK(coll.insert({})); - mydb.dropDatabase(); +// force:true is for replset passthroughs +assert.commandFailed(coll.runCommand("compact", {force: true})); +noDB(mydb); +assert.writeOK(coll.insert({})); +mydb.dropDatabase(); - assert.commandFailed(coll.runCommand("dropIndexes")); - noDB(mydb); - assert.writeOK(coll.insert({})); - mydb.dropDatabase(); +assert.commandFailed(coll.runCommand("dropIndexes")); +noDB(mydb); +assert.writeOK(coll.insert({})); +mydb.dropDatabase(); - assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1})); - noDB(mydb); - assert.writeOK(coll.insert({})); - mydb.dropDatabase(); +assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1})); +noDB(mydb); +assert.writeOK(coll.insert({})); +mydb.dropDatabase(); }());
\ No newline at end of file diff --git a/jstests/core/not2.js b/jstests/core/not2.js index 610d79c4d8f..8f0f91da1d5 100644 --- a/jstests/core/not2.js +++ b/jstests/core/not2.js @@ -1,89 +1,89 @@ // @tags: [requires_non_retryable_writes] (function() { - "use strict"; +"use strict"; - const coll = db.jstests_not2; - coll.drop(); +const coll = db.jstests_not2; +coll.drop(); - function check(query, expected) { - const resultList = coll.find(query).sort({i: 1}).toArray(); - assert.eq(expected.length, resultList.length, query); +function check(query, expected) { + const resultList = coll.find(query).sort({i: 1}).toArray(); + assert.eq(expected.length, resultList.length, query); - for (let x = 0; x < expected.length; ++x) { - assert.eq(expected[x], resultList[x].i, query); - } + for (let x = 0; x < expected.length; ++x) { + assert.eq(expected[x], resultList[x].i, query); } +} - function fail(query) { - assert.throws(() => coll.find(query).itcount()); - } +function fail(query) { + assert.throws(() => coll.find(query).itcount()); +} - function doTest() { - assert.writeOK(coll.remove({})); +function doTest() { + assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({i: "a"})); - assert.writeOK(coll.insert({i: "b"})); + assert.writeOK(coll.insert({i: "a"})); + assert.writeOK(coll.insert({i: "b"})); - // TODO SERVER-12735: We currently do not handle double negatives during query - // canonicalization. - fail({i: {$not: {$not: "a"}}}); - check({i: {$not: {$not: {$gt: "a"}}}}, ["b"]); + // TODO SERVER-12735: We currently do not handle double negatives during query + // canonicalization. + fail({i: {$not: {$not: "a"}}}); + check({i: {$not: {$not: {$gt: "a"}}}}, ["b"]); - fail({i: {$not: "a"}}); - fail({i: {$not: {$ref: "foo"}}}); - fail({i: {$not: {}}}); - check({i: {$gt: "a"}}, ["b"]); - check({i: {$not: {$gt: "a"}}}, ["a"]); - check({i: {$not: {$ne: "a"}}}, ["a"]); - check({i: {$not: {$gte: "b"}}}, ["a"]); - check({i: {$exists: true}}, ["a", "b"]); - check({i: {$not: {$exists: true}}}, []); - check({j: {$not: {$exists: false}}}, []); - check({j: {$not: {$exists: true}}}, ["a", "b"]); - check({i: {$not: {$in: ["a"]}}}, ["b"]); - check({i: {$not: {$in: ["a", "b"]}}}, []); - check({i: {$not: {$in: ["g"]}}}, ["a", "b"]); - check({i: {$not: {$nin: ["a"]}}}, ["a"]); - check({i: {$not: /a/}}, ["b"]); - check({i: {$not: /(a|b)/}}, []); - check({i: {$not: /a/, $regex: "a"}}, []); - check({i: {$not: /aa/}}, ["a", "b"]); - check({i: {$not: {$regex: "a"}}}, ["b"]); - check({i: {$not: {$regex: "A", $options: "i"}}}, ["b"]); - check({i: {$not: {$regex: "[ab]"}}}, []); - check({i: {$not: {$regex: "^foo"}}}, ["a", "b"]); - fail({i: {$not: {$options: "a"}}}); - check({i: {$type: 2}}, ["a", "b"]); - check({i: {$not: {$type: 1}}}, ["a", "b"]); - check({i: {$not: {$type: 2}}}, []); + fail({i: {$not: "a"}}); + fail({i: {$not: {$ref: "foo"}}}); + fail({i: {$not: {}}}); + check({i: {$gt: "a"}}, ["b"]); + check({i: {$not: {$gt: "a"}}}, ["a"]); + check({i: {$not: {$ne: "a"}}}, ["a"]); + check({i: {$not: {$gte: "b"}}}, ["a"]); + check({i: {$exists: true}}, ["a", "b"]); + check({i: {$not: {$exists: true}}}, []); + check({j: {$not: {$exists: false}}}, []); + check({j: {$not: {$exists: true}}}, ["a", "b"]); + check({i: {$not: {$in: ["a"]}}}, ["b"]); + check({i: {$not: {$in: ["a", "b"]}}}, []); + check({i: {$not: {$in: ["g"]}}}, ["a", "b"]); + check({i: {$not: {$nin: ["a"]}}}, ["a"]); + check({i: {$not: /a/}}, ["b"]); + check({i: {$not: /(a|b)/}}, []); + check({i: {$not: /a/, $regex: "a"}}, []); + check({i: {$not: /aa/}}, 
["a", "b"]); + check({i: {$not: {$regex: "a"}}}, ["b"]); + check({i: {$not: {$regex: "A", $options: "i"}}}, ["b"]); + check({i: {$not: {$regex: "[ab]"}}}, []); + check({i: {$not: {$regex: "^foo"}}}, ["a", "b"]); + fail({i: {$not: {$options: "a"}}}); + check({i: {$type: 2}}, ["a", "b"]); + check({i: {$not: {$type: 1}}}, ["a", "b"]); + check({i: {$not: {$type: 2}}}, []); - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({i: 1})); - check({i: {$not: {$mod: [5, 1]}}}, []); - check({i: {$mod: [5, 2]}}, []); - check({i: {$not: {$mod: [5, 2]}}}, [1]); + assert.writeOK(coll.remove({})); + assert.writeOK(coll.insert({i: 1})); + check({i: {$not: {$mod: [5, 1]}}}, []); + check({i: {$mod: [5, 2]}}, []); + check({i: {$not: {$mod: [5, 2]}}}, [1]); - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({i: ["a", "b"]})); - check({i: {$not: {$size: 2}}}, []); - check({i: {$not: {$size: 3}}}, [["a", "b"]]); - check({i: {$not: {$gt: "a"}}}, []); - check({i: {$not: {$gt: "c"}}}, [["a", "b"]]); - check({i: {$not: {$all: ["a", "b"]}}}, []); - check({i: {$not: {$all: ["c"]}}}, [["a", "b"]]); + assert.writeOK(coll.remove({})); + assert.writeOK(coll.insert({i: ["a", "b"]})); + check({i: {$not: {$size: 2}}}, []); + check({i: {$not: {$size: 3}}}, [["a", "b"]]); + check({i: {$not: {$gt: "a"}}}, []); + check({i: {$not: {$gt: "c"}}}, [["a", "b"]]); + check({i: {$not: {$all: ["a", "b"]}}}, []); + check({i: {$not: {$all: ["c"]}}}, [["a", "b"]]); - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({i: [{j: "a"}]})); - assert.writeOK(coll.insert({i: [{j: "b"}]})); - check({i: {$not: {$elemMatch: {j: "a"}}}}, [[{j: "b"}]]); - check({i: {$not: {$elemMatch: {j: "f"}}}}, [[{j: "a"}], [{j: "b"}]]); - } + assert.writeOK(coll.remove({})); + assert.writeOK(coll.insert({i: [{j: "a"}]})); + assert.writeOK(coll.insert({i: [{j: "b"}]})); + check({i: {$not: {$elemMatch: {j: "a"}}}}, [[{j: "b"}]]); + check({i: {$not: {$elemMatch: {j: "f"}}}}, [[{j: "a"}], [{j: "b"}]]); +} - // Run the test without any index. - doTest(); +// Run the test without any index. +doTest(); - // Run the test with an index present. - assert.commandWorked(coll.ensureIndex({i: 1})); - doTest(); +// Run the test with an index present. +assert.commandWorked(coll.ensureIndex({i: 1})); +doTest(); }()); diff --git a/jstests/core/null_query_semantics.js b/jstests/core/null_query_semantics.js index 7aa7a2a585f..8f664a6d80b 100644 --- a/jstests/core/null_query_semantics.js +++ b/jstests/core/null_query_semantics.js @@ -1,201 +1,308 @@ // Tests the behavior of queries with a {$eq: null} or {$ne: null} predicate. (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. +load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. - const coll = db.not_equals_null; - coll.drop(); +const coll = db.not_equals_null; +coll.drop(); - function extractAValues(results) { - return results.map(function(res) { - if (!res.hasOwnProperty("a")) { - return {}; - } - return {a: res.a}; - }); - } +function extractAValues(results) { + return results.map(function(res) { + if (!res.hasOwnProperty("a")) { + return {}; + } + return {a: res.a}; + }); +} + +function testNotEqualsNullSemantics() { + // For the first portion of the test, only insert documents without arrays. This will avoid + // making the indexes multi-key, which may allow an index to be used to answer the queries. 
+ assert.writeOK(coll.insert([ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_null", a: null}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + {_id: "a_undefined", a: undefined}, + {_id: "no_a"}, + ])); + + // Throughout this test we will run queries with a projection which may allow the planner to + // consider an index-only plan. Checking the results of those queries will test that the + // query system will never choose such an optimization if it is incorrect. + const projectToOnlyA = {_id: 0, a: 1}; + const projectToOnlyADotB = {_id: 0, "a.b": 1}; + + // Test the semantics of the query {a: {$eq: null}}. + (function testBasicNullQuery() { + const noProjectResults = coll.find({a: {$eq: null}}).toArray(); + const expected = + [{_id: "a_null", a: null}, {_id: "a_undefined", a: undefined}, {_id: "no_a"}]; + assert(resultsEq(expected, noProjectResults), tojson(noProjectResults)); + + const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + // Test the semantics of the query {a: {$ne: null}}. + (function testBasicNotEqualsNullQuery() { + const noProjectResults = coll.find({a: {$ne: null}}).toArray(); + const expected = [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); + + const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + // Test the semantics of the query {a: {$nin: [null, <number>]}}. + (function testNotInNullQuery() { + const query = {a: {$nin: [null, 4]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + ]; + + // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". + const expectedWithUndefined = expected.concat([ + {_id: "a_undefined", a: undefined}, + ]); + assert(resultsEq(noProjectResults, expected) || + resultsEq(noProjectResults, expectedWithUndefined), + noProjectResults); + + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)) || + resultsEq(projectResults, extractAValues(expectedWithUndefined)), + projectResults); + }()); + + (function testNotInNullAndRegexQuery() { + // While $nin: [null, ...] can be indexed, $nin: [<regex>] cannot. Ensure that we get + // the correct results in this case. + const query = {a: {$nin: [null, /^hi.*/]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_empty_subobject", a: 4}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + + // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". 
+ {_id: "a_undefined", a: undefined}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - function testNotEqualsNullSemantics() { - // For the first portion of the test, only insert documents without arrays. This will avoid - // making the indexes multi-key, which may allow an index to be used to answer the queries. - assert.writeOK(coll.insert([ + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + (function testExistsFalse() { + const noProjectResults = coll.find({a: {$exists: false}}).toArray(); + const expected = [ + {_id: "no_a"}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); + + const projectResults = coll.find({a: {$exists: false}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + // Test the semantics of the query {"a.b": {$eq: null}}. + (function testDottedEqualsNull() { + const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray(); + assert(resultsEq(noProjectResults, + [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_null", a: null}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + {_id: "a_undefined", a: undefined}, + {_id: "no_a"} + ]), + tojson(noProjectResults)); + + const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, + [{a: {}}, {}, {}, {a: {b: null}}, {a: {b: undefined}}, {}, {}]), + tojson(projectResults)); + }()); + + // Test the semantics of the query {"a.b": {$ne: null}}. + (function testDottedNotEqualsNull() { + const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray(); + assert(resultsEq(noProjectResults, [{_id: "a_subobject_b_not_null", a: {b: "hi"}}]), + tojson(noProjectResults)); + + const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, [{a: {b: "hi"}}]), tojson(projectResults)); + }()); + + (function testDottedExistsFalse() { + const noProjectResults = coll.find({"a.b": {$exists: false}}).toArray(); + const expected = [ + {_id: "no_a"}, {_id: "a_empty_subobject", a: {}}, {_id: "a_null", a: null}, {_id: "a_number", a: 4}, + {_id: "a_undefined", a: undefined}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); + + const projectResults = coll.find({"a.b": {$exists: false}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, [{}, {a: {}}, {}, {}, {}]), tojson(projectResults)); + }()); + + // Test similar queries, but with an $elemMatch. These queries should have no results since + // an $elemMatch requires an array. 
+ (function testElemMatchQueriesWithNoArrays() { + for (let elemMatchQuery of [{a: {$elemMatch: {$eq: null}}}, + {a: {$elemMatch: {$ne: null}}}, + {"a.b": {$elemMatch: {$eq: null}}}, + {"a.b": {$elemMatch: {$ne: null}}}, + {a: {$elemMatch: {b: {$eq: null}}}}, + {a: {$elemMatch: {b: {$ne: null}}}}, + ]) { + const noProjectResults = coll.find(elemMatchQuery).toArray(); + assert(resultsEq(noProjectResults, []), + `Expected no results for query ${tojson(elemMatchQuery)}, got ` + + tojson(noProjectResults)); + + let projectResults = coll.find(elemMatchQuery, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, []), + `Expected no results for query ${tojson(elemMatchQuery)}, got ` + + tojson(projectResults)); + + projectResults = coll.find(elemMatchQuery, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, []), + `Expected no results for query ${tojson(elemMatchQuery)}, got ` + + tojson(projectResults)); + } + }()); + + // An index which includes "a" or a sub-path of "a" will become multi-key after this insert. + const writeResult = coll.insert([ + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + {_id: "a_value_array_all_nulls", a: [null, null]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + ]); + if (writeResult.hasWriteErrors()) { + // We're testing a hashed index which is incompatible with arrays. Skip the multi-key + // portion of this test for this index. + assert.eq(writeResult.getWriteErrors().length, 1, tojson(writeResult)); + assert.eq(writeResult.getWriteErrors()[0].code, 16766, tojson(writeResult)); + return; + } + assert.writeOK(writeResult); + + // Test the semantics of the query {a: {$eq: null}}. + (function testBasicNullQuery() { + const noProjectResults = coll.find({a: {$eq: null}}).toArray(); + const expected = [ + {_id: "a_null", a: null}, + {_id: "a_undefined", a: undefined}, + {_id: "a_value_array_all_nulls", a: [null, null]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + {_id: "no_a"}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); + + const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + // Test the semantics of the query {a: {$ne: null}}. 
+ (function testBasicNotEqualsNullQuery() { + const noProjectResults = coll.find({a: {$ne: null}}).toArray(); + const expected = [ + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_empty_subobject", a: {}}, + {_id: "a_number", a: 4}, + {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, {_id: "a_subobject_b_not_null", a: {b: "hi"}}, {_id: "a_subobject_b_null", a: {b: null}}, {_id: "a_subobject_b_undefined", a: {b: undefined}}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + ]; + assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); + + const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); + }()); + + // Test the semantics of the query {a: {$nin: [null, <number>]}}. + (function testNotInNullQuery() { + const query = {a: {$nin: [null, 75]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + ]; + + // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". + const expectedWithUndefined = expected.concat([ {_id: "a_undefined", a: undefined}, - {_id: "no_a"}, - ])); - - // Throughout this test we will run queries with a projection which may allow the planner to - // consider an index-only plan. Checking the results of those queries will test that the - // query system will never choose such an optimization if it is incorrect. - const projectToOnlyA = {_id: 0, a: 1}; - const projectToOnlyADotB = {_id: 0, "a.b": 1}; - - // Test the semantics of the query {a: {$eq: null}}. - (function testBasicNullQuery() { - const noProjectResults = coll.find({a: {$eq: null}}).toArray(); - const expected = - [{_id: "a_null", a: null}, {_id: "a_undefined", a: undefined}, {_id: "no_a"}]; - assert(resultsEq(expected, noProjectResults), tojson(noProjectResults)); - - const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - // Test the semantics of the query {a: {$ne: null}}. 
- (function testBasicNotEqualsNullQuery() { - const noProjectResults = coll.find({a: {$ne: null}}).toArray(); - const expected = [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_number", a: 4}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - // Test the semantics of the query {a: {$nin: [null, <number>]}}. - (function testNotInNullQuery() { - const query = {a: {$nin: [null, 4]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - ]; + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + ]); + assert(resultsEq(noProjectResults, expected) || + resultsEq(noProjectResults, expectedWithUndefined), + noProjectResults); + + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)) || + resultsEq(projectResults, extractAValues(expectedWithUndefined)), + projectResults); + }()); + + (function testNotInNullAndRegexQuery() { + const query = {a: {$nin: [null, /^str.*/]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". - const expectedWithUndefined = expected.concat([ - {_id: "a_undefined", a: undefined}, - ]); - assert(resultsEq(noProjectResults, expected) || - resultsEq(noProjectResults, expectedWithUndefined), - noProjectResults); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)) || - resultsEq(projectResults, extractAValues(expectedWithUndefined)), - projectResults); - }()); - - (function testNotInNullAndRegexQuery() { - // While $nin: [null, ...] can be indexed, $nin: [<regex>] cannot. Ensure that we get - // the correct results in this case. - const query = {a: {$nin: [null, /^hi.*/]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_empty_subobject", a: 4}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". 
- {_id: "a_undefined", a: undefined}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - (function testExistsFalse() { - const noProjectResults = coll.find({a: {$exists: false}}).toArray(); - const expected = [ - {_id: "no_a"}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = coll.find({a: {$exists: false}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - // Test the semantics of the query {"a.b": {$eq: null}}. - (function testDottedEqualsNull() { - const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray(); - assert(resultsEq(noProjectResults, - [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_null", a: null}, - {_id: "a_number", a: 4}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - {_id: "a_undefined", a: undefined}, - {_id: "no_a"} - ]), - tojson(noProjectResults)); - - const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, - [{a: {}}, {}, {}, {a: {b: null}}, {a: {b: undefined}}, {}, {}]), - tojson(projectResults)); - }()); - - // Test the semantics of the query {"a.b": {$ne: null}}. - (function testDottedNotEqualsNull() { - const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray(); - assert(resultsEq(noProjectResults, [{_id: "a_subobject_b_not_null", a: {b: "hi"}}]), - tojson(noProjectResults)); - - const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, [{a: {b: "hi"}}]), tojson(projectResults)); - }()); - - (function testDottedExistsFalse() { - const noProjectResults = coll.find({"a.b": {$exists: false}}).toArray(); - const expected = [ - {_id: "no_a"}, - {_id: "a_empty_subobject", a: {}}, - {_id: "a_null", a: null}, - {_id: "a_number", a: 4}, - {_id: "a_undefined", a: undefined}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = - coll.find({"a.b": {$exists: false}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, [{}, {a: {}}, {}, {}, {}]), tojson(projectResults)); - }()); - - // Test similar queries, but with an $elemMatch. These queries should have no results since - // an $elemMatch requires an array. 
- (function testElemMatchQueriesWithNoArrays() { - for (let elemMatchQuery of[{a: {$elemMatch: {$eq: null}}}, - {a: {$elemMatch: {$ne: null}}}, - {"a.b": {$elemMatch: {$eq: null}}}, - {"a.b": {$elemMatch: {$ne: null}}}, - {a: {$elemMatch: {b: {$eq: null}}}}, - {a: {$elemMatch: {b: {$ne: null}}}}, - ]) { - const noProjectResults = coll.find(elemMatchQuery).toArray(); - assert(resultsEq(noProjectResults, []), - `Expected no results for query ${tojson(elemMatchQuery)}, got ` + - tojson(noProjectResults)); - - let projectResults = coll.find(elemMatchQuery, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, []), - `Expected no results for query ${tojson(elemMatchQuery)}, got ` + - tojson(projectResults)); - - projectResults = coll.find(elemMatchQuery, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, []), - `Expected no results for query ${tojson(elemMatchQuery)}, got ` + - tojson(projectResults)); - } - }()); - - // An index which includes "a" or a sub-path of "a" will become multi-key after this insert. - const writeResult = coll.insert([ {_id: "a_double_array", a: [[]]}, {_id: "a_empty_array", a: []}, {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, @@ -203,394 +310,283 @@ {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + ]; + + // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". + const expectedWithUndefined = expected.concat([ + {_id: "a_undefined", a: undefined}, + ]); + + assert(resultsEq(noProjectResults, expected) || + resultsEq(noProjectResults, expectedWithUndefined), + noProjectResults); + + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)) || + resultsEq(projectResults, extractAValues(expectedWithUndefined)), + projectResults); + }()); + + // Test the results of similar queries with an $elemMatch. + (function testElemMatchValue() { + // Test $elemMatch with equality to null. + let noProjectResults = coll.find({a: {$elemMatch: {$eq: null}}}).toArray(); + const expectedEqualToNull = [ {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - ]); - if (writeResult.hasWriteErrors()) { - // We're testing a hashed index which is incompatible with arrays. Skip the multi-key - // portion of this test for this index. - assert.eq(writeResult.getWriteErrors().length, 1, tojson(writeResult)); - assert.eq(writeResult.getWriteErrors()[0].code, 16766, tojson(writeResult)); - return; - } - assert.writeOK(writeResult); - - // Test the semantics of the query {a: {$eq: null}}. 
- (function testBasicNullQuery() { - const noProjectResults = coll.find({a: {$eq: null}}).toArray(); - const expected = [ - {_id: "a_null", a: null}, - {_id: "a_undefined", a: undefined}, - {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - {_id: "no_a"}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = coll.find({a: {$eq: null}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - // Test the semantics of the query {a: {$ne: null}}. - (function testBasicNotEqualsNullQuery() { - const noProjectResults = coll.find({a: {$ne: null}}).toArray(); - const expected = [ - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_empty_subobject", a: {}}, - {_id: "a_number", a: 4}, - {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - ]; - assert(resultsEq(noProjectResults, expected), tojson(noProjectResults)); - - const projectResults = coll.find({a: {$ne: null}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)), tojson(projectResults)); - }()); - - // Test the semantics of the query {a: {$nin: [null, <number>]}}. - (function testNotInNullQuery() { - const query = {a: {$nin: [null, 75]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_number", a: 4}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - ]; + ]; + assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults)); - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". 
- const expectedWithUndefined = expected.concat([ - {_id: "a_undefined", a: undefined}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - ]); - assert(resultsEq(noProjectResults, expected) || - resultsEq(noProjectResults, expectedWithUndefined), - noProjectResults); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)) || - resultsEq(projectResults, extractAValues(expectedWithUndefined)), - projectResults); - }()); - - (function testNotInNullAndRegexQuery() { - const query = {a: {$nin: [null, /^str.*/]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_number", a: 4}, - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - ]; + let projectResults = coll.find({a: {$elemMatch: {$eq: null}}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expectedEqualToNull)), + tojson(projectResults)); - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". - const expectedWithUndefined = expected.concat([ - {_id: "a_undefined", a: undefined}, - ]); - - assert(resultsEq(noProjectResults, expected) || - resultsEq(noProjectResults, expectedWithUndefined), - noProjectResults); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)) || - resultsEq(projectResults, extractAValues(expectedWithUndefined)), - projectResults); - }()); - - // Test the results of similar queries with an $elemMatch. - (function testElemMatchValue() { - // Test $elemMatch with equality to null. - let noProjectResults = coll.find({a: {$elemMatch: {$eq: null}}}).toArray(); - const expectedEqualToNull = [ - {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - ]; - assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults)); - - let projectResults = - coll.find({a: {$elemMatch: {$eq: null}}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expectedEqualToNull)), - tojson(projectResults)); - - // Test $elemMatch with not equal to null. 
- noProjectResults = coll.find({a: {$elemMatch: {$ne: null}}}).toArray(); - const expectedNotEqualToNull = [ - {_id: "a_double_array", a: [[]]}, - {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - ]; - assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults)); - - projectResults = coll.find({a: {$elemMatch: {$ne: null}}}, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expectedNotEqualToNull)), - tojson(projectResults)); - }()); - - // Test the semantics of the query {"a.b": {$eq: null}}. The semantics here are to return - // those documents which have one of the following properties: - // - A non-object, non-array value for "a" - // - A subobject "a" with a missing, null, or undefined value for "b" - // - An array which has at least one object in it which has a missing, null, or undefined - // value for "b". - (function testDottedEqualsNull() { - const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray(); - assert( - resultsEq(noProjectResults, - [ - {_id: "a_empty_subobject", a: {}}, - {_id: "a_null", a: null}, - {_id: "a_number", a: 4}, - {_id: "a_subobject_b_null", a: {b: null}}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - {_id: "a_undefined", a: undefined}, - {_id: "no_a"}, - { + // Test $elemMatch with not equal to null. + noProjectResults = coll.find({a: {$elemMatch: {$ne: null}}}).toArray(); + const expectedNotEqualToNull = [ + {_id: "a_double_array", a: [[]]}, + {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + ]; + assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults)); + + projectResults = coll.find({a: {$elemMatch: {$ne: null}}}, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expectedNotEqualToNull)), + tojson(projectResults)); + }()); + + // Test the semantics of the query {"a.b": {$eq: null}}. The semantics here are to return + // those documents which have one of the following properties: + // - A non-object, non-array value for "a" + // - A subobject "a" with a missing, null, or undefined value for "b" + // - An array which has at least one object in it which has a missing, null, or undefined + // value for "b". 
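// Illustrative sketch, not part of the change above: a standalone reproduction of the
// {"a.b": {$eq: null}} matching rules described in the comment, using a hypothetical
// scratch collection db.null_semantics_example.
const example = db.null_semantics_example;
example.drop();
assert.writeOK(example.insert([
    {_id: 1, a: 4},             // Non-object, non-array value for "a": matches.
    {_id: 2, a: {b: null}},     // Subobject "a" with a null "b": matches.
    {_id: 3, a: [{b: 3}, {}]},  // Array containing an object with a missing "b": matches.
    {_id: 4, a: {b: "hi"}},     // Subobject "a" with a non-null "b": does not match.
]));
assert.eq(3, example.find({"a.b": {$eq: null}}).itcount());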
+ (function testDottedEqualsNull() { + const noProjectResults = coll.find({"a.b": {$eq: null}}).toArray(); + assert( + resultsEq(noProjectResults, + [ + {_id: "a_empty_subobject", a: {}}, + {_id: "a_null", a: null}, + {_id: "a_number", a: 4}, + {_id: "a_subobject_b_null", a: {b: null}}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + {_id: "a_undefined", a: undefined}, + {_id: "no_a"}, + { _id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}] - }, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - ]), - tojson(noProjectResults)); - - const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, - [ - {a: {}}, - {}, - {}, - {a: {b: null}}, - {a: {b: undefined}}, - {}, - {}, - {a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {a: [{b: null}, {b: 3}, {b: null}]}, - {a: [{b: undefined}, {b: 3}]}, - {a: [{b: 3}, {}]}, - ]), - tojson(projectResults)); - }()); - - // Test the semantics of the query {"a.b": {$ne: null}}. - (function testDottedNotEqualsNull() { - const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray(); - assert( - resultsEq(noProjectResults, - [ - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]} - ]), - tojson(noProjectResults)); - - const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, - [ - {a: {b: "hi"}}, - {a: [[]]}, - {a: []}, - {a: [{b: 1}, {b: 3}, {b: "string"}]}, - {a: []}, - {a: []}, - {a: []}, - {a: []} - ]), - tojson(projectResults)); - }()); - - // Test the semantics of the query {a.b: {$nin: [null, <number>]}}. - (function testDottedNotInNullQuery() { - const query = {"a.b": {$nin: [null, 75]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - ]; - - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". - const expectedWithUndefined = expected.concat([ - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - ]); - assert(resultsEq(noProjectResults, expected) || - resultsEq(noProjectResults, expectedWithUndefined), - noProjectResults); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)) || - resultsEq(projectResults, extractAValues(expectedWithUndefined)), - projectResults); - }()); - - // Test the semantics of the query {a.b: {$nin: [null, <regex>]}}. 
- (function testDottedNotInNullAndRegexQuery() { - const query = {"a.b": {$nin: [null, /^str.*/]}}; - const noProjectResults = coll.find(query).toArray(); - const expected = [ - {_id: "a_subobject_b_not_null", a: {b: "hi"}}, - {_id: "a_double_array", a: [[]]}, - {_id: "a_empty_array", a: []}, - {_id: "a_value_array_all_nulls", a: [null, null]}, - {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, - {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, - {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, - ]; + }, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + ]), + tojson(noProjectResults)); + + const projectResults = coll.find({"a.b": {$eq: null}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, + [ + {a: {}}, + {}, + {}, + {a: {b: null}}, + {a: {b: undefined}}, + {}, + {}, + {a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {a: [{b: null}, {b: 3}, {b: null}]}, + {a: [{b: undefined}, {b: 3}]}, + {a: [{b: 3}, {}]}, + ]), + tojson(projectResults)); + }()); + + // Test the semantics of the query {"a.b": {$ne: null}}. + (function testDottedNotEqualsNull() { + const noProjectResults = coll.find({"a.b": {$ne: null}}).toArray(); + assert(resultsEq(noProjectResults, + [ + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_value_array_all_nulls", a: [null, null]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]} + ]), + tojson(noProjectResults)); + + const projectResults = coll.find({"a.b": {$ne: null}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, + [ + {a: {b: "hi"}}, + {a: [[]]}, + {a: []}, + {a: [{b: 1}, {b: 3}, {b: "string"}]}, + {a: []}, + {a: []}, + {a: []}, + {a: []} + ]), + tojson(projectResults)); + }()); + + // Test the semantics of the query {a.b: {$nin: [null, <number>]}}. + (function testDottedNotInNullQuery() { + const query = {"a.b": {$nin: [null, 75]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_value_array_all_nulls", a: [null, null]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + ]; - // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". - const expectedWithUndefined = expected.concat([ - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_subobject_b_undefined", a: {b: undefined}}, - ]); - assert(resultsEq(noProjectResults, expected) || - resultsEq(noProjectResults, expectedWithUndefined), - noProjectResults); - - const projectResults = coll.find(query, projectToOnlyA).toArray(); - assert(resultsEq(projectResults, extractAValues(expected)) || - resultsEq(projectResults, extractAValues(expectedWithUndefined)), - projectResults); - }()); - - // Test the results of similar dotted queries with an $elemMatch. 
These should have no - // results since none of our documents have an array at the path "a.b". - (function testDottedElemMatchValue() { - let results = coll.find({"a.b": {$elemMatch: {$eq: null}}}).toArray(); - assert(resultsEq(results, []), tojson(results)); - - results = coll.find({"a.b": {$elemMatch: {$ne: null}}}).toArray(); - assert(resultsEq(results, []), tojson(results)); - }()); - - // Test null semantics within an $elemMatch object. - (function testElemMatchObject() { - // Test $elemMatch with equality to null. - let noProjectResults = coll.find({a: {$elemMatch: {b: {$eq: null}}}}).toArray(); - const expectedEqualToNull = [ - {_id: "a_double_array", a: [[]]}, - {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - ]; - assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults)); - - let projectResults = - coll.find({a: {$elemMatch: {b: {$eq: null}}}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, - [ - {a: [[]]}, - {a: [{b: null}, {b: undefined}, {b: null}, {}]}, - {a: [{b: null}, {b: 3}, {b: null}]}, - {a: [{b: undefined}, {b: 3}]}, - {a: [{b: 3}, {}]}, - ]), - tojson(projectResults)); - - // Test $elemMatch with not equal to null. - noProjectResults = coll.find({a: {$elemMatch: {b: {$ne: null}}}}).toArray(); - const expectedNotEqualToNull = [ - {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, - {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, - {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, - {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, - ]; - assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults)); - - projectResults = - coll.find({a: {$elemMatch: {b: {$ne: null}}}}, projectToOnlyADotB).toArray(); - assert(resultsEq(projectResults, - [ - {a: [{b: 1}, {b: 3}, {b: "string"}]}, - {a: [{b: null}, {b: 3}, {b: null}]}, - {a: [{b: undefined}, {b: 3}]}, - {a: [{b: 3}, {}]}, - ]), - tojson(projectResults)); - }()); - } + // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". + const expectedWithUndefined = expected.concat([ + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + ]); + assert(resultsEq(noProjectResults, expected) || + resultsEq(noProjectResults, expectedWithUndefined), + noProjectResults); + + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)) || + resultsEq(projectResults, extractAValues(expectedWithUndefined)), + projectResults); + }()); + + // Test the semantics of the query {a.b: {$nin: [null, <regex>]}}. + (function testDottedNotInNullAndRegexQuery() { + const query = {"a.b": {$nin: [null, /^str.*/]}}; + const noProjectResults = coll.find(query).toArray(); + const expected = [ + {_id: "a_subobject_b_not_null", a: {b: "hi"}}, + {_id: "a_double_array", a: [[]]}, + {_id: "a_empty_array", a: []}, + {_id: "a_value_array_all_nulls", a: [null, null]}, + {_id: "a_value_array_no_nulls", a: [1, "string", 4]}, + {_id: "a_value_array_with_null", a: [1, "string", null, 4]}, + {_id: "a_value_array_with_undefined", a: [1, "string", undefined, 4]}, + ]; - // Test without any indexes. 
+ // TODO: SERVER-21929: $in may (or may not) miss fields with value "undefined". + const expectedWithUndefined = expected.concat([ + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_subobject_b_undefined", a: {b: undefined}}, + ]); + assert(resultsEq(noProjectResults, expected) || + resultsEq(noProjectResults, expectedWithUndefined), + noProjectResults); + + const projectResults = coll.find(query, projectToOnlyA).toArray(); + assert(resultsEq(projectResults, extractAValues(expected)) || + resultsEq(projectResults, extractAValues(expectedWithUndefined)), + projectResults); + }()); + + // Test the results of similar dotted queries with an $elemMatch. These should have no + // results since none of our documents have an array at the path "a.b". + (function testDottedElemMatchValue() { + let results = coll.find({"a.b": {$elemMatch: {$eq: null}}}).toArray(); + assert(resultsEq(results, []), tojson(results)); + + results = coll.find({"a.b": {$elemMatch: {$ne: null}}}).toArray(); + assert(resultsEq(results, []), tojson(results)); + }()); + + // Test null semantics within an $elemMatch object. + (function testElemMatchObject() { + // Test $elemMatch with equality to null. + let noProjectResults = coll.find({a: {$elemMatch: {b: {$eq: null}}}}).toArray(); + const expectedEqualToNull = [ + {_id: "a_double_array", a: [[]]}, + {_id: "a_object_array_all_b_nulls", a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + ]; + assert(resultsEq(noProjectResults, expectedEqualToNull), tojson(noProjectResults)); + + let projectResults = + coll.find({a: {$elemMatch: {b: {$eq: null}}}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, + [ + {a: [[]]}, + {a: [{b: null}, {b: undefined}, {b: null}, {}]}, + {a: [{b: null}, {b: 3}, {b: null}]}, + {a: [{b: undefined}, {b: 3}]}, + {a: [{b: 3}, {}]}, + ]), + tojson(projectResults)); + + // Test $elemMatch with not equal to null. + noProjectResults = coll.find({a: {$elemMatch: {b: {$ne: null}}}}).toArray(); + const expectedNotEqualToNull = [ + {_id: "a_object_array_no_b_nulls", a: [{b: 1}, {b: 3}, {b: "string"}]}, + {_id: "a_object_array_some_b_nulls", a: [{b: null}, {b: 3}, {b: null}]}, + {_id: "a_object_array_some_b_undefined", a: [{b: undefined}, {b: 3}]}, + {_id: "a_object_array_some_b_missing", a: [{b: 3}, {}]}, + ]; + assert(resultsEq(noProjectResults, expectedNotEqualToNull), tojson(noProjectResults)); + + projectResults = + coll.find({a: {$elemMatch: {b: {$ne: null}}}}, projectToOnlyADotB).toArray(); + assert(resultsEq(projectResults, + [ + {a: [{b: 1}, {b: 3}, {b: "string"}]}, + {a: [{b: null}, {b: 3}, {b: null}]}, + {a: [{b: undefined}, {b: 3}]}, + {a: [{b: 3}, {}]}, + ]), + tojson(projectResults)); + }()); +} + +// Test without any indexes. 
+testNotEqualsNullSemantics(coll); + +const keyPatterns = [ + {keyPattern: {a: 1}}, + {keyPattern: {a: -1}}, + {keyPattern: {a: "hashed"}}, + {keyPattern: {a: 1}, options: {partialFilterExpression: {a: {$exists: true}}}}, + {keyPattern: {a: 1}, options: {sparse: true}}, + {keyPattern: {"a.b": 1}}, + {keyPattern: {_id: 1, "a.b": 1}}, + {keyPattern: {"a.b": 1, _id: 1}}, + {keyPattern: {"a.b": 1}, options: {partialFilterExpression: {a: {$exists: true}}}}, + {keyPattern: {"a.b": 1, _id: 1}, options: {sparse: true}}, + {keyPattern: {"$**": 1}}, + {keyPattern: {"a.$**": 1}} +]; + +// Test with a variety of other indexes. +for (let indexSpec of keyPatterns) { + coll.drop(); + jsTestLog(`Index spec: ${tojson(indexSpec)}`); + assert.commandWorked(coll.createIndex(indexSpec.keyPattern, indexSpec.options)); testNotEqualsNullSemantics(coll); +} - const keyPatterns = [ - {keyPattern: {a: 1}}, - {keyPattern: {a: -1}}, - {keyPattern: {a: "hashed"}}, - {keyPattern: {a: 1}, options: {partialFilterExpression: {a: {$exists: true}}}}, - {keyPattern: {a: 1}, options: {sparse: true}}, - {keyPattern: {"a.b": 1}}, - {keyPattern: {_id: 1, "a.b": 1}}, - {keyPattern: {"a.b": 1, _id: 1}}, - {keyPattern: {"a.b": 1}, options: {partialFilterExpression: {a: {$exists: true}}}}, - {keyPattern: {"a.b": 1, _id: 1}, options: {sparse: true}}, - {keyPattern: {"$**": 1}}, - {keyPattern: {"a.$**": 1}} - ]; - - // Test with a variety of other indexes. - for (let indexSpec of keyPatterns) { - coll.drop(); - jsTestLog(`Index spec: ${tojson(indexSpec)}`); - assert.commandWorked(coll.createIndex(indexSpec.keyPattern, indexSpec.options)); - testNotEqualsNullSemantics(coll); - } - - // Test that you cannot use a $ne: null predicate in a partial filter expression. - assert.commandFailedWithCode( - coll.createIndex({a: 1}, {partialFilterExpression: {a: {$ne: null}}}), - ErrorCodes.CannotCreateIndex); +// Test that you cannot use a $ne: null predicate in a partial filter expression. 
+assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: {a: {$ne: null}}}), + ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode( - coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}), - ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode( + coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}), + ErrorCodes.CannotCreateIndex); }()); diff --git a/jstests/core/numberlong.js b/jstests/core/numberlong.js index c50fc8599c3..a7dfd014539 100644 --- a/jstests/core/numberlong.js +++ b/jstests/core/numberlong.js @@ -132,14 +132,14 @@ for (var i = 0; i < badValues.length; i++) { // parsing assert.throws.automsg(function() { - new NumberLong(""); -}); + new NumberLong(""); + }); assert.throws.automsg(function() { - new NumberLong("y"); -}); + new NumberLong("y"); + }); assert.throws.automsg(function() { - new NumberLong("11111111111111111111"); -}); + new NumberLong("11111111111111111111"); + }); // create NumberLong from NumberInt (SERVER-9973) assert.doesNotThrow.automsg(function() { diff --git a/jstests/core/numberlong3.js b/jstests/core/numberlong3.js index b62d1865ff4..0dabdec2a05 100644 --- a/jstests/core/numberlong3.js +++ b/jstests/core/numberlong3.js @@ -13,7 +13,7 @@ for (i = 10; i >= 0; --i) { } ret = t.find().sort({x: 1}).toArray().filter(function(x) { - return typeof(x.x.floatApprox) != 'undefined'; + return typeof (x.x.floatApprox) != 'undefined'; }); // printjson( ret ); diff --git a/jstests/core/objid6.js b/jstests/core/objid6.js index 0165d0c8e37..28be2a3fa42 100644 --- a/jstests/core/objid6.js +++ b/jstests/core/objid6.js @@ -1,10 +1,10 @@ (function() { - 'use strict'; +'use strict'; - var o = new ObjectId(); - assert(o.getTimestamp); +var o = new ObjectId(); +assert(o.getTimestamp); - var a = new ObjectId("4c17f616a707427266a2801a"); - var b = new ObjectId("4c17f616a707428966a2801c"); - assert.eq(a.getTimestamp(), b.getTimestamp()); +var a = new ObjectId("4c17f616a707427266a2801a"); +var b = new ObjectId("4c17f616a707428966a2801c"); +assert.eq(a.getTimestamp(), b.getTimestamp()); })(); diff --git a/jstests/core/opcounters_active.js b/jstests/core/opcounters_active.js index 9c93adcc719..4e4fe7ff326 100644 --- a/jstests/core/opcounters_active.js +++ b/jstests/core/opcounters_active.js @@ -3,33 +3,33 @@ // ] (function() { - "use strict"; - // Test the getActiveCommands function - // Should remove the listCollections section but keep the rest - var testInput = { - "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)}, - "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}}, - "listCollections": {"failed": NumberLong(0), "total": NumberLong(0)} - }; - var testExpected = { - "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)}, - "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}} - }; - var testResult = getActiveCommands(testInput); +"use strict"; +// Test the getActiveCommands function +// Should remove the listCollections section but keep the rest +var testInput = { + "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)}, + "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}}, + "listCollections": {"failed": NumberLong(0), "total": NumberLong(0)} +}; +var testExpected = { + "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)}, + "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}} +}; +var testResult = getActiveCommands(testInput); 
- assert.eq(testResult, testExpected, "getActiveCommands did not return the expected result"); +assert.eq(testResult, testExpected, "getActiveCommands did not return the expected result"); - // Test that the serverstatus helper works - var result = db.serverStatus(); - assert.neq(undefined, result, tojson(result)); - // Test that the metrics tree returns - assert.neq(undefined, result.metrics, tojson(result)); - // Test that the metrics.commands tree returns - assert.neq(undefined, result.metrics.commands, tojson(result)); - // Test that the metrics.commands.serverStatus value is non-zero - assert.neq(0, result.metrics.commands.serverStatus.total, tojson(result)); +// Test that the serverstatus helper works +var result = db.serverStatus(); +assert.neq(undefined, result, tojson(result)); +// Test that the metrics tree returns +assert.neq(undefined, result.metrics, tojson(result)); +// Test that the metrics.commands tree returns +assert.neq(undefined, result.metrics.commands, tojson(result)); +// Test that the metrics.commands.serverStatus value is non-zero +assert.neq(0, result.metrics.commands.serverStatus.total, tojson(result)); - // Test that the command returns successfully when no metrics tree is present - var result = db.serverStatus({"metrics": 0}); - assert.eq(undefined, result.metrics, tojson(result)); +// Test that the command returns successfully when no metrics tree is present +var result = db.serverStatus({"metrics": 0}); +assert.eq(undefined, result.metrics, tojson(result)); }());
\ No newline at end of file diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js index d3bce1305c9..d81308192ac 100644 --- a/jstests/core/operation_latency_histogram.js +++ b/jstests/core/operation_latency_histogram.js @@ -15,170 +15,169 @@ // tag incompatible_with_embedded. (function() { - "use strict"; - - load("jstests/libs/stats.js"); - var name = "operationalLatencyHistogramTest"; - - var testDB = db.getSiblingDB(name); - var testColl = testDB[name + "coll"]; - - testColl.drop(); - - // Test aggregation command output format. - var commandResult = testDB.runCommand( - {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}}); - assert.commandWorked(commandResult); - assert(commandResult.cursor.firstBatch.length == 1); - - var stats = commandResult.cursor.firstBatch[0]; - var histogramTypes = ["reads", "writes", "commands"]; - - assert(stats.hasOwnProperty("localTime")); - assert(stats.hasOwnProperty("latencyStats")); - - histogramTypes.forEach(function(key) { - assert(stats.latencyStats.hasOwnProperty(key)); - assert(stats.latencyStats[key].hasOwnProperty("ops")); - assert(stats.latencyStats[key].hasOwnProperty("latency")); - }); - - var lastHistogram = getHistogramStats(testColl); - - // Insert - var numRecords = 100; - for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.insert({_id: i})); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); - - // Update - for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i})); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); - - // Find - var cursors = []; - for (var i = 0; i < numRecords; i++) { - cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2); - assert.eq(cursors[i].next()._id, i); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); - - // GetMore - for (var i = 0; i < numRecords / 2; i++) { - // Trigger two getmore commands. - assert.eq(cursors[i].next()._id, i + 1); - assert.eq(cursors[i].next()._id, i + 2); - assert.eq(cursors[i].next()._id, i + 3); - assert.eq(cursors[i].next()._id, i + 4); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); - - // KillCursors - // The last cursor has no additional results, hence does not need to be closed. 
- for (var i = 0; i < numRecords - 1; i++) { - cursors[i].close(); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1); - - // Remove - for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.remove({_id: i})); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); - - // Upsert - for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1})); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); - - // Aggregate - for (var i = 0; i < numRecords; i++) { - testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); - - // Count - for (var i = 0; i < numRecords; i++) { - testColl.count({x: i}); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); - - // FindAndModify - testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}}); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 1, 0); - - // CreateIndex - assert.commandWorked(testColl.createIndex({pt: "2dsphere"})); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // $geoNear aggregation stage - assert.commandWorked(testDB.runCommand({ - aggregate: testColl.getName(), - pipeline: [{ - $geoNear: { - near: {type: "Point", coordinates: [0, 0]}, - spherical: true, - distanceField: "dist", - } - }], - cursor: {}, - })); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 1, 0, 0); - - // GetIndexes - testColl.getIndexes(); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // Reindex - assert.commandWorked(testColl.reIndex()); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // DropIndex - assert.commandWorked(testColl.dropIndex({pt: "2dsphere"})); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // Explain - testColl.explain().find().next(); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // CollStats - assert.commandWorked(testDB.runCommand({collStats: testColl.getName()})); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // CollMod - assert.commandWorked( - testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"})); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // Compact - // Use force:true in case we're in replset. - var commandResult = testDB.runCommand({compact: testColl.getName(), force: true}); - // If storage engine supports compact, it should count as a command. - if (!commandResult.ok) { - assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported); - } - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // DataSize - testColl.dataSize(); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // PlanCache - testColl.getPlanCache().listQueryShapes(); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); - - // Commands which occur on the database only should not effect the collection stats. - assert.commandWorked(testDB.serverStatus()); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); - - assert.commandWorked(testColl.runCommand("whatsmyuri")); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); - - // Test non-command. 
- assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand")); - lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); +"use strict"; + +load("jstests/libs/stats.js"); +var name = "operationalLatencyHistogramTest"; + +var testDB = db.getSiblingDB(name); +var testColl = testDB[name + "coll"]; + +testColl.drop(); + +// Test aggregation command output format. +var commandResult = testDB.runCommand( + {aggregate: testColl.getName(), pipeline: [{$collStats: {latencyStats: {}}}], cursor: {}}); +assert.commandWorked(commandResult); +assert(commandResult.cursor.firstBatch.length == 1); + +var stats = commandResult.cursor.firstBatch[0]; +var histogramTypes = ["reads", "writes", "commands"]; + +assert(stats.hasOwnProperty("localTime")); +assert(stats.hasOwnProperty("latencyStats")); + +histogramTypes.forEach(function(key) { + assert(stats.latencyStats.hasOwnProperty(key)); + assert(stats.latencyStats[key].hasOwnProperty("ops")); + assert(stats.latencyStats[key].hasOwnProperty("latency")); +}); + +var lastHistogram = getHistogramStats(testColl); + +// Insert +var numRecords = 100; +for (var i = 0; i < numRecords; i++) { + assert.writeOK(testColl.insert({_id: i})); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); + +// Update +for (var i = 0; i < numRecords; i++) { + assert.writeOK(testColl.update({_id: i}, {x: i})); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); + +// Find +var cursors = []; +for (var i = 0; i < numRecords; i++) { + cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2); + assert.eq(cursors[i].next()._id, i); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); + +// GetMore +for (var i = 0; i < numRecords / 2; i++) { + // Trigger two getmore commands. + assert.eq(cursors[i].next()._id, i + 1); + assert.eq(cursors[i].next()._id, i + 2); + assert.eq(cursors[i].next()._id, i + 3); + assert.eq(cursors[i].next()._id, i + 4); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); + +// KillCursors +// The last cursor has no additional results, hence does not need to be closed. 
+for (var i = 0; i < numRecords - 1; i++) { + cursors[i].close(); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, numRecords - 1); + +// Remove +for (var i = 0; i < numRecords; i++) { + assert.writeOK(testColl.remove({_id: i})); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); + +// Upsert +for (var i = 0; i < numRecords; i++) { + assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1})); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, numRecords, 0); + +// Aggregate +for (var i = 0; i < numRecords; i++) { + testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); + +// Count +for (var i = 0; i < numRecords; i++) { + testColl.count({x: i}); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, numRecords, 0, 0); + +// FindAndModify +testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}}); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 1, 0); + +// CreateIndex +assert.commandWorked(testColl.createIndex({pt: "2dsphere"})); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// $geoNear aggregation stage +assert.commandWorked(testDB.runCommand({ + aggregate: testColl.getName(), + pipeline: [{ + $geoNear: { + near: {type: "Point", coordinates: [0, 0]}, + spherical: true, + distanceField: "dist", + } + }], + cursor: {}, +})); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 1, 0, 0); + +// GetIndexes +testColl.getIndexes(); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// Reindex +assert.commandWorked(testColl.reIndex()); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// DropIndex +assert.commandWorked(testColl.dropIndex({pt: "2dsphere"})); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// Explain +testColl.explain().find().next(); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// CollStats +assert.commandWorked(testDB.runCommand({collStats: testColl.getName()})); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// CollMod +assert.commandWorked(testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"})); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// Compact +// Use force:true in case we're in replset. +var commandResult = testDB.runCommand({compact: testColl.getName(), force: true}); +// If storage engine supports compact, it should count as a command. +if (!commandResult.ok) { + assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported); +} +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// DataSize +testColl.dataSize(); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// PlanCache +testColl.getPlanCache().listQueryShapes(); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); + +// Commands which occur on the database only should not effect the collection stats. +assert.commandWorked(testDB.serverStatus()); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); + +assert.commandWorked(testColl.runCommand("whatsmyuri")); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); + +// Test non-command. 
+assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand")); +lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 0); }()); diff --git a/jstests/core/optime_cmp.js b/jstests/core/optime_cmp.js index 436039946ce..db58f856f76 100644 --- a/jstests/core/optime_cmp.js +++ b/jstests/core/optime_cmp.js @@ -1,15 +1,14 @@ (function() { - 'use strict'; +'use strict'; - // PV1 - assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(3, 1), t: 2})); - assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(2, 4), t: 2})); - assert.eq(-1, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(2, 0), t: 3})); +// PV1 +assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(3, 1), t: 2})); +assert.eq(-1, rs.compareOpTimes({ts: Timestamp(2, 2), t: 2}, {ts: Timestamp(2, 4), t: 2})); +assert.eq(-1, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(2, 0), t: 3})); - assert.eq(0, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(3, 0), t: 2})); - - assert.eq(1, rs.compareOpTimes({ts: Timestamp(3, 1), t: 2}, {ts: Timestamp(2, 2), t: 2})); - assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 4), t: 2}, {ts: Timestamp(2, 2), t: 2})); - assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 0), t: 3}, {ts: Timestamp(3, 0), t: 2})); +assert.eq(0, rs.compareOpTimes({ts: Timestamp(3, 0), t: 2}, {ts: Timestamp(3, 0), t: 2})); +assert.eq(1, rs.compareOpTimes({ts: Timestamp(3, 1), t: 2}, {ts: Timestamp(2, 2), t: 2})); +assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 4), t: 2}, {ts: Timestamp(2, 2), t: 2})); +assert.eq(1, rs.compareOpTimes({ts: Timestamp(2, 0), t: 3}, {ts: Timestamp(3, 0), t: 2})); })(); diff --git a/jstests/core/optimized_match_explain.js b/jstests/core/optimized_match_explain.js index 6f73349e08c..5575b8498bb 100644 --- a/jstests/core/optimized_match_explain.js +++ b/jstests/core/optimized_match_explain.js @@ -4,22 +4,22 @@ * Tests that the explain output for $match reflects any optimizations. */ (function() { - "use strict"; - load("jstests/libs/analyze_plan.js"); +"use strict"; +load("jstests/libs/analyze_plan.js"); - const coll = db.match_explain; - coll.drop(); +const coll = db.match_explain; +coll.drop(); - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 2, b: 3})); - assert.writeOK(coll.insert({a: 1, b: 2})); - assert.writeOK(coll.insert({a: 1, b: 4})); +assert.writeOK(coll.insert({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 2, b: 3})); +assert.writeOK(coll.insert({a: 1, b: 2})); +assert.writeOK(coll.insert({a: 1, b: 4})); - // Explain output should reflect optimizations. - // $and should not be in the explain output because it is optimized out. - let explain = coll.explain().aggregate( - [{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]); +// Explain output should reflect optimizations. +// $and should not be in the explain output because it is optimized out. 
+let explain = coll.explain().aggregate( + [{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]); - assert.commandWorked(explain); - assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}}); +assert.commandWorked(explain); +assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}}); }()); diff --git a/jstests/core/or1.js b/jstests/core/or1.js index e7c417800b6..c5975a058f2 100644 --- a/jstests/core/or1.js +++ b/jstests/core/or1.js @@ -17,7 +17,6 @@ checkArrs = function(a, b) { }; doTest = function() { - t.save({_id: 0, a: 1}); t.save({_id: 1, a: 2}); t.save({_id: 2, b: 1}); @@ -43,11 +42,11 @@ doTest = function() { a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray(); checkArrs( [ - {_id: 0, a: 1}, - {_id: 3, b: 2}, - {_id: 4, a: 1, b: 1}, - {_id: 5, a: 1, b: 2}, - {_id: 7, a: 2, b: 2} + {_id: 0, a: 1}, + {_id: 3, b: 2}, + {_id: 4, a: 1, b: 1}, + {_id: 5, a: 1, b: 2}, + {_id: 7, a: 2, b: 2} ], a1b2); @@ -56,7 +55,6 @@ doTest = function() { assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}]}).toArray().length); assert.eq(1, t.find({$or: [{b: {$in: [0, 1]}}]}).toArray().length); assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}, {b: {$in: [0, 1]}}]}).toArray().length); - }; doTest(); diff --git a/jstests/core/or4.js b/jstests/core/or4.js index b71f4254c79..8e07a42efa7 100644 --- a/jstests/core/or4.js +++ b/jstests/core/or4.js @@ -6,80 +6,80 @@ // ] (function() { - "use strict"; +"use strict"; - const coll = db.or4; - coll.drop(); +const coll = db.or4; +coll.drop(); - coll.ensureIndex({a: 1}); - coll.ensureIndex({b: 1}); +coll.ensureIndex({a: 1}); +coll.ensureIndex({b: 1}); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({b: 3})); - assert.writeOK(coll.insert({b: 3})); - assert.writeOK(coll.insert({a: 2, b: 3})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({b: 3})); +assert.writeOK(coll.insert({b: 3})); +assert.writeOK(coll.insert({a: 2, b: 3})); - assert.eq(4, coll.count({$or: [{a: 2}, {b: 3}]})); - assert.eq(2, coll.count({$or: [{a: 2}, {a: 2}]})); +assert.eq(4, coll.count({$or: [{a: 2}, {b: 3}]})); +assert.eq(2, coll.count({$or: [{a: 2}, {a: 2}]})); - assert.eq(2, coll.find({}).skip(2).count(true)); - assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(2).count(true)); - assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).skip(3).count(true)); +assert.eq(2, coll.find({}).skip(2).count(true)); +assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(2).count(true)); +assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).skip(3).count(true)); - assert.eq(2, coll.find({}).limit(2).count(true)); - assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).limit(1).count(true)); - assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).limit(2).count(true)); - assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).limit(3).count(true)); - assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).count(true)); +assert.eq(2, coll.find({}).limit(2).count(true)); +assert.eq(1, coll.find({$or: [{a: 2}, {b: 3}]}).limit(1).count(true)); +assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).limit(2).count(true)); +assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).limit(3).count(true)); +assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).count(true)); - coll.remove({$or: [{a: 2}, {b: 3}]}); - assert.eq(0, coll.count()); +coll.remove({$or: [{a: 2}, {b: 3}]}); +assert.eq(0, coll.count()); - assert.writeOK(coll.insert({b: 3})); - coll.remove({$or: [{a: 2}, {b: 3}]}); - assert.eq(0, coll.count()); +assert.writeOK(coll.insert({b: 3})); +coll.remove({$or: [{a: 
2}, {b: 3}]}); +assert.eq(0, coll.count()); - assert.writeOK(coll.insert({a: 2})); - assert.writeOK(coll.insert({b: 3})); - assert.writeOK(coll.insert({a: 2, b: 3})); +assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({b: 3})); +assert.writeOK(coll.insert({a: 2, b: 3})); - coll.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true); - assert.eq(3, coll.count({z: 1})); +coll.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true); +assert.eq(3, coll.count({z: 1})); - assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).toArray().length); - assert.eq(coll.find().sort({_id: 1}).toArray(), - coll.find({$or: [{a: 2}, {b: 3}]}).sort({_id: 1}).toArray()); - assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(1).toArray().length); +assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).toArray().length); +assert.eq(coll.find().sort({_id: 1}).toArray(), + coll.find({$or: [{a: 2}, {b: 3}]}).sort({_id: 1}).toArray()); +assert.eq(2, coll.find({$or: [{a: 2}, {b: 3}]}).skip(1).toArray().length); - assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length); +assert.eq(3, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({b: 4})); - assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({b: 4})); +assert.writeOK(coll.insert({a: 2})); - assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length); +assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).batchSize(2).toArray().length); - assert.writeOK(coll.insert({a: 1, b: 3})); - assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length); +assert.writeOK(coll.insert({a: 1, b: 3})); +assert.eq(4, coll.find({$or: [{a: 2}, {b: 3}]}).limit(4).toArray().length); - assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]}))); +assert.eq([1, 2], Array.sort(coll.distinct('a', {$or: [{a: 2}, {b: 3}]}))); - assert.eq(5, - coll.mapReduce( - function() { - emit('a', this.a); - }, - function(key, vals) { - return vals.length; - }, - {out: {inline: true}, query: {$or: [{a: 2}, {b: 3}]}}) - .counts.input); +assert.eq(5, + coll.mapReduce( + function() { + emit('a', this.a); + }, + function(key, vals) { + return vals.length; + }, + {out: {inline: true}, query: {$or: [{a: 2}, {b: 3}]}}) + .counts.input); - coll.remove({}); +coll.remove({}); - assert.writeOK(coll.insert({a: [1, 2]})); - assert.eq(1, coll.find({$or: [{a: 1}, {a: 2}]}).toArray().length); - assert.eq(1, coll.count({$or: [{a: 1}, {a: 2}]})); - assert.eq(1, coll.find({$or: [{a: 2}, {a: 1}]}).toArray().length); - assert.eq(1, coll.count({$or: [{a: 2}, {a: 1}]})); +assert.writeOK(coll.insert({a: [1, 2]})); +assert.eq(1, coll.find({$or: [{a: 1}, {a: 2}]}).toArray().length); +assert.eq(1, coll.count({$or: [{a: 1}, {a: 2}]})); +assert.eq(1, coll.find({$or: [{a: 2}, {a: 1}]}).toArray().length); +assert.eq(1, coll.count({$or: [{a: 2}, {a: 1}]})); })(); diff --git a/jstests/core/or5.js b/jstests/core/or5.js index 7e61f9bf8cf..e0af20752f0 100644 --- a/jstests/core/or5.js +++ b/jstests/core/or5.js @@ -36,8 +36,8 @@ for (i = 2; i <= 7; ++i) { t.ensureIndex({z: "2d"}); assert.throws.automsg(function() { - return t.find({$or: [{z: {$near: [50, 50]}}, {a: 2}]}).toArray(); -}); + return t.find({$or: [{z: {$near: [50, 50]}}, {a: 2}]}).toArray(); + }); function reset() { t.drop(); diff --git a/jstests/core/or_always_false.js b/jstests/core/or_always_false.js index eb479486eac..6760ee37775 100644 --- 
a/jstests/core/or_always_false.js +++ b/jstests/core/or_always_false.js @@ -1,17 +1,16 @@ // Tests that a rooted-$or query with each clause provably false will not return any results. (function() { - "use strict"; +"use strict"; - const coll = db.or_always_false; - coll.drop(); +const coll = db.or_always_false; +coll.drop(); - assert.writeOK(coll.insert([{}, {}, {}])); - const emptyOrError = assert.throws(() => coll.find({$or: []}).itcount()); - assert.eq(emptyOrError.code, ErrorCodes.BadValue); +assert.writeOK(coll.insert([{}, {}, {}])); +const emptyOrError = assert.throws(() => coll.find({$or: []}).itcount()); +assert.eq(emptyOrError.code, ErrorCodes.BadValue); - assert.eq(coll.find({$or: [{$alwaysFalse: 1}]}).itcount(), 0); - assert.eq(coll.find({$or: [{a: {$all: []}}]}).itcount(), 0); - assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {$alwaysFalse: 1}]}).itcount(), 0); - assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {a: {$all: []}}, {$alwaysFalse: 1}]}).itcount(), - 0); +assert.eq(coll.find({$or: [{$alwaysFalse: 1}]}).itcount(), 0); +assert.eq(coll.find({$or: [{a: {$all: []}}]}).itcount(), 0); +assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {$alwaysFalse: 1}]}).itcount(), 0); +assert.eq(coll.find({$or: [{$alwaysFalse: 1}, {a: {$all: []}}, {$alwaysFalse: 1}]}).itcount(), 0); }()); diff --git a/jstests/core/or_inexact.js b/jstests/core/or_inexact.js index 3e7e374d7f5..17aeea618b3 100644 --- a/jstests/core/or_inexact.js +++ b/jstests/core/or_inexact.js @@ -119,26 +119,26 @@ t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}}); cursor = t.find({ $or: [ { - pre: 3, - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] - } - } - } + pre: 3, + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] + } + } + } }, { - pre: 4, - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] - } - } - } + pre: 4, + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] + } + } + } } ] }); @@ -152,26 +152,26 @@ t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}}); cursor = t.find({ $or: [ { - pre: 3, - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] - } - } - } + pre: 3, + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] + } + } + } }, { - pre: 4, - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] - } - } - } + pre: 4, + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] + } + } + } } ] }); @@ -259,24 +259,24 @@ t.insert({_id: 1, loc: {type: "Point", coordinates: [0, 0]}}); cursor = t.find({ $or: [ { - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] - } - } - } + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]] + } + } + } }, { - loc: { - $geoWithin: { - $geometry: { - type: "Polygon", - coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] - } - } - } + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]] + } + } + } } 
] }); diff --git a/jstests/core/ord.js b/jstests/core/ord.js index 640f5de13cc..2cd2cef0a66 100644 --- a/jstests/core/ord.js +++ b/jstests/core/ord.js @@ -6,48 +6,48 @@ // behavior is changed. (function() { - "use strict"; - - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - - const t = db.jstests_ord; - t.drop(); - - t.ensureIndex({a: 1}); - t.ensureIndex({b: 1}); - - for (let i = 0; i < 80; ++i) { - t.save({a: 1}); - } - - for (let i = 0; i < 100; ++i) { - t.save({b: 1}); - } - - const c = t.find({$or: [{a: 1}, {b: 1}]}).batchSize(100); - for (let i = 0; i < 100; ++i) { - c.next(); - } - // At this point, our initial query has ended and there is a client cursor waiting - // to read additional documents from index {b:1}. Deduping is performed against - // the index key {a:1}. - - t.dropIndex({a: 1}); - - // Dropping an index kills all cursors on the indexed namespace, not just those - // cursors using the dropped index. - if (FixtureHelpers.isMongos(db)) { - // mongos may have some data left from a previous batch stored in memory, so it might not - // return an error immediately, but it should eventually. - assert.soon(function() { - try { - c.next(); - return false; // We didn't throw an error yet. - } catch (e) { - return true; - } - }); - } else { - assert.throws(() => c.next()); - } +"use strict"; + +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. + +const t = db.jstests_ord; +t.drop(); + +t.ensureIndex({a: 1}); +t.ensureIndex({b: 1}); + +for (let i = 0; i < 80; ++i) { + t.save({a: 1}); +} + +for (let i = 0; i < 100; ++i) { + t.save({b: 1}); +} + +const c = t.find({$or: [{a: 1}, {b: 1}]}).batchSize(100); +for (let i = 0; i < 100; ++i) { + c.next(); +} +// At this point, our initial query has ended and there is a client cursor waiting +// to read additional documents from index {b:1}. Deduping is performed against +// the index key {a:1}. + +t.dropIndex({a: 1}); + +// Dropping an index kills all cursors on the indexed namespace, not just those +// cursors using the dropped index. +if (FixtureHelpers.isMongos(db)) { + // mongos may have some data left from a previous batch stored in memory, so it might not + // return an error immediately, but it should eventually. + assert.soon(function() { + try { + c.next(); + return false; // We didn't throw an error yet. + } catch (e) { + return true; + } + }); +} else { + assert.throws(() => c.next()); +} })(); diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js index 5ac1ca775c3..8170aa9595d 100644 --- a/jstests/core/plan_cache_clear.js +++ b/jstests/core/plan_cache_clear.js @@ -12,100 +12,97 @@ // ] (function() { - var t = db.jstests_plan_cache_clear; - t.drop(); +var t = db.jstests_plan_cache_clear; +t.drop(); - // Utility function to list query shapes in cache. - function getShapes(collection) { - if (collection == undefined) { - collection = t; - } - var res = collection.runCommand('planCacheListQueryShapes'); - print('planCacheListQueryShapes() = ' + tojson(res)); - assert.commandWorked(res, 'planCacheListQueryShapes failed'); - assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result'); - return res.shapes; +// Utility function to list query shapes in cache. 
+function getShapes(collection) { + if (collection == undefined) { + collection = t; } + var res = collection.runCommand('planCacheListQueryShapes'); + print('planCacheListQueryShapes() = ' + tojson(res)); + assert.commandWorked(res, 'planCacheListQueryShapes failed'); + assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result'); + return res.shapes; +} - t.save({a: 1, b: 1}); - t.save({a: 1, b: 2}); - t.save({a: 1, b: 2}); - t.save({a: 2, b: 2}); +t.save({a: 1, b: 1}); +t.save({a: 1, b: 2}); +t.save({a: 1, b: 2}); +t.save({a: 2, b: 2}); - // We need two indices so that the MultiPlanRunner is executed. - t.ensureIndex({a: 1}); - t.ensureIndex({a: 1, b: 1}); +// We need two indices so that the MultiPlanRunner is executed. +t.ensureIndex({a: 1}); +t.ensureIndex({a: 1, b: 1}); - // Run a query so that an entry is inserted into the cache. - assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); - - // Invalid key should be a no-op. - assert.commandWorked(t.runCommand('planCacheClear', {query: {unknownfield: 1}})); - assert.eq( - 1, getShapes().length, 'removing unknown query should not affecting exisiting entries'); +// Run a query so that an entry is inserted into the cache. +assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); - // Run a new query shape and drop it from the cache - assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query'); - assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}})); - assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache'); +// Invalid key should be a no-op. +assert.commandWorked(t.runCommand('planCacheClear', {query: {unknownfield: 1}})); +assert.eq(1, getShapes().length, 'removing unknown query should not affecting exisiting entries'); - // planCacheClear can clear $expr queries. - assert.eq( - 1, t.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount(), 'unexpected document count'); - assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query'); - assert.commandWorked( - t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}})); - assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache'); +// Run a new query shape and drop it from the cache +assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query'); +assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}})); +assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache'); - // planCacheClear fails with an $expr query with an unbound variable. - assert.commandFailed( - t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}})); +// planCacheClear can clear $expr queries. +assert.eq(1, t.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount(), 'unexpected document count'); +assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query'); +assert.commandWorked( + t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}})); +assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache'); - // Insert two more shapes into the cache. - assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); - assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount(), 'unexpected document count'); +// planCacheClear fails with an $expr query with an unbound variable. 
+assert.commandFailed( + t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}})); - // Drop query cache. This clears all cached queries in the collection. - res = t.runCommand('planCacheClear'); - print('planCacheClear() = ' + tojson(res)); - assert.commandWorked(res, 'planCacheClear failed'); - assert.eq( - 0, getShapes().length, 'plan cache should be empty after successful planCacheClear()'); +// Insert two more shapes into the cache. +assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); +assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount(), 'unexpected document count'); - // - // Query Plan Revision - // http://docs.mongodb.org/manual/core/query-plans/#query-plan-revision - // As collections change over time, the query optimizer deletes the query plan and re-evaluates - // after any of the following events: - // - The reIndex rebuilds the index. - // - You add or drop an index. - // - The mongod process restarts. - // +// Drop query cache. This clears all cached queries in the collection. +res = t.runCommand('planCacheClear'); +print('planCacheClear() = ' + tojson(res)); +assert.commandWorked(res, 'planCacheClear failed'); +assert.eq(0, getShapes().length, 'plan cache should be empty after successful planCacheClear()'); - // Case 1: The reIndex rebuilds the index. - // Steps: - // Populate the cache with 1 entry. - // Run reIndex on the collection. - // Confirm that cache is empty. - const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; - if (!isMongos) { - assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); - assert.eq(1, getShapes().length, 'plan cache should not be empty after query'); - res = t.reIndex(); - print('reIndex result = ' + tojson(res)); - assert.eq(0, getShapes().length, 'plan cache should be empty after reIndex operation'); - } +// +// Query Plan Revision +// http://docs.mongodb.org/manual/core/query-plans/#query-plan-revision +// As collections change over time, the query optimizer deletes the query plan and re-evaluates +// after any of the following events: +// - The reIndex rebuilds the index. +// - You add or drop an index. +// - The mongod process restarts. +// - // Case 2: You add or drop an index. - // Steps: - // Populate the cache with 1 entry. - // Add an index. - // Confirm that cache is empty. +// Case 1: The reIndex rebuilds the index. +// Steps: +// Populate the cache with 1 entry. +// Run reIndex on the collection. +// Confirm that cache is empty. +const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; +if (!isMongos) { assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); assert.eq(1, getShapes().length, 'plan cache should not be empty after query'); - t.ensureIndex({b: 1}); - assert.eq(0, getShapes().length, 'plan cache should be empty after adding index'); + res = t.reIndex(); + print('reIndex result = ' + tojson(res)); + assert.eq(0, getShapes().length, 'plan cache should be empty after reIndex operation'); +} + +// Case 2: You add or drop an index. +// Steps: +// Populate the cache with 1 entry. +// Add an index. +// Confirm that cache is empty. +assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); +assert.eq(1, getShapes().length, 'plan cache should not be empty after query'); +t.ensureIndex({b: 1}); +assert.eq(0, getShapes().length, 'plan cache should be empty after adding index'); - // Case 3: The mongod process restarts - // Not applicable. +// Case 3: The mongod process restarts +// Not applicable. 
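For reference, the manual clearing paths exercised in this test, as opposed to the automatic invalidation events just listed, come down to two forms of the planCacheClear command. A minimal sketch, with 't' standing for the test collection used throughout the file (sort and projection can be supplied alongside the query to pin down a shape):

    // Drop only the cache entry whose shape matches this query.
    assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}}));
    // With no shape given, drop every cached plan for the collection.
    assert.commandWorked(t.runCommand('planCacheClear'));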
})(); diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js index b0ae8497ba4..a077f9fafbe 100644 --- a/jstests/core/plan_cache_list_plans.js +++ b/jstests/core/plan_cache_list_plans.js @@ -13,118 +13,115 @@ // ] (function() { - "use strict"; - let t = db.jstests_plan_cache_list_plans; - t.drop(); - - function getPlansForCacheEntry(query, sort, projection) { - let key = {query: query, sort: sort, projection: projection}; - let res = t.runCommand('planCacheListPlans', key); - assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed'); - assert(res.hasOwnProperty('plans'), - 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result'); - return res; - } - - // Assert that timeOfCreation exists in the cache entry. The difference between the current time - // and the time a plan was cached should not be larger than an hour. - function checkTimeOfCreation(query, sort, projection, date) { - let key = {query: query, sort: sort, projection: projection}; - let res = t.runCommand('planCacheListPlans', key); - assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed'); - assert(res.hasOwnProperty('timeOfCreation'), - 'timeOfCreation missing from planCacheListPlans'); - let kMillisecondsPerHour = 1000 * 60 * 60; - assert.lte(Math.abs(date - res.timeOfCreation.getTime()), - kMillisecondsPerHour, - 'timeOfCreation value is incorrect'); - } - - assert.commandWorked(t.save({a: 1, b: 1})); - assert.commandWorked(t.save({a: 1, b: 2})); - assert.commandWorked(t.save({a: 1, b: 2})); - assert.commandWorked(t.save({a: 2, b: 2})); - - // We need two indices so that the MultiPlanRunner is executed. - assert.commandWorked(t.ensureIndex({a: 1})); - assert.commandWorked(t.ensureIndex({a: 1, b: 1})); - - // Invalid key should be an error. - assert.eq([], - getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans, - 'planCacheListPlans should return empty results on unknown query shape'); - - // Create a cache entry. - assert.eq(1, - t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(), - 'unexpected document count'); - - let now = (new Date()).getTime(); - checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now); - - // Retrieve plans for valid cache entry. - let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}); - assert(entry.hasOwnProperty('works'), - 'works missing from planCacheListPlans() result ' + tojson(entry)); - assert.eq(entry.isActive, false); - - let plans = entry.plans; - assert.eq(2, plans.length, 'unexpected number of plans cached for query'); - - // Print every plan. - // Plan details/feedback verified separately in section after Query Plan Revision tests. - print('planCacheListPlans result:'); - for (let i = 0; i < plans.length; i++) { - print('plan ' + i + ': ' + tojson(plans[i])); - } +"use strict"; +let t = db.jstests_plan_cache_list_plans; +t.drop(); + +function getPlansForCacheEntry(query, sort, projection) { + let key = {query: query, sort: sort, projection: projection}; + let res = t.runCommand('planCacheListPlans', key); + assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed'); + assert(res.hasOwnProperty('plans'), + 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result'); + return res; +} + +// Assert that timeOfCreation exists in the cache entry. The difference between the current time +// and the time a plan was cached should not be larger than an hour. 
+function checkTimeOfCreation(query, sort, projection, date) { + let key = {query: query, sort: sort, projection: projection}; + let res = t.runCommand('planCacheListPlans', key); + assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed'); + assert(res.hasOwnProperty('timeOfCreation'), 'timeOfCreation missing from planCacheListPlans'); + let kMillisecondsPerHour = 1000 * 60 * 60; + assert.lte(Math.abs(date - res.timeOfCreation.getTime()), + kMillisecondsPerHour, + 'timeOfCreation value is incorrect'); +} + +assert.commandWorked(t.save({a: 1, b: 1})); +assert.commandWorked(t.save({a: 1, b: 2})); +assert.commandWorked(t.save({a: 1, b: 2})); +assert.commandWorked(t.save({a: 2, b: 2})); + +// We need two indices so that the MultiPlanRunner is executed. +assert.commandWorked(t.ensureIndex({a: 1})); +assert.commandWorked(t.ensureIndex({a: 1, b: 1})); + +// Invalid key should be an error. +assert.eq([], + getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans, + 'planCacheListPlans should return empty results on unknown query shape'); + +// Create a cache entry. +assert.eq( + 1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'unexpected document count'); + +let now = (new Date()).getTime(); +checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now); + +// Retrieve plans for valid cache entry. +let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}); +assert(entry.hasOwnProperty('works'), + 'works missing from planCacheListPlans() result ' + tojson(entry)); +assert.eq(entry.isActive, false); + +let plans = entry.plans; +assert.eq(2, plans.length, 'unexpected number of plans cached for query'); + +// Print every plan. +// Plan details/feedback verified separately in section after Query Plan Revision tests. +print('planCacheListPlans result:'); +for (let i = 0; i < plans.length; i++) { + print('plan ' + i + ': ' + tojson(plans[i])); +} + +// Test the queryHash and planCacheKey property by comparing entries for two different +// query shapes. +assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count'); +let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {}); +assert.eq(entry.hasOwnProperty("queryHash"), true); +assert.eq(entryNewShape.hasOwnProperty("queryHash"), true); +assert.neq(entry["queryHash"], entryNewShape["queryHash"]); +assert.eq(entry.hasOwnProperty("planCacheKey"), true); +assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true); +assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]); - // Test the queryHash and planCacheKey property by comparing entries for two different - // query shapes. - assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count'); - let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {}); - assert.eq(entry.hasOwnProperty("queryHash"), true); - assert.eq(entryNewShape.hasOwnProperty("queryHash"), true); - assert.neq(entry["queryHash"], entryNewShape["queryHash"]); - assert.eq(entry.hasOwnProperty("planCacheKey"), true); - assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true); - assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]); - - // - // Tests for plan reason and feedback in planCacheListPlans - // - - // Generate more plans for test query by adding indexes (compound and sparse). This will also - // clear the plan cache. 
- assert.commandWorked(t.ensureIndex({a: -1}, {sparse: true})); - assert.commandWorked(t.ensureIndex({a: 1, b: 1})); - - // Implementation note: feedback stats is calculated after 20 executions. See - // PlanCacheEntry::kMaxFeedback. - let numExecutions = 100; - for (let i = 0; i < numExecutions; i++) { - assert.eq(0, t.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed'); - } +// +// Tests for plan reason and feedback in planCacheListPlans +// - now = (new Date()).getTime(); - checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now); - - entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}); - assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result'); - assert.eq(entry.isActive, true); - plans = entry.plans; - - // This should be obvious but feedback is available only for the first (winning) plan. - print('planCacheListPlans result (after adding indexes and completing 20 executions):'); - for (let i = 0; i < plans.length; i++) { - print('plan ' + i + ': ' + tojson(plans[i])); - assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid'); - if (i > 0) { - assert.lte(plans[i].reason.score, - plans[i - 1].reason.score, - 'plans not sorted by score in descending order. ' + - 'plan ' + i + - ' has a score that is greater than that of the previous plan'); - } - assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i); +// Generate more plans for test query by adding indexes (compound and sparse). This will also +// clear the plan cache. +assert.commandWorked(t.ensureIndex({a: -1}, {sparse: true})); +assert.commandWorked(t.ensureIndex({a: 1, b: 1})); + +// Implementation note: feedback stats is calculated after 20 executions. See +// PlanCacheEntry::kMaxFeedback. +let numExecutions = 100; +for (let i = 0; i < numExecutions; i++) { + assert.eq(0, t.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed'); +} + +now = (new Date()).getTime(); +checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now); + +entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}); +assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result'); +assert.eq(entry.isActive, true); +plans = entry.plans; + +// This should be obvious but feedback is available only for the first (winning) plan. +print('planCacheListPlans result (after adding indexes and completing 20 executions):'); +for (let i = 0; i < plans.length; i++) { + print('plan ' + i + ': ' + tojson(plans[i])); + assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid'); + if (i > 0) { + assert.lte(plans[i].reason.score, + plans[i - 1].reason.score, + 'plans not sorted by score in descending order. ' + + 'plan ' + i + ' has a score that is greater than that of the previous plan'); } + assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i); +} })(); diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js index bda2a40b073..89b9c900354 100644 --- a/jstests/core/plan_cache_list_shapes.js +++ b/jstests/core/plan_cache_list_shapes.js @@ -11,85 +11,85 @@ // assumes_balancer_off, // ] (function() { - const t = db.jstests_plan_cache_list_shapes; - t.drop(); +const t = db.jstests_plan_cache_list_shapes; +t.drop(); - // Utility function to list query shapes in cache. 
- function getShapes(collection) { - if (collection === undefined) { - collection = t; - } - const res = collection.runCommand('planCacheListQueryShapes'); - print('planCacheListQueryShapes() = ' + tojson(res)); - assert.commandWorked(res, 'planCacheListQueryShapes failed'); - assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result'); - return res.shapes; +// Utility function to list query shapes in cache. +function getShapes(collection) { + if (collection === undefined) { + collection = t; } + const res = collection.runCommand('planCacheListQueryShapes'); + print('planCacheListQueryShapes() = ' + tojson(res)); + assert.commandWorked(res, 'planCacheListQueryShapes failed'); + assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result'); + return res.shapes; +} - // Attempting to retrieve cache information on non-existent collection is not an error and - // should return an empty array of query shapes. - const missingCollection = db.jstests_query_cache_missing; - missingCollection.drop(); - assert.eq(0, - getShapes(missingCollection).length, - 'planCacheListQueryShapes should return empty array on non-existent collection'); +// Attempting to retrieve cache information on non-existent collection is not an error and +// should return an empty array of query shapes. +const missingCollection = db.jstests_query_cache_missing; +missingCollection.drop(); +assert.eq(0, + getShapes(missingCollection).length, + 'planCacheListQueryShapes should return empty array on non-existent collection'); - assert.commandWorked(t.save({a: 1, b: 1})); - assert.commandWorked(t.save({a: 1, b: 2})); - assert.commandWorked(t.save({a: 1, b: 2})); - assert.commandWorked(t.save({a: 2, b: 2})); +assert.commandWorked(t.save({a: 1, b: 1})); +assert.commandWorked(t.save({a: 1, b: 2})); +assert.commandWorked(t.save({a: 1, b: 2})); +assert.commandWorked(t.save({a: 2, b: 2})); - // We need two indices so that the MultiPlanRunner is executed. - assert.commandWorked(t.ensureIndex({a: 1})); - assert.commandWorked(t.ensureIndex({a: 1, b: 1})); +// We need two indices so that the MultiPlanRunner is executed. +assert.commandWorked(t.ensureIndex({a: 1})); +assert.commandWorked(t.ensureIndex({a: 1, b: 1})); - // Run a query. - assert.eq(1, - t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(), - 'unexpected document count'); +// Run a query. +assert.eq( + 1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(), 'unexpected document count'); - // We now expect the two indices to be compared and a cache entry to exist. Retrieve query - // shapes from the test collection Number of shapes should match queries executed by multi-plan - // runner. - let shapes = getShapes(); - assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); - // Since the queryHash is computed in the server, we filter it out when matching query shapes - // here. - let filteredShape0 = shapes[0]; - delete filteredShape0.queryHash; - assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}}, - filteredShape0, - 'unexpected query shape returned from planCacheListQueryShapes'); +// We now expect the two indices to be compared and a cache entry to exist. Retrieve query +// shapes from the test collection Number of shapes should match queries executed by multi-plan +// runner. 
+let shapes = getShapes(); +assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); +// Since the queryHash is computed in the server, we filter it out when matching query shapes +// here. +let filteredShape0 = shapes[0]; +delete filteredShape0.queryHash; +assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}}, + filteredShape0, + 'unexpected query shape returned from planCacheListQueryShapes'); - // Running a different query shape should cause another entry to be cached. - assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); - shapes = getShapes(); - assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); +// Running a different query shape should cause another entry to be cached. +assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count'); +shapes = getShapes(); +assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); - // Check that each shape has a unique queryHash. - assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]); +// Check that each shape has a unique queryHash. +assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]); - // Check that queries with different regex options have distinct shapes. +// Check that queries with different regex options have distinct shapes. - // Insert some documents with strings so we have something to search for. - for (let i = 0; i < 5; i++) { - assert.commandWorked(t.insert({a: 3, s: 'hello world'})); - } - assert.commandWorked(t.insert({a: 3, s: 'hElLo wOrLd'})); +// Insert some documents with strings so we have something to search for. +for (let i = 0; i < 5; i++) { + assert.commandWorked(t.insert({a: 3, s: 'hello world'})); +} +assert.commandWorked(t.insert({a: 3, s: 'hElLo wOrLd'})); - // Run a query with a regex. Also must include 'a' so that the query may use more than one - // index, and thus, must use the MultiPlanner. - const regexQuery = {s: {$regex: 'hello world', $options: 'm'}, a: 3}; - assert.eq(5, t.find(regexQuery).itcount()); +// Run a query with a regex. Also must include 'a' so that the query may use more than one +// index, and thus, must use the MultiPlanner. +const regexQuery = { + s: {$regex: 'hello world', $options: 'm'}, + a: 3 +}; +assert.eq(5, t.find(regexQuery).itcount()); - assert.eq( - 3, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result '); +assert.eq(3, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result '); - // Run the same query, but with different regex options. We expect that this should cause a - // shape to get added. - regexQuery.s.$options = 'mi'; - // There is one more result since the query is now case sensitive. - assert.eq(6, t.find(regexQuery).itcount()); - assert.eq( - 4, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result'); +// Run the same query, but with different regex options. We expect that this should cause a +// shape to get added. +regexQuery.s.$options = 'mi'; +// There is one more result since the query is now case sensitive. 
+assert.eq(6, t.find(regexQuery).itcount()); +assert.eq(4, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result'); })(); diff --git a/jstests/core/profile1.js b/jstests/core/profile1.js index 09f1655937c..485b26f29fa 100644 --- a/jstests/core/profile1.js +++ b/jstests/core/profile1.js @@ -9,103 +9,103 @@ // ] (function() { - "use strict"; - function profileCursor(query) { - query = query || {}; - Object.extend(query, {user: username + "@" + db.getName()}); - return db.system.profile.find(query); - } - - function getProfileAString() { - var s = "\n"; - profileCursor().forEach(function(z) { - s += tojson(z) + " ,\n"; - }); - return s; - } - - function resetProfile(level, slowms) { - db.setProfilingLevel(0); - db.system.profile.drop(); - db.setProfilingLevel(level, slowms); - } - - // special db so that it can be run in parallel tests - var stddb = db; - db = db.getSisterDB("profile1"); - var username = "jstests_profile1_user"; - - db.dropUser(username); - db.dropDatabase(); - - try { - db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles}); - db.auth(username, "password"); - - // With pre-created system.profile (capped) - db.runCommand({profile: 0}); +"use strict"; +function profileCursor(query) { + query = query || {}; + Object.extend(query, {user: username + "@" + db.getName()}); + return db.system.profile.find(query); +} + +function getProfileAString() { + var s = "\n"; + profileCursor().forEach(function(z) { + s += tojson(z) + " ,\n"; + }); + return s; +} + +function resetProfile(level, slowms) { + db.setProfilingLevel(0); + db.system.profile.drop(); + db.setProfilingLevel(level, slowms); +} + +// special db so that it can be run in parallel tests +var stddb = db; +db = db.getSisterDB("profile1"); +var username = "jstests_profile1_user"; + +db.dropUser(username); +db.dropDatabase(); + +try { + db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles}); + db.auth(username, "password"); + + // With pre-created system.profile (capped) + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert.eq(0, db.runCommand({profile: -1}).was, "A"); + + // Create 32MB profile (capped) collection + db.system.profile.drop(); + db.createCollection("system.profile", {capped: true, size: 32 * 1024 * 1024}); + db.runCommand({profile: 2}); + assert.eq(2, db.runCommand({profile: -1}).was, "B"); + assert.eq(1, db.system.profile.stats().capped, "C"); + + db.foo.findOne(); + + var profileItems = profileCursor().toArray(); + + // create a msg for later if there is a failure. + var msg = ""; + profileItems.forEach(function(d) { + msg += "profile doc: " + d.ns + " " + d.op + " " + tojson(d.query ? 
d.query : d.command) + + '\n'; + }); + msg += tojson(db.system.profile.stats()); + + // If these nunmbers don't match, it is possible the collection has rolled over + // (set to 32MB above in the hope this doesn't happen) + assert.eq(2, profileItems.length, "E2 -- " + msg); + + // Make sure we can't drop if profiling is still on + assert.throws(function(z) { db.getCollection("system.profile").drop(); - assert.eq(0, db.runCommand({profile: -1}).was, "A"); - - // Create 32MB profile (capped) collection - db.system.profile.drop(); - db.createCollection("system.profile", {capped: true, size: 32 * 1024 * 1024}); - db.runCommand({profile: 2}); - assert.eq(2, db.runCommand({profile: -1}).was, "B"); - assert.eq(1, db.system.profile.stats().capped, "C"); - - db.foo.findOne(); - - var profileItems = profileCursor().toArray(); - - // create a msg for later if there is a failure. - var msg = ""; - profileItems.forEach(function(d) { - msg += "profile doc: " + d.ns + " " + d.op + " " + - tojson(d.query ? d.query : d.command) + '\n'; - }); - msg += tojson(db.system.profile.stats()); - - // If these nunmbers don't match, it is possible the collection has rolled over - // (set to 32MB above in the hope this doesn't happen) - assert.eq(2, profileItems.length, "E2 -- " + msg); - - // Make sure we can't drop if profiling is still on - assert.throws(function(z) { - db.getCollection("system.profile").drop(); - }); - - // With pre-created system.profile (un-capped) - db.runCommand({profile: 0}); - db.getCollection("system.profile").drop(); - assert.eq(0, db.runCommand({profile: -1}).was, "F"); - - db.createCollection("system.profile"); - assert.eq(0, db.runCommand({profile: 2}).ok); - assert.eq(0, db.runCommand({profile: -1}).was, "G"); - assert(!db.system.profile.stats().capped, "G1"); - - // With no system.profile collection - db.runCommand({profile: 0}); - db.getCollection("system.profile").drop(); - assert.eq(0, db.runCommand({profile: -1}).was, "H"); - - db.runCommand({profile: 2}); - assert.eq(2, db.runCommand({profile: -1}).was, "I"); - assert.eq(1, db.system.profile.stats().capped, "J"); - - resetProfile(2); - db.profile1.drop(); - var q = {_id: 5}; - var u = {$inc: {x: 1}}; - db.profile1.update(q, u); - var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0]; - assert.eq({q: q, u: u, multi: false, upsert: false}, r.command, tojson(r)); - assert.eq("update", r.op, tojson(r)); - assert.eq("profile1.profile1", r.ns, tojson(r)); - } finally { - // disable profiling for subsequent tests - assert.commandWorked(db.runCommand({profile: 0})); - db = stddb; - } + }); + + // With pre-created system.profile (un-capped) + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert.eq(0, db.runCommand({profile: -1}).was, "F"); + + db.createCollection("system.profile"); + assert.eq(0, db.runCommand({profile: 2}).ok); + assert.eq(0, db.runCommand({profile: -1}).was, "G"); + assert(!db.system.profile.stats().capped, "G1"); + + // With no system.profile collection + db.runCommand({profile: 0}); + db.getCollection("system.profile").drop(); + assert.eq(0, db.runCommand({profile: -1}).was, "H"); + + db.runCommand({profile: 2}); + assert.eq(2, db.runCommand({profile: -1}).was, "I"); + assert.eq(1, db.system.profile.stats().capped, "J"); + + resetProfile(2); + db.profile1.drop(); + var q = {_id: 5}; + var u = {$inc: {x: 1}}; + db.profile1.update(q, u); + var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0]; + assert.eq({q: q, u: u, multi: false, upsert: 
false}, r.command, tojson(r)); + assert.eq("update", r.op, tojson(r)); + assert.eq("profile1.profile1", r.ns, tojson(r)); +} finally { + // disable profiling for subsequent tests + assert.commandWorked(db.runCommand({profile: 0})); + db = stddb; +} }()); diff --git a/jstests/core/profile2.js b/jstests/core/profile2.js index 788f20f79a1..d71471b2e5f 100644 --- a/jstests/core/profile2.js +++ b/jstests/core/profile2.js @@ -24,7 +24,7 @@ var result = results[0]; assert(result.hasOwnProperty('ns')); assert(result.hasOwnProperty('millis')); assert(result.hasOwnProperty('command')); -assert.eq('string', typeof(result.command.$truncated)); +assert.eq('string', typeof (result.command.$truncated)); // String value is truncated. assert(result.command.$truncated.match(/filter: { a: "a+\.\.\." }/)); @@ -40,7 +40,7 @@ var result = results[0]; assert(result.hasOwnProperty('ns')); assert(result.hasOwnProperty('millis')); assert(result.hasOwnProperty('command')); -assert.eq('string', typeof(result.command.$truncated)); +assert.eq('string', typeof (result.command.$truncated)); // String value is truncated. assert(result.command.$truncated.match( /^{ q: { a: "a+\.\.\." }, u: {}, multi: false, upsert: false }$/)); @@ -57,7 +57,7 @@ var result = results[0]; assert(result.hasOwnProperty('ns')); assert(result.hasOwnProperty('millis')); assert(result.hasOwnProperty('command')); -assert.eq('string', typeof(result.command.$truncated)); +assert.eq('string', typeof (result.command.$truncated)); // String value is truncated. assert(result.command.$truncated.match( /^{ q: {}, u: { a: "a+\.\.\." }, multi: false, upsert: false }$/)); @@ -78,7 +78,7 @@ var result = results[0]; assert(result.hasOwnProperty('ns')); assert(result.hasOwnProperty('millis')); assert(result.hasOwnProperty('command')); -assert.eq('string', typeof(result.command.$truncated)); +assert.eq('string', typeof (result.command.$truncated)); // Query object itself is truncated. assert(result.command.$truncated.match(/filter: { a0: 1\.0, a1: .*\.\.\.$/)); diff --git a/jstests/core/profile_agg.js b/jstests/core/profile_agg.js index 1224105109b..02a29500cf6 100644 --- a/jstests/core/profile_agg.js +++ b/jstests/core/profile_agg.js @@ -3,95 +3,95 @@ // Confirms that profiled aggregation execution contains all expected metrics with proper values. (function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_agg"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Confirm metrics for agg w/ $match. 
- // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - assert.eq(8, - coll.aggregate([{$match: {a: {$gte: 2}}}], - {collation: {locale: "fr"}, comment: "agg_comment"}) - .itcount()); - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.nreturned, 8, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 8, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 8, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert.eq(profileObj.protocol, - getProfilerProtocolStringForCommand(testDB.getMongo()), - tojson(profileObj)); - assert.eq(profileObj.command.aggregate, coll.getName(), tojson(profileObj)); - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - assert.eq(profileObj.command.comment, "agg_comment", tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj)); - // Testing that 'usedDisk' is set when disk is used requires either using a lot of data or - // configuring a server parameter which could mess up other tests. This testing is - // done elsewhere so that this test can stay in the core suite - assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount()); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - - // - // Confirm that the "hint" modifier is in the profiler document. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount()); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj)); - - // - // Confirm that aggregations are truncated in the profiler as { $truncated: <string>, comment: - // <string> } when a comment parameter is provided. - // - let matchPredicate = {}; - - for (let i = 0; i < 501; i++) { - matchPredicate[i] = "a".repeat(150); - } - - assert.eq(coll.aggregate([{$match: matchPredicate}], {comment: "profile_agg"}).itcount(), 0); - profileObj = getLatestProfilerEntry(testDB); - assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj)); - assert.eq(profileObj.command.comment, "profile_agg", tojson(profileObj)); +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_agg"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Confirm metrics for agg w/ $match. 
+// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.eq( + 8, + coll.aggregate([{$match: {a: {$gte: 2}}}], {collation: {locale: "fr"}, comment: "agg_comment"}) + .itcount()); +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.nreturned, 8, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 8, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 8, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert.eq(profileObj.protocol, + getProfilerProtocolStringForCommand(testDB.getMongo()), + tojson(profileObj)); +assert.eq(profileObj.command.aggregate, coll.getName(), tojson(profileObj)); +assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); +assert.eq(profileObj.command.comment, "agg_comment", tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert(!profileObj.hasOwnProperty("hasSortStage"), tojson(profileObj)); +// Testing that 'usedDisk' is set when disk is used requires either using a lot of data or +// configuring a server parameter which could mess up other tests. This testing is +// done elsewhere so that this test can stay in the core suite +assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}]).itcount()); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); + +// +// Confirm that the "hint" modifier is in the profiler document. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.eq(1, coll.aggregate([{$match: {a: 3, b: 3}}], {hint: {_id: 1}}).itcount()); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj)); + +// +// Confirm that aggregations are truncated in the profiler as { $truncated: <string>, comment: +// <string> } when a comment parameter is provided. +// +let matchPredicate = {}; + +for (let i = 0; i < 501; i++) { + matchPredicate[i] = "a".repeat(150); +} + +assert.eq(coll.aggregate([{$match: matchPredicate}], {comment: "profile_agg"}).itcount(), 0); +profileObj = getLatestProfilerEntry(testDB); +assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj)); +assert.eq(profileObj.command.comment, "profile_agg", tojson(profileObj)); })(); diff --git a/jstests/core/profile_count.js b/jstests/core/profile_count.js index 4ef361e06e9..103a08cb728 100644 --- a/jstests/core/profile_count.js +++ b/jstests/core/profile_count.js @@ -3,90 +3,92 @@ // Confirms that profiled count execution contains all expected metrics with proper values. 
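The profile_* tests in this directory all follow the same harness pattern; a minimal standalone sketch of it is below. The database and collection names here are illustrative, and in the actual tests the shared helper getLatestProfilerEntry loaded from jstests/libs/profiler.js plays the role of the direct system.profile read shown at the end.

    var exampleDB = db.getSiblingDB("profile_example");   // illustrative database name
    assert.commandWorked(exampleDB.dropDatabase());
    exampleDB.setProfilingLevel(2);                        // profile every operation
    assert.writeOK(exampleDB.test.insert({a: 1}));         // a write op, also profiled
    exampleDB.test.findOne({a: 1});                        // the operation under test
    // Newest profiler entry for the namespace, read straight from system.profile.
    var latest = exampleDB.system.profile.find({ns: "profile_example.test"})
                     .sort({$natural: -1})
                     .limit(1)
                     .next();
    printjson(latest);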
(function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_count"); - assert.commandWorked(testDB.dropDatabase()); - var conn = testDB.getMongo(); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Collection-level count. - // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - - assert.eq(10, coll.count({}, {collation: {locale: "fr"}})); - - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); - assert.eq(profileObj.command.count, coll.getName(), tojson(profileObj)); - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - assert.eq(profileObj.planSummary, "RECORD_STORE_FAST_COUNT", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Count with non-indexed query. - // - coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - - var query = {a: {$gte: 5}}; - assert.eq(5, coll.count(query)); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.command.query, query, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 10, tojson(profileObj)); - - // - // Count with indexed query. - // - coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - query = {a: {$gte: 5}}; - assert.eq(5, coll.count(query)); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.command.query, query, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 6, tojson(profileObj)); - assert.eq(profileObj.planSummary, "COUNT_SCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.eq(1, coll.count({a: 3, b: 3})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_count"); +assert.commandWorked(testDB.dropDatabase()); +var conn = testDB.getMongo(); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Collection-level count. 
+// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} + +assert.eq(10, coll.count({}, {collation: {locale: "fr"}})); + +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); +assert.eq(profileObj.command.count, coll.getName(), tojson(profileObj)); +assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); +assert.eq(profileObj.planSummary, "RECORD_STORE_FAST_COUNT", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Count with non-indexed query. +// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} + +var query = {a: {$gte: 5}}; +assert.eq(5, coll.count(query)); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.command.query, query, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 10, tojson(profileObj)); + +// +// Count with indexed query. +// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +query = { + a: {$gte: 5} +}; +assert.eq(5, coll.count(query)); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.command.query, query, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 6, tojson(profileObj)); +assert.eq(profileObj.planSummary, "COUNT_SCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.eq(1, coll.count({a: 3, b: 3})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); })(); diff --git a/jstests/core/profile_delete.js b/jstests/core/profile_delete.js index c860ddb36f4..29f3b3ff5e7 100644 --- a/jstests/core/profile_delete.js +++ b/jstests/core/profile_delete.js @@ -3,100 +3,100 @@ // Confirms that profiled delete execution contains all expected metrics with proper values. (function() { - "use strict"; - - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. - - // Setup test db and collection. - var testDB = db.getSiblingDB("profile_delete"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Confirm metrics for single document delete. - // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - assert.writeOK(coll.remove({a: {$gte: 2}, b: {$gte: 2}}, - db.getMongo().writeMode() === "commands" - ? 
{justOne: true, collation: {locale: "fr"}} - : {justOne: true})); - - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "remove", tojson(profileObj)); - if (db.getMongo().writeMode() === "commands") { - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - } - assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 2, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm metrics for multiple document delete. - // - coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - - assert.writeOK(coll.remove({a: {$gte: 2}})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ndeleted, 8, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 8, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.writeOK(coll.remove({a: 3, b: 3})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm killing a remove operation will not log 'ndeleted' to the profiler. - // - assert(coll.drop()); - - for (let i = 0; i < 100; ++i) { - assert.commandWorked(coll.insert({a: 1})); - } - - const deleteResult = testDB.runCommand({ - delete: coll.getName(), - deletes: [{q: {$where: "sleep(1000);return true", a: 1}, limit: 0}], - maxTimeMS: 1 - }); - - // This command will time out before completing. - assert.commandFailedWithCode(deleteResult, ErrorCodes.MaxTimeMSExpired); - - profileObj = getLatestProfilerEntry(testDB); - - // 'ndeleted' should not be defined. - assert(!profileObj.hasOwnProperty("ndeleted"), profileObj); +"use strict"; + +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. + +// Setup test db and collection. +var testDB = db.getSiblingDB("profile_delete"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Confirm metrics for single document delete. +// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.writeOK(coll.remove({a: {$gte: 2}, b: {$gte: 2}}, + db.getMongo().writeMode() === "commands" + ? 
{justOne: true, collation: {locale: "fr"}} + : {justOne: true})); + +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "remove", tojson(profileObj)); +if (db.getMongo().writeMode() === "commands") { + assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); +} +assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 2, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm metrics for multiple document delete. +// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} + +assert.writeOK(coll.remove({a: {$gte: 2}})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ndeleted, 8, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 8, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.writeOK(coll.remove({a: 3, b: 3})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm killing a remove operation will not log 'ndeleted' to the profiler. +// +assert(coll.drop()); + +for (let i = 0; i < 100; ++i) { + assert.commandWorked(coll.insert({a: 1})); +} + +const deleteResult = testDB.runCommand({ + delete: coll.getName(), + deletes: [{q: {$where: "sleep(1000);return true", a: 1}, limit: 0}], + maxTimeMS: 1 +}); + +// This command will time out before completing. +assert.commandFailedWithCode(deleteResult, ErrorCodes.MaxTimeMSExpired); + +profileObj = getLatestProfilerEntry(testDB); + +// 'ndeleted' should not be defined. +assert(!profileObj.hasOwnProperty("ndeleted"), profileObj); })(); diff --git a/jstests/core/profile_distinct.js b/jstests/core/profile_distinct.js index 72d010636d6..6a2272e0f8a 100644 --- a/jstests/core/profile_distinct.js +++ b/jstests/core/profile_distinct.js @@ -3,58 +3,58 @@ // Confirms that profiled distinct execution contains all expected metrics with proper values. (function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_distinct"); - assert.commandWorked(testDB.dropDatabase()); - var conn = testDB.getMongo(); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Confirm metrics for distinct with query. 
- // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i % 5, b: i})); - } - assert.commandWorked(coll.createIndex({b: 1})); - - coll.distinct("a", {b: {$gte: 5}}, {collation: {locale: "fr"}}); - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.keysExamined, 5, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 5, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { b: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); - assert.eq(coll.getName(), profileObj.command.distinct, tojson(profileObj)); - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - coll.distinct("a", {a: 3, b: 3}); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_distinct"); +assert.commandWorked(testDB.dropDatabase()); +var conn = testDB.getMongo(); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Confirm metrics for distinct with query. +// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i % 5, b: i})); +} +assert.commandWorked(coll.createIndex({b: 1})); + +coll.distinct("a", {b: {$gte: 5}}, {collation: {locale: "fr"}}); +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.keysExamined, 5, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 5, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { b: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); +assert.eq(coll.getName(), profileObj.command.distinct, tojson(profileObj)); +assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. 
+// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +coll.distinct("a", {a: 3, b: 3}); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); })(); diff --git a/jstests/core/profile_find.js b/jstests/core/profile_find.js index cfdf1c995ec..23bcf96e2ad 100644 --- a/jstests/core/profile_find.js +++ b/jstests/core/profile_find.js @@ -3,182 +3,184 @@ // Confirms that profiled find execution contains all expected metrics with proper values. (function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_find"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - var isLegacyReadMode = (testDB.getMongo().readMode() === "legacy"); - - testDB.setProfilingLevel(2); - const profileEntryFilter = {op: "query"}; - - // - // Confirm most metrics on single document read. - // - var i; - for (i = 0; i < 3; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}})); - - if (!isLegacyReadMode) { - assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1); - } else { - assert.neq(coll.findOne({a: 1}), null); - } - - var profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); - assert.eq(profileObj.nreturned, 1, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.command.filter, {a: 1}, tojson(profileObj)); - if (isLegacyReadMode) { - assert.eq(profileObj.command.ntoreturn, -1, tojson(profileObj)); - } else { - assert.eq(profileObj.command.limit, 1, tojson(profileObj)); - assert.eq(profileObj.protocol, - getProfilerProtocolStringForCommand(testDB.getMongo()), - tojson(profileObj)); - } - - if (!isLegacyReadMode) { - assert.eq(profileObj.command.collation, {locale: "fr"}); - } - assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); - assert(!profileObj.hasOwnProperty("cursorid"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "cursorId" and "hasSortStage" metrics. 
- // - coll.drop(); - for (i = 0; i < 3; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_find"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); +var isLegacyReadMode = (testDB.getMongo().readMode() === "legacy"); + +testDB.setProfilingLevel(2); +const profileEntryFilter = { + op: "query" +}; + +// +// Confirm most metrics on single document read. +// +var i; +for (i = 0; i < 3; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} +assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr"}})); + +if (!isLegacyReadMode) { + assert.eq(coll.find({a: 1}).collation({locale: "fr"}).limit(1).itcount(), 1); +} else { assert.neq(coll.findOne({a: 1}), null); - - assert.neq(coll.find({a: {$gte: 0}}).sort({b: 1}).batchSize(1).next(), null); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - - assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); - assert(profileObj.hasOwnProperty("cursorid"), tojson(profileObj)); - assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.neq(coll.findOne({a: 3, b: 3}), null); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "replanned" metric. - // We should ideally be using a fail-point to trigger "replanned" rather than relying on - // current query planner behavior knowledge to setup a scenario. SERVER-23620 has been entered - // to add this fail-point and to update appropriate tests. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 20; ++i) { - assert.writeOK(coll.insert({a: 5, b: i})); - assert.writeOK(coll.insert({a: i, b: 10})); - } - - // Until we get the failpoint described in the above comment (regarding SERVER-23620), we must - // run the query twice. The first time will create an inactive cache entry. The second run will - // take the same number of works, and create an active cache entry. - assert.neq(coll.findOne({a: 5, b: 15}), null); - assert.neq(coll.findOne({a: 5, b: 15}), null); - - // Run a query with the same shape, but with different parameters. The plan cached for the - // query above will perform poorly (since the selectivities are different) and we will be - // forced to replan. - assert.neq(coll.findOne({a: 15, b: 10}), null); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - - assert.eq(profileObj.replanned, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm that query modifiers such as "hint" are in the profiler document. 
- // - coll.drop(); - assert.writeOK(coll.insert({_id: 2})); - - assert.eq(coll.find().hint({_id: 1}).itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj)); - - assert.eq(coll.find().comment("a comment").itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.comment, "a comment", tojson(profileObj)); - - var maxTimeMS = 100000; - assert.eq(coll.find().maxTimeMS(maxTimeMS).itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.maxTimeMS, maxTimeMS, tojson(profileObj)); - - assert.eq(coll.find().max({_id: 3}).hint({_id: 1}).itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.max, {_id: 3}, tojson(profileObj)); - - assert.eq(coll.find().min({_id: 0}).hint({_id: 1}).itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.min, {_id: 0}, tojson(profileObj)); - - assert.eq(coll.find().returnKey().itcount(), 1); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.returnKey, true, tojson(profileObj)); - - // - // Confirm that queries are truncated in the profiler as { $truncated: <string>, comment: - // <string> } - // - let queryPredicate = {}; - - for (let i = 0; i < 501; i++) { - queryPredicate[i] = "a".repeat(150); - } - - assert.eq(coll.find(queryPredicate).comment("profile_find").itcount(), 0); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj)); - assert.eq(profileObj.command.comment, "profile_find", tojson(profileObj)); - - // - // Confirm that a query whose filter contains a field named 'query' appears as expected in the - // profiler. This test ensures that upconverting a legacy query correctly identifies this as a - // user field rather than a wrapped filter spec. 
- // - coll.find({query: "foo"}).itcount(); - profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); - assert.eq(profileObj.command.filter, {query: "foo"}, tojson(profileObj)); +} + +var profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); +assert.eq(profileObj.nreturned, 1, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.command.filter, {a: 1}, tojson(profileObj)); +if (isLegacyReadMode) { + assert.eq(profileObj.command.ntoreturn, -1, tojson(profileObj)); +} else { + assert.eq(profileObj.command.limit, 1, tojson(profileObj)); + assert.eq(profileObj.protocol, + getProfilerProtocolStringForCommand(testDB.getMongo()), + tojson(profileObj)); +} + +if (!isLegacyReadMode) { + assert.eq(profileObj.command.collation, {locale: "fr"}); +} +assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); +assert(!profileObj.hasOwnProperty("cursorid"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "cursorId" and "hasSortStage" metrics. +// +coll.drop(); +for (i = 0; i < 3; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.neq(coll.findOne({a: 1}), null); + +assert.neq(coll.find({a: {$gte: 0}}).sort({b: 1}).batchSize(1).next(), null); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); + +assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); +assert(profileObj.hasOwnProperty("cursorid"), tojson(profileObj)); +assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.neq(coll.findOne({a: 3, b: 3}), null); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "replanned" metric. +// We should ideally be using a fail-point to trigger "replanned" rather than relying on +// current query planner behavior knowledge to setup a scenario. SERVER-23620 has been entered +// to add this fail-point and to update appropriate tests. 
+// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 20; ++i) { + assert.writeOK(coll.insert({a: 5, b: i})); + assert.writeOK(coll.insert({a: i, b: 10})); +} + +// Until we get the failpoint described in the above comment (regarding SERVER-23620), we must +// run the query twice. The first time will create an inactive cache entry. The second run will +// take the same number of works, and create an active cache entry. +assert.neq(coll.findOne({a: 5, b: 15}), null); +assert.neq(coll.findOne({a: 5, b: 15}), null); + +// Run a query with the same shape, but with different parameters. The plan cached for the +// query above will perform poorly (since the selectivities are different) and we will be +// forced to replan. +assert.neq(coll.findOne({a: 15, b: 10}), null); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); + +assert.eq(profileObj.replanned, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm that query modifiers such as "hint" are in the profiler document. +// +coll.drop(); +assert.writeOK(coll.insert({_id: 2})); + +assert.eq(coll.find().hint({_id: 1}).itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.hint, {_id: 1}, tojson(profileObj)); + +assert.eq(coll.find().comment("a comment").itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.comment, "a comment", tojson(profileObj)); + +var maxTimeMS = 100000; +assert.eq(coll.find().maxTimeMS(maxTimeMS).itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.maxTimeMS, maxTimeMS, tojson(profileObj)); + +assert.eq(coll.find().max({_id: 3}).hint({_id: 1}).itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.max, {_id: 3}, tojson(profileObj)); + +assert.eq(coll.find().min({_id: 0}).hint({_id: 1}).itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.min, {_id: 0}, tojson(profileObj)); + +assert.eq(coll.find().returnKey().itcount(), 1); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.returnKey, true, tojson(profileObj)); + +// +// Confirm that queries are truncated in the profiler as { $truncated: <string>, comment: +// <string> } +// +let queryPredicate = {}; + +for (let i = 0; i < 501; i++) { + queryPredicate[i] = "a".repeat(150); +} + +assert.eq(coll.find(queryPredicate).comment("profile_find").itcount(), 0); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq((typeof profileObj.command.$truncated), "string", tojson(profileObj)); +assert.eq(profileObj.command.comment, "profile_find", tojson(profileObj)); + +// +// Confirm that a query whose filter contains a field named 'query' appears as expected in the +// profiler. This test ensures that upconverting a legacy query correctly identifies this as a +// user field rather than a wrapped filter spec. 
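In the "replanned" block above, the two identical findOne() calls are what turn the cached plan from an inactive entry into an active one before the differently-selective query forces a replan. A hedged sketch of checking that the shape actually landed in the plan cache right after those two calls (whether inactive entries are listed may vary by server version):

// Expected to contain the one shape for the {a: ..., b: ...} predicate.
printjson(coll.runCommand('planCacheListQueryShapes').shapes);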
+// +coll.find({query: "foo"}).itcount(); +profileObj = getLatestProfilerEntry(testDB, profileEntryFilter); +assert.eq(profileObj.command.filter, {query: "foo"}, tojson(profileObj)); })(); diff --git a/jstests/core/profile_findandmodify.js b/jstests/core/profile_findandmodify.js index 56e673ae639..3c646d6468c 100644 --- a/jstests/core/profile_findandmodify.js +++ b/jstests/core/profile_findandmodify.js @@ -2,184 +2,183 @@ // @tags: [requires_profiling] (function() { - "use strict"; - - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. - - var testDB = db.getSiblingDB("profile_findandmodify"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Update as findAndModify. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i, b: [0]})); - } - assert.commandWorked(coll.createIndex({b: 1})); - - assert.eq({_id: 2, a: 2, b: [0]}, coll.findAndModify({ - query: {a: 2}, - update: {$inc: {"b.$[i]": 1}}, - collation: {locale: "fr"}, - arrayFilters: [{i: 0}] - })); - - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); - assert.eq(profileObj.command.update, {$inc: {"b.$[i]": 1}}, tojson(profileObj)); - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj)); - assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); - assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Delete as findAndModify. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {a: 2}, remove: true})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); - assert.eq(profileObj.command.remove, true, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); - assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); - assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Update with {upsert: true} as findAndModify. 
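The findAndModify profile entries checked above record the command document as the server received it. A sketch, assuming the shell helper forwards these options unchanged, of the equivalent raw command:

var res = testDB.runCommand({
    findAndModify: coll.getName(),
    query: {a: 2},
    update: {$inc: {"b.$[i]": 1}},
    arrayFilters: [{i: 0}],
    collation: {locale: "fr"}
});
printjson(res.value);  // the pre-update document, since {new: true} was not requested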
- // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq( - {_id: 4, a: 1}, - coll.findAndModify({query: {_id: 4}, update: {$inc: {a: 1}}, upsert: true, new: true})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.query, {_id: 4}, tojson(profileObj)); - assert.eq(profileObj.command.update, {$inc: {a: 1}}, tojson(profileObj)); - assert.eq(profileObj.command.upsert, true, tojson(profileObj)); - assert.eq(profileObj.command.new, true, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 0, tojson(profileObj)); - assert.eq(profileObj.nMatched, 0, tojson(profileObj)); - assert.eq(profileObj.nModified, 0, tojson(profileObj)); - assert.eq(profileObj.upsert, true, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Idhack update as findAndModify. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IDHACK", tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Update as findAndModify with projection. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq({a: 2}, - coll.findAndModify({query: {a: 2}, update: {$inc: {b: 1}}, fields: {_id: 0, a: 1}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); - assert.eq(profileObj.command.update, {$inc: {b: 1}}, tojson(profileObj)); - assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Delete as findAndModify with projection. 
- // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); - assert.eq(profileObj.command.remove, true, tojson(profileObj)); - assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj)); - assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "hasSortStage" on findAndModify with sort. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({_id: i, a: i})); - } - - assert.eq({_id: 0, a: 0}, - coll.findAndModify({query: {a: {$gte: 0}}, sort: {a: 1}, update: {$inc: {b: 1}}})); - - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - coll.findAndModify({query: {a: 3, b: 3}, update: {$set: {c: 1}}}); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +"use strict"; + +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. + +var testDB = db.getSiblingDB("profile_findandmodify"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Update as findAndModify. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i, b: [0]})); +} +assert.commandWorked(coll.createIndex({b: 1})); + +assert.eq({_id: 2, a: 2, b: [0]}, coll.findAndModify({ + query: {a: 2}, + update: {$inc: {"b.$[i]": 1}}, + collation: {locale: "fr"}, + arrayFilters: [{i: 0}] +})); + +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); +assert.eq(profileObj.command.update, {$inc: {"b.$[i]": 1}}, tojson(profileObj)); +assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); +assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj)); +assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); +assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Delete as findAndModify. 
+// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {a: 2}, remove: true})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); +assert.eq(profileObj.command.remove, true, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); +assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); +assert.eq(profileObj.planSummary, "COLLSCAN", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Update with {upsert: true} as findAndModify. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({_id: 4, a: 1}, + coll.findAndModify({query: {_id: 4}, update: {$inc: {a: 1}}, upsert: true, new: true})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.query, {_id: 4}, tojson(profileObj)); +assert.eq(profileObj.command.update, {$inc: {a: 1}}, tojson(profileObj)); +assert.eq(profileObj.command.upsert, true, tojson(profileObj)); +assert.eq(profileObj.command.new, true, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 0, tojson(profileObj)); +assert.eq(profileObj.nMatched, 0, tojson(profileObj)); +assert.eq(profileObj.nModified, 0, tojson(profileObj)); +assert.eq(profileObj.upsert, true, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Idhack update as findAndModify. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({_id: 2, a: 2}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IDHACK", tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Update as findAndModify with projection. 
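The IDHACK plan summary asserted above is the fast path the server takes for an equality match on _id alone, which is also why keysExamined and docsExamined are both 1 there. A sketch showing the same thing through explain (the stage name may differ on other server versions):

var winningPlan = coll.find({_id: 2}).explain().queryPlanner.winningPlan;
print(winningPlan.stage);  // expected to be "IDHACK" on the versions these tests target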
+// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({a: 2}, + coll.findAndModify({query: {a: 2}, update: {$inc: {b: 1}}, fields: {_id: 0, a: 1}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); +assert.eq(profileObj.command.update, {$inc: {b: 1}}, tojson(profileObj)); +assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Delete as findAndModify with projection. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.query, {a: 2}, tojson(profileObj)); +assert.eq(profileObj.command.remove, true, tojson(profileObj)); +assert.eq(profileObj.command.fields, {_id: 0, a: 1}, tojson(profileObj)); +assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "hasSortStage" on findAndModify with sort. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({_id: i, a: i})); +} + +assert.eq({_id: 0, a: 0}, + coll.findAndModify({query: {a: {$gte: 0}}, sort: {a: 1}, update: {$inc: {b: 1}}})); + +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +coll.findAndModify({query: {a: 3, b: 3}, update: {$set: {c: 1}}}); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); })(); diff --git a/jstests/core/profile_getmore.js b/jstests/core/profile_getmore.js index 74c62f0176b..5cdc3a51ffe 100644 --- a/jstests/core/profile_getmore.js +++ b/jstests/core/profile_getmore.js @@ -3,148 +3,146 @@ // Confirms that profiled getMore execution contains all expected metrics with proper values. (function() { - "use strict"; - - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. - - var testDB = db.getSiblingDB("profile_getmore"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Confirm basic metrics on getMore with a not-exhausted cursor. - // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - var cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2); - cursor.next(); // Perform initial query and consume first of 2 docs returned. - - var cursorId = - getLatestProfilerEntry(testDB, {op: "query"}).cursorid; // Save cursorid from find. 
- - cursor.next(); // Consume second of 2 docs from initial query. - cursor.next(); // getMore performed, leaving open cursor. - - var profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "getmore", tojson(profileObj)); - assert.eq(profileObj.keysExamined, 2, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 2, tojson(profileObj)); - assert.eq(profileObj.cursorid, cursorId, tojson(profileObj)); - assert.eq(profileObj.nreturned, 2, tojson(profileObj)); - assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj)); - assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj)); - assert.eq(profileObj.command.batchSize, 2, tojson(profileObj)); - assert.eq(profileObj.originatingCommand.filter, {a: {$gt: 0}}); - assert.eq(profileObj.originatingCommand.sort, {a: 1}); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.hasOwnProperty("execStats"), tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj)); - assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm hasSortStage on getMore with a not-exhausted cursor and in-memory sort. - // - coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - - cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2); - cursor.next(); // Perform initial query and consume first of 2 docs returned. - cursor.next(); // Consume second of 2 docs from initial query. - cursor.next(); // getMore performed, leaving open cursor. - - profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); - - assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); - - // - // Confirm "cursorExhausted" metric. - // - coll.drop(); - for (i = 0; i < 3; ++i) { - assert.writeOK(coll.insert({a: i})); - } - - cursor = coll.find().batchSize(2); - cursor.next(); // Perform initial query and consume first of 3 docs returned. - cursor.itcount(); // Exhaust the cursor. - - profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); - - assert(profileObj.hasOwnProperty("cursorid"), - tojson(profileObj)); // cursorid should always be present on getMore. - assert.neq(0, profileObj.cursorid, tojson(profileObj)); - assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm getMore on aggregation. - // - coll.drop(); - for (i = 0; i < 20; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - var cursor = coll.aggregate([{$match: {a: {$gte: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}}); - var cursorId = getLatestProfilerEntry(testDB, {"command.aggregate": coll.getName()}).cursorid; - assert.neq(0, cursorId); - - cursor.next(); // Consume the result set. 
- - profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "getmore", tojson(profileObj)); - assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj)); - assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj)); - assert.eq( - profileObj.originatingCommand.pipeline[0], {$match: {a: {$gte: 0}}}, tojson(profileObj)); - assert.eq(profileObj.cursorid, cursorId, tojson(profileObj)); - assert.eq(profileObj.nreturned, 20, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); - assert.eq(profileObj.keysExamined, 20, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 20, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - assert.eq(profileObj.originatingCommand.hint, {a: 1}, tojson(profileObj)); - - // - // Confirm that originatingCommand is truncated in the profiler as { $truncated: <string>, - // comment: <string> } - // - let docToInsert = {}; - - for (i = 0; i < 501; i++) { - docToInsert[i] = "a".repeat(150); - } - - coll.drop(); - for (i = 0; i < 4; i++) { - assert.writeOK(coll.insert(docToInsert)); - } - - cursor = coll.find(docToInsert).comment("profile_getmore").batchSize(2); - assert.eq(cursor.itcount(), 4); // Consume result set and trigger getMore. - - profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); - assert.eq((typeof profileObj.originatingCommand.$truncated), "string", tojson(profileObj)); - assert.eq(profileObj.originatingCommand.comment, "profile_getmore", tojson(profileObj)); +"use strict"; + +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. + +var testDB = db.getSiblingDB("profile_getmore"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Confirm basic metrics on getMore with a not-exhausted cursor. +// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +var cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2); +cursor.next(); // Perform initial query and consume first of 2 docs returned. + +var cursorId = getLatestProfilerEntry(testDB, {op: "query"}).cursorid; // Save cursorid from find. + +cursor.next(); // Consume second of 2 docs from initial query. +cursor.next(); // getMore performed, leaving open cursor. 
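With batchSize(2), the third next() above is what makes the shell send a getMore for the saved cursor id. A sketch of issuing that getMore by hand (not part of the test, and it would consume the open cursor):

var manualRes = testDB.runCommand({getMore: cursorId, collection: coll.getName(), batchSize: 2});
print(manualRes.cursor.nextBatch.length);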
+ +var profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "getmore", tojson(profileObj)); +assert.eq(profileObj.keysExamined, 2, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 2, tojson(profileObj)); +assert.eq(profileObj.cursorid, cursorId, tojson(profileObj)); +assert.eq(profileObj.nreturned, 2, tojson(profileObj)); +assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj)); +assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj)); +assert.eq(profileObj.command.batchSize, 2, tojson(profileObj)); +assert.eq(profileObj.originatingCommand.filter, {a: {$gt: 0}}); +assert.eq(profileObj.originatingCommand.sort, {a: 1}); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.hasOwnProperty("execStats"), tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Global"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Database"), tojson(profileObj)); +assert(profileObj.locks.hasOwnProperty("Collection"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(!profileObj.hasOwnProperty("cursorExhausted"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm hasSortStage on getMore with a not-exhausted cursor and in-memory sort. +// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} + +cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).batchSize(2); +cursor.next(); // Perform initial query and consume first of 2 docs returned. +cursor.next(); // Consume second of 2 docs from initial query. +cursor.next(); // getMore performed, leaving open cursor. + +profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); + +assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); + +// +// Confirm "cursorExhausted" metric. +// +coll.drop(); +for (i = 0; i < 3; ++i) { + assert.writeOK(coll.insert({a: i})); +} + +cursor = coll.find().batchSize(2); +cursor.next(); // Perform initial query and consume first of 3 docs returned. +cursor.itcount(); // Exhaust the cursor. + +profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); + +assert(profileObj.hasOwnProperty("cursorid"), + tojson(profileObj)); // cursorid should always be present on getMore. +assert.neq(0, profileObj.cursorid, tojson(profileObj)); +assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm getMore on aggregation. +// +coll.drop(); +for (i = 0; i < 20; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +var cursor = coll.aggregate([{$match: {a: {$gte: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}}); +var cursorId = getLatestProfilerEntry(testDB, {"command.aggregate": coll.getName()}).cursorid; +assert.neq(0, cursorId); + +cursor.next(); // Consume the result set. 
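The aggregation above asks for an empty first batch, so the single getMore triggered by cursor.next() returns all 20 documents and exhausts the cursor, which is what the nreturned and cursorExhausted assertions that follow rely on. A sketch of the same effect at the command level, reusing the test's collection and pipeline:

var aggRes = coll.runCommand(
    "aggregate", {pipeline: [{$match: {a: {$gte: 0}}}], cursor: {batchSize: 0}});
assert.eq(0, aggRes.cursor.firstBatch.length, tojson(aggRes));  // everything arrives via getMore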
+ +profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "getmore", tojson(profileObj)); +assert.eq(profileObj.command.getMore, cursorId, tojson(profileObj)); +assert.eq(profileObj.command.collection, coll.getName(), tojson(profileObj)); +assert.eq(profileObj.originatingCommand.pipeline[0], {$match: {a: {$gte: 0}}}, tojson(profileObj)); +assert.eq(profileObj.cursorid, cursorId, tojson(profileObj)); +assert.eq(profileObj.nreturned, 20, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert.eq(profileObj.cursorExhausted, true, tojson(profileObj)); +assert.eq(profileObj.keysExamined, 20, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 20, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +assert.eq(profileObj.originatingCommand.hint, {a: 1}, tojson(profileObj)); + +// +// Confirm that originatingCommand is truncated in the profiler as { $truncated: <string>, +// comment: <string> } +// +let docToInsert = {}; + +for (i = 0; i < 501; i++) { + docToInsert[i] = "a".repeat(150); +} + +coll.drop(); +for (i = 0; i < 4; i++) { + assert.writeOK(coll.insert(docToInsert)); +} + +cursor = coll.find(docToInsert).comment("profile_getmore").batchSize(2); +assert.eq(cursor.itcount(), 4); // Consume result set and trigger getMore. + +profileObj = getLatestProfilerEntry(testDB, {op: "getmore"}); +assert.eq((typeof profileObj.originatingCommand.$truncated), "string", tojson(profileObj)); +assert.eq(profileObj.originatingCommand.comment, "profile_getmore", tojson(profileObj)); })(); diff --git a/jstests/core/profile_insert.js b/jstests/core/profile_insert.js index 5f1bff8e2ea..fa53801521d 100644 --- a/jstests/core/profile_insert.js +++ b/jstests/core/profile_insert.js @@ -7,89 +7,91 @@ // ] (function() { - "use strict"; +"use strict"; - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); - var testDB = db.getSiblingDB("profile_insert"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - var isWriteCommand = (db.getMongo().writeMode() === "commands"); +var testDB = db.getSiblingDB("profile_insert"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); +var isWriteCommand = (db.getMongo().writeMode() === "commands"); - testDB.setProfilingLevel(2); +testDB.setProfilingLevel(2); - // - // Test single insert. - // - var doc = {_id: 1}; - var result = coll.insert(doc); - if (isWriteCommand) { - assert.writeOK(result); - } +// +// Test single insert. 
+// +var doc = {_id: 1}; +var result = coll.insert(doc); +if (isWriteCommand) { + assert.writeOK(result); +} - var profileObj = getLatestProfilerEntry(testDB); +var profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "insert", tojson(profileObj)); - assert.eq(profileObj.ninserted, 1, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - if (isWriteCommand) { - assert.eq(profileObj.command.ordered, true, tojson(profileObj)); - assert.eq(profileObj.protocol, - getProfilerProtocolStringForCommand(testDB.getMongo()), - tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - } +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "insert", tojson(profileObj)); +assert.eq(profileObj.ninserted, 1, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); +if (isWriteCommand) { + assert.eq(profileObj.command.ordered, true, tojson(profileObj)); + assert.eq(profileObj.protocol, + getProfilerProtocolStringForCommand(testDB.getMongo()), + tojson(profileObj)); + assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +} - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("ts"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("client"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("ts"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("client"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - // - // Test multi-insert. - // - coll.drop(); +// +// Test multi-insert. +// +coll.drop(); - var docArray = [{_id: 1}, {_id: 2}]; - var bulk = coll.initializeUnorderedBulkOp(); - bulk.insert(docArray[0]); - bulk.insert(docArray[1]); - result = bulk.execute(); - if (isWriteCommand) { - assert.writeOK(result); - } +var docArray = [{_id: 1}, {_id: 2}]; +var bulk = coll.initializeUnorderedBulkOp(); +bulk.insert(docArray[0]); +bulk.insert(docArray[1]); +result = bulk.execute(); +if (isWriteCommand) { + assert.writeOK(result); +} - profileObj = getLatestProfilerEntry(testDB); +profileObj = getLatestProfilerEntry(testDB); - if (isWriteCommand) { - assert.eq(profileObj.ninserted, 2, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 2, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - } else { - // Documents were inserted one at a time. - assert.eq(profileObj.ninserted, 1, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - } +if (isWriteCommand) { + assert.eq(profileObj.ninserted, 2, tojson(profileObj)); + assert.eq(profileObj.keysInserted, 2, tojson(profileObj)); + assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +} else { + // Documents were inserted one at a time. 
+ assert.eq(profileObj.ninserted, 1, tojson(profileObj)); + assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); + assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +} - // - // Test insert options. - // - coll.drop(); - doc = {_id: 1}; - var wtimeout = 60000; - assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false})); +// +// Test insert options. +// +coll.drop(); +doc = { + _id: 1 +}; +var wtimeout = 60000; +assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false})); - profileObj = getLatestProfilerEntry(testDB); +profileObj = getLatestProfilerEntry(testDB); - if (isWriteCommand) { - assert.eq(profileObj.command.ordered, false, tojson(profileObj)); - assert.eq(profileObj.command.writeConcern.w, 1, tojson(profileObj)); - assert.eq(profileObj.command.writeConcern.wtimeout, wtimeout, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - } +if (isWriteCommand) { + assert.eq(profileObj.command.ordered, false, tojson(profileObj)); + assert.eq(profileObj.command.writeConcern.w, 1, tojson(profileObj)); + assert.eq(profileObj.command.writeConcern.wtimeout, wtimeout, tojson(profileObj)); + assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +} })(); diff --git a/jstests/core/profile_list_collections.js b/jstests/core/profile_list_collections.js index 3db9e7971c9..cf6132e71c7 100644 --- a/jstests/core/profile_list_collections.js +++ b/jstests/core/profile_list_collections.js @@ -3,37 +3,39 @@ // Confirms that a listCollections command is not profiled. (function() { - "use strict"; +"use strict"; - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand. - load("jstests/libs/profiler.js"); +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand. +load("jstests/libs/profiler.js"); - var testDB = db.getSiblingDB("profile_list_collections"); - assert.commandWorked(testDB.dropDatabase()); - const numCollections = 5; - for (let i = 0; i < numCollections; ++i) { - assert.commandWorked(testDB.runCommand({create: "test_" + i})); - } +var testDB = db.getSiblingDB("profile_list_collections"); +assert.commandWorked(testDB.dropDatabase()); +const numCollections = 5; +for (let i = 0; i < numCollections; ++i) { + assert.commandWorked(testDB.runCommand({create: "test_" + i})); +} - testDB.setProfilingLevel(2); +testDB.setProfilingLevel(2); - const profileEntryFilter = {op: "command", command: "listCollections"}; +const profileEntryFilter = { + op: "command", + command: "listCollections" +}; - let cmdRes = - assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 1}})); +let cmdRes = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 1}})); - // We don't profile listCollections commands. - assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), - 0, - "Did not expect any profile entry for a listCollections command"); +// We don't profile listCollections commands. 
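One reason listCollections never shows up in the profile is visible in the cursor it hands back: the cursor is attached to a command pseudo-namespace rather than a real collection, so there is nothing to attribute a profile entry to. A sketch (the exact ns string is an assumption):

var listRes = assert.commandWorked(testDB.runCommand({listCollections: 1, cursor: {batchSize: 1}}));
print(listRes.cursor.ns);  // e.g. "profile_list_collections.$cmd.listCollections"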
+assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), + 0, + "Did not expect any profile entry for a listCollections command"); - const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); - cmdRes = assert.commandWorked( - testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); +const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); +cmdRes = assert.commandWorked( + testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); - // A listCollections cursor doesn't really have a namespace to use to record profile entries, so - // does not get recorded in the profile. - assert.throws(() => getLatestProfilerEntry(testDB, {op: "getmore"}), - [], - "Did not expect to find entry for getMore on a listCollections cursor"); +// A listCollections cursor doesn't really have a namespace to use to record profile entries, so +// does not get recorded in the profile. +assert.throws(() => getLatestProfilerEntry(testDB, {op: "getmore"}), + [], + "Did not expect to find entry for getMore on a listCollections cursor"); })(); diff --git a/jstests/core/profile_list_indexes.js b/jstests/core/profile_list_indexes.js index 2876a58ae90..b94ee6b422b 100644 --- a/jstests/core/profile_list_indexes.js +++ b/jstests/core/profile_list_indexes.js @@ -3,41 +3,46 @@ // Confirms that a listIndexes command and subsequent getMores of its cursor are profiled correctly. (function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand. - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_list_indexes"); - var testColl = testDB.testColl; - assert.commandWorked(testDB.dropDatabase()); - const numIndexes = 5; - for (let i = 0; i < numIndexes; ++i) { - let indexSpec = {}; - indexSpec["fakeField_" + i] = 1; - assert.commandWorked(testColl.ensureIndex(indexSpec)); - } - - testDB.setProfilingLevel(2); - - const listIndexesCommand = {listIndexes: testColl.getName(), cursor: {batchSize: 1}}; - const profileEntryFilter = {op: "command"}; - for (var field in listIndexesCommand) { - profileEntryFilter['command.' + field] = listIndexesCommand[field]; - } - - let cmdRes = assert.commandWorked(testDB.runCommand(listIndexesCommand)); - - assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), - 1, - "Expected to find profile entry for a listIndexes command"); - - const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); - cmdRes = assert.commandWorked( - testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); - - const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"}); - for (var field in listIndexesCommand) { - assert.eq(getMoreProfileEntry.originatingCommand[field], listIndexesCommand[field], field); - } +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand. +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_list_indexes"); +var testColl = testDB.testColl; +assert.commandWorked(testDB.dropDatabase()); +const numIndexes = 5; +for (let i = 0; i < numIndexes; ++i) { + let indexSpec = {}; + indexSpec["fakeField_" + i] = 1; + assert.commandWorked(testColl.ensureIndex(indexSpec)); +} + +testDB.setProfilingLevel(2); + +const listIndexesCommand = { + listIndexes: testColl.getName(), + cursor: {batchSize: 1} +}; +const profileEntryFilter = { + op: "command" +}; +for (var field in listIndexesCommand) { + profileEntryFilter['command.' 
+ field] = listIndexesCommand[field]; +} + +let cmdRes = assert.commandWorked(testDB.runCommand(listIndexesCommand)); + +assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), + 1, + "Expected to find profile entry for a listIndexes command"); + +const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); +cmdRes = assert.commandWorked( + testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); + +const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"}); +for (var field in listIndexesCommand) { + assert.eq(getMoreProfileEntry.originatingCommand[field], listIndexesCommand[field], field); +} })(); diff --git a/jstests/core/profile_mapreduce.js b/jstests/core/profile_mapreduce.js index 117689c4d73..0dc3c81a0c2 100644 --- a/jstests/core/profile_mapreduce.js +++ b/jstests/core/profile_mapreduce.js @@ -8,97 +8,97 @@ // Confirms that profiled findAndModify execution contains all expected metrics with proper values. (function() { - "use strict"; - - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); - - var testDB = db.getSiblingDB("profile_mapreduce"); - assert.commandWorked(testDB.dropDatabase()); - var conn = testDB.getMongo(); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - var mapFunction = function() { - emit(this.a, this.b); - }; - - var reduceFunction = function(a, b) { - return Array.sum(b); - }; - - // - // Confirm metrics for mapReduce with query. - // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({a: i, b: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - coll.mapReduce(mapFunction, - reduceFunction, - {query: {a: {$gte: 0}}, out: {inline: 1}, collation: {locale: "fr"}}); - - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.keysExamined, 3, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); - assert.eq(coll.getName(), profileObj.command.mapreduce, tojson(profileObj)); - assert.eq({locale: "fr"}, profileObj.command.collation, tojson(profileObj)); - assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm metrics for mapReduce with sort stage. - // - coll.drop(); - for (var i = 0; i < 5; i++) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - coll.mapReduce(mapFunction, reduceFunction, {sort: {b: 1}, out: {inline: 1}}); - - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm namespace field is correct when output is a collection. 
- // - coll.drop(); - for (var i = 0; i < 3; i++) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - var outputCollectionName = "output_col"; - coll.mapReduce(mapFunction, reduceFunction, {query: {a: {$gte: 0}}, out: outputCollectionName}); - - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - coll.mapReduce(mapFunction, reduceFunction, {query: {a: 3, b: 3}, out: {inline: 1}}); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); +"use strict"; + +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); + +var testDB = db.getSiblingDB("profile_mapreduce"); +assert.commandWorked(testDB.dropDatabase()); +var conn = testDB.getMongo(); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +var mapFunction = function() { + emit(this.a, this.b); +}; + +var reduceFunction = function(a, b) { + return Array.sum(b); +}; + +// +// Confirm metrics for mapReduce with query. +// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({a: i, b: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +coll.mapReduce(mapFunction, + reduceFunction, + {query: {a: {$gte: 0}}, out: {inline: 1}, collation: {locale: "fr"}}); + +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.keysExamined, 3, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 3, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.protocol, getProfilerProtocolStringForCommand(conn), tojson(profileObj)); +assert.eq(coll.getName(), profileObj.command.mapreduce, tojson(profileObj)); +assert.eq({locale: "fr"}, profileObj.command.collation, tojson(profileObj)); +assert(profileObj.hasOwnProperty("responseLength"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm metrics for mapReduce with sort stage. +// +coll.drop(); +for (var i = 0; i < 5; i++) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +coll.mapReduce(mapFunction, reduceFunction, {sort: {b: 1}, out: {inline: 1}}); + +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.hasSortStage, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm namespace field is correct when output is a collection. 
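The mapReduce above is profiled as an op of type "command" with command.mapreduce naming the source collection. As a hedged aside, the same grouping expressed as an aggregation would produce a profile entry keyed on command.aggregate instead:

coll.aggregate([{$match: {a: {$gte: 0}}}, {$group: {_id: "$a", total: {$sum: "$b"}}}],
               {collation: {locale: "fr"}});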
+// +coll.drop(); +for (var i = 0; i < 3; i++) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +var outputCollectionName = "output_col"; +coll.mapReduce(mapFunction, reduceFunction, {query: {a: {$gte: 0}}, out: outputCollectionName}); + +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +coll.mapReduce(mapFunction, reduceFunction, {query: {a: 3, b: 3}, out: {inline: 1}}); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); })(); diff --git a/jstests/core/profile_no_such_db.js b/jstests/core/profile_no_such_db.js index 905c49ae409..1ac30b7c22e 100644 --- a/jstests/core/profile_no_such_db.js +++ b/jstests/core/profile_no_such_db.js @@ -2,39 +2,38 @@ // Test that reading the profiling level doesn't create databases, but setting it does. (function(db) { - 'use strict'; +'use strict'; - function dbExists() { - return Array.contains(db.getMongo().getDBNames(), db.getName()); - } +function dbExists() { + return Array.contains(db.getMongo().getDBNames(), db.getName()); +} - db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var. - assert.commandWorked(db.dropDatabase()); - assert(!dbExists()); - - // Reading the profiling level shouldn't create the database. - var defaultProfilingLevel = db.getProfilingLevel(); - assert(!dbExists()); +db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var. +assert.commandWorked(db.dropDatabase()); +assert(!dbExists()); - // This test assumes that the default profiling level hasn't been changed. - assert.eq(defaultProfilingLevel, 0); +// Reading the profiling level shouldn't create the database. +var defaultProfilingLevel = db.getProfilingLevel(); +assert(!dbExists()); - [0, 1, 2].forEach(function(level) { - jsTest.log('Testing profiling level ' + level); +// This test assumes that the default profiling level hasn't been changed. +assert.eq(defaultProfilingLevel, 0); - // Setting the profiling level creates the database. - // Note: setting the profiling level to 0 puts the database in a weird state where it - // exists internally, but doesn't show up in listDatabases, and won't exist if you - // restart the server. - var res = db.setProfilingLevel(level); - assert.eq(res.was, defaultProfilingLevel); - assert(dbExists() || level == 0); - assert.eq(db.getProfilingLevel(), level); +[0, 1, 2].forEach(function(level) { + jsTest.log('Testing profiling level ' + level); - // Dropping the db reverts the profiling level to the default. - assert.commandWorked(db.dropDatabase()); - assert.eq(db.getProfilingLevel(), defaultProfilingLevel); - assert(!dbExists()); - }); + // Setting the profiling level creates the database. + // Note: setting the profiling level to 0 puts the database in a weird state where it + // exists internally, but doesn't show up in listDatabases, and won't exist if you + // restart the server. + var res = db.setProfilingLevel(level); + assert.eq(res.was, defaultProfilingLevel); + assert(dbExists() || level == 0); + assert.eq(db.getProfilingLevel(), level); + // Dropping the db reverts the profiling level to the default. 
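getProfilingLevel() and setProfilingLevel() are thin wrappers over the profile command, and reading with a level of -1 is what lets the getter avoid creating the database. A sketch under that assumption:

var was = db.runCommand({profile: -1}).was;         // read the current level, no side effects
assert.commandWorked(db.runCommand({profile: 2}));  // setting a level does create the database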
+ assert.commandWorked(db.dropDatabase()); + assert.eq(db.getProfilingLevel(), defaultProfilingLevel); + assert(!dbExists()); +}); }(db)); diff --git a/jstests/core/profile_query_hash.js b/jstests/core/profile_query_hash.js index 4c7b3e23ab7..e635c7b6b56 100644 --- a/jstests/core/profile_query_hash.js +++ b/jstests/core/profile_query_hash.js @@ -3,109 +3,106 @@ // Confirms that profile entries for find commands contain the appropriate query hash. (function() { - "use strict"; - - // For getLatestProfilerEntry - load("jstests/libs/profiler.js"); - - const testDB = db.getSiblingDB("query_hash"); - assert.commandWorked(testDB.dropDatabase()); - - const coll = testDB.test; - - // Utility function to list query shapes in cache. The length of the list of query shapes - // returned is used to validate the number of query hashes accumulated. - function getShapes(collection) { - const res = collection.runCommand('planCacheListQueryShapes'); - return res.shapes; - } - - assert.writeOK(coll.insert({a: 1, b: 1})); - assert.writeOK(coll.insert({a: 1, b: 2})); - assert.writeOK(coll.insert({a: 1, b: 2})); - assert.writeOK(coll.insert({a: 2, b: 2})); - - // We need two indices since we do not currently create cache entries for queries with a single - // candidate plan. - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({a: 1, b: 1})); - - assert.commandWorked(testDB.setProfilingLevel(2)); - - // Executes query0 and gets the corresponding system.profile entry. - assert.eq( - 1, - coll.find({a: 1, b: 1}, {a: 1}).sort({a: -1}).comment("Query0 find command").itcount(), - 'unexpected document count'); - const profileObj0 = - getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"}); - assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0)); - let shapes = getShapes(coll); - assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); - - // Executes query1 and gets the corresponding system.profile entry. - assert.eq( - 0, - coll.find({a: 2, b: 1}, {a: 1}).sort({a: -1}).comment("Query1 find command").itcount(), - 'unexpected document count'); - const profileObj1 = - getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query1 find command"}); - assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1)); - - // Since the query shapes are the same, we only expect there to be one query shape present in - // the plan cache commands output. - shapes = getShapes(coll); - assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); - assert.eq( - profileObj0.planCacheKey, profileObj1.planCacheKey, 'unexpected not matching query hashes'); - - // Test that the planCacheKey is the same in explain output for query0 and query1 as it was - // in system.profile output. - const explainQuery0 = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1}) - .sort({a: -1}) - .comment("Query0 find command") - .explain("queryPlanner")); - assert.eq(explainQuery0.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery0); - const explainQuery1 = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1}) - .sort({a: -1}) - .comment("Query1 find command") - .explain("queryPlanner")); - assert.eq(explainQuery1.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery1); - - // Check that the 'planCacheKey' is the same for both query 0 and query 1. 
- assert.eq(explainQuery0.queryPlanner.planCacheKey, explainQuery1.queryPlanner.planCacheKey); - - // Executes query2 and gets the corresponding system.profile entry. - assert.eq(0, - coll.find({a: 12000, b: 1}).comment("Query2 find command").itcount(), - 'unexpected document count'); - const profileObj2 = - getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query2 find command"}); - assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2)); - - // Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2 - // should have a unique hash. Asserts that a total of two distinct hashes results in two query - // shapes. - shapes = getShapes(coll); - assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); - assert.neq( - profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes'); - - // The planCacheKey in explain should be different for query2 than the hash from query0 and - // query1. - const explainQuery2 = assert.commandWorked( - coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner")); - assert(explainQuery2.queryPlanner.hasOwnProperty("planCacheKey")); - assert.neq(explainQuery2.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery2); - assert.eq(explainQuery2.queryPlanner.planCacheKey, profileObj2.planCacheKey, explainQuery2); - - // Now drop an index. This should change the 'planCacheKey' value for queries, but not the - // 'queryHash'. - assert.commandWorked(coll.dropIndex({a: 1})); - const explainQuery2PostCatalogChange = assert.commandWorked( - coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner")); - assert.eq(explainQuery2.queryPlanner.queryHash, - explainQuery2PostCatalogChange.queryPlanner.queryHash); - assert.neq(explainQuery2.queryPlanner.planCacheKey, - explainQuery2PostCatalogChange.queryPlanner.planCacheKey); +"use strict"; + +// For getLatestProfilerEntry +load("jstests/libs/profiler.js"); + +const testDB = db.getSiblingDB("query_hash"); +assert.commandWorked(testDB.dropDatabase()); + +const coll = testDB.test; + +// Utility function to list query shapes in cache. The length of the list of query shapes +// returned is used to validate the number of query hashes accumulated. +function getShapes(collection) { + const res = collection.runCommand('planCacheListQueryShapes'); + return res.shapes; +} + +assert.writeOK(coll.insert({a: 1, b: 1})); +assert.writeOK(coll.insert({a: 1, b: 2})); +assert.writeOK(coll.insert({a: 1, b: 2})); +assert.writeOK(coll.insert({a: 2, b: 2})); + +// We need two indices since we do not currently create cache entries for queries with a single +// candidate plan. +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({a: 1, b: 1})); + +assert.commandWorked(testDB.setProfilingLevel(2)); + +// Executes query0 and gets the corresponding system.profile entry. +assert.eq(1, + coll.find({a: 1, b: 1}, {a: 1}).sort({a: -1}).comment("Query0 find command").itcount(), + 'unexpected document count'); +const profileObj0 = + getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"}); +assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0)); +let shapes = getShapes(coll); +assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); + +// Executes query1 and gets the corresponding system.profile entry. 
+assert.eq(0, + coll.find({a: 2, b: 1}, {a: 1}).sort({a: -1}).comment("Query1 find command").itcount(), + 'unexpected document count'); +const profileObj1 = + getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query1 find command"}); +assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1)); + +// Since the query shapes are the same, we only expect there to be one query shape present in +// the plan cache commands output. +shapes = getShapes(coll); +assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); +assert.eq( + profileObj0.planCacheKey, profileObj1.planCacheKey, 'unexpected not matching query hashes'); + +// Test that the planCacheKey is the same in explain output for query0 and query1 as it was +// in system.profile output. +const explainQuery0 = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1}) + .sort({a: -1}) + .comment("Query0 find command") + .explain("queryPlanner")); +assert.eq(explainQuery0.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery0); +const explainQuery1 = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1}) + .sort({a: -1}) + .comment("Query1 find command") + .explain("queryPlanner")); +assert.eq(explainQuery1.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery1); + +// Check that the 'planCacheKey' is the same for both query 0 and query 1. +assert.eq(explainQuery0.queryPlanner.planCacheKey, explainQuery1.queryPlanner.planCacheKey); + +// Executes query2 and gets the corresponding system.profile entry. +assert.eq(0, + coll.find({a: 12000, b: 1}).comment("Query2 find command").itcount(), + 'unexpected document count'); +const profileObj2 = + getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query2 find command"}); +assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2)); + +// Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2 +// should have a unique hash. Asserts that a total of two distinct hashes results in two query +// shapes. +shapes = getShapes(coll); +assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result'); +assert.neq(profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes'); + +// The planCacheKey in explain should be different for query2 than the hash from query0 and +// query1. +const explainQuery2 = assert.commandWorked( + coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner")); +assert(explainQuery2.queryPlanner.hasOwnProperty("planCacheKey")); +assert.neq(explainQuery2.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery2); +assert.eq(explainQuery2.queryPlanner.planCacheKey, profileObj2.planCacheKey, explainQuery2); + +// Now drop an index. This should change the 'planCacheKey' value for queries, but not the +// 'queryHash'. 
+assert.commandWorked(coll.dropIndex({a: 1})); +const explainQuery2PostCatalogChange = assert.commandWorked( + coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner")); +assert.eq(explainQuery2.queryPlanner.queryHash, + explainQuery2PostCatalogChange.queryPlanner.queryHash); +assert.neq(explainQuery2.queryPlanner.planCacheKey, + explainQuery2PostCatalogChange.queryPlanner.planCacheKey); })(); diff --git a/jstests/core/profile_repair_cursor.js b/jstests/core/profile_repair_cursor.js index f22399c58ff..c0b3a34a929 100644 --- a/jstests/core/profile_repair_cursor.js +++ b/jstests/core/profile_repair_cursor.js @@ -4,37 +4,40 @@ // correctly. (function() { - "use strict"; +"use strict"; - // For getLatestProfilerEntry and getProfilerProtocolStringForCommand - load("jstests/libs/profiler.js"); +// For getLatestProfilerEntry and getProfilerProtocolStringForCommand +load("jstests/libs/profiler.js"); - var testDB = db.getSiblingDB("profile_repair_cursor"); - var testColl = testDB.testColl; - assert.commandWorked(testDB.dropDatabase()); +var testDB = db.getSiblingDB("profile_repair_cursor"); +var testColl = testDB.testColl; +assert.commandWorked(testDB.dropDatabase()); - // Insert some data to scan over. - assert.writeOK(testColl.insert([{}, {}, {}, {}])); +// Insert some data to scan over. +assert.writeOK(testColl.insert([{}, {}, {}, {}])); - testDB.setProfilingLevel(2); +testDB.setProfilingLevel(2); - const profileEntryFilter = {op: "command", "command.repairCursor": testColl.getName()}; +const profileEntryFilter = { + op: "command", + "command.repairCursor": testColl.getName() +}; - let cmdRes = testDB.runCommand({repairCursor: testColl.getName()}); - if (cmdRes.code === ErrorCodes.CommandNotSupported) { - // Some storage engines do not support this command, so we can skip this test. - return; - } - assert.commandWorked(cmdRes); +let cmdRes = testDB.runCommand({repairCursor: testColl.getName()}); +if (cmdRes.code === ErrorCodes.CommandNotSupported) { + // Some storage engines do not support this command, so we can skip this test. + return; +} +assert.commandWorked(cmdRes); - assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), - 1, - "expected to find profile entry for a repairCursor command"); +assert.eq(testDB.system.profile.find(profileEntryFilter).itcount(), + 1, + "expected to find profile entry for a repairCursor command"); - const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); - cmdRes = assert.commandWorked( - testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); +const getMoreCollName = cmdRes.cursor.ns.substr(cmdRes.cursor.ns.indexOf(".") + 1); +cmdRes = assert.commandWorked( + testDB.runCommand({getMore: cmdRes.cursor.id, collection: getMoreCollName})); - const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"}); - assert.eq(getMoreProfileEntry.originatingCommand.repairCursor, testColl.getName()); +const getMoreProfileEntry = getLatestProfilerEntry(testDB, {op: "getmore"}); +assert.eq(getMoreProfileEntry.originatingCommand.repairCursor, testColl.getName()); })(); diff --git a/jstests/core/profile_sampling.js b/jstests/core/profile_sampling.js index 9b37e274055..2bd2261031e 100644 --- a/jstests/core/profile_sampling.js +++ b/jstests/core/profile_sampling.js @@ -1,64 +1,64 @@ // Confirms that the number of profiled operations is consistent with the sampleRate, if set. 
// @tags: [does_not_support_stepdowns, requires_fastcount, requires_profiling] (function() { - "use strict"; +"use strict"; - // Use a special db to support running other tests in parallel. - const profileDB = db.getSisterDB("profile_sampling"); - const coll = profileDB.profile_sampling; +// Use a special db to support running other tests in parallel. +const profileDB = db.getSisterDB("profile_sampling"); +const coll = profileDB.profile_sampling; - profileDB.dropDatabase(); +profileDB.dropDatabase(); - let originalProfilingSettings; - try { - originalProfilingSettings = assert.commandWorked(profileDB.setProfilingLevel(0)); - profileDB.system.profile.drop(); - assert.eq(0, profileDB.system.profile.count()); +let originalProfilingSettings; +try { + originalProfilingSettings = assert.commandWorked(profileDB.setProfilingLevel(0)); + profileDB.system.profile.drop(); + assert.eq(0, profileDB.system.profile.count()); - profileDB.createCollection(coll.getName()); - assert.writeOK(coll.insert({x: 1})); + profileDB.createCollection(coll.getName()); + assert.writeOK(coll.insert({x: 1})); - assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0, slowms: -1})); + assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0, slowms: -1})); - assert.neq(null, coll.findOne({x: 1})); - assert.eq(1, coll.find({x: 1}).count()); - assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); - - assert.commandWorked(profileDB.setProfilingLevel(0)); + assert.neq(null, coll.findOne({x: 1})); + assert.eq(1, coll.find({x: 1}).count()); + assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); - assert.eq(0, profileDB.system.profile.count()); + assert.commandWorked(profileDB.setProfilingLevel(0)); - profileDB.system.profile.drop(); - assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0.5, slowms: -1})); + assert.eq(0, profileDB.system.profile.count()); - // This should generate about 500 profile log entries. - for (let i = 0; i < 500; i++) { - assert.neq(null, coll.findOne({x: 1})); - assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); - } + profileDB.system.profile.drop(); + assert.commandWorked(profileDB.setProfilingLevel(1, {sampleRate: 0.5, slowms: -1})); - assert.commandWorked(profileDB.setProfilingLevel(0)); + // This should generate about 500 profile log entries. + for (let i = 0; i < 500; i++) { + assert.neq(null, coll.findOne({x: 1})); + assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); + } - assert.between(10, profileDB.system.profile.count(), 990); - profileDB.system.profile.drop(); + assert.commandWorked(profileDB.setProfilingLevel(0)); - // Profiling level of 2 should log all operations, regardless of sample rate setting. - assert.commandWorked(profileDB.setProfilingLevel(2, {sampleRate: 0})); - // This should generate exactly 1000 profile log entries. - for (let i = 0; i < 5; i++) { - assert.neq(null, coll.findOne({x: 1})); - assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); - } - assert.commandWorked(profileDB.setProfilingLevel(0)); - assert.eq(10, profileDB.system.profile.count()); - profileDB.system.profile.drop(); + assert.between(10, profileDB.system.profile.count(), 990); + profileDB.system.profile.drop(); - } finally { - let profileCmd = {}; - profileCmd.profile = originalProfilingSettings.was; - profileCmd = Object.extend(profileCmd, originalProfilingSettings); - delete profileCmd.was; - delete profileCmd.ok; - assert.commandWorked(profileDB.runCommand(profileCmd)); + // Profiling level of 2 should log all operations, regardless of sample rate setting. 
+ assert.commandWorked(profileDB.setProfilingLevel(2, {sampleRate: 0})); + // This should generate exactly 1000 profile log entries. + for (let i = 0; i < 5; i++) { + assert.neq(null, coll.findOne({x: 1})); + assert.writeOK(coll.update({x: 1}, {$inc: {a: 1}})); } + assert.commandWorked(profileDB.setProfilingLevel(0)); + assert.eq(10, profileDB.system.profile.count()); + profileDB.system.profile.drop(); + +} finally { + let profileCmd = {}; + profileCmd.profile = originalProfilingSettings.was; + profileCmd = Object.extend(profileCmd, originalProfilingSettings); + delete profileCmd.was; + delete profileCmd.ok; + assert.commandWorked(profileDB.runCommand(profileCmd)); +} }()); diff --git a/jstests/core/profile_update.js b/jstests/core/profile_update.js index 685594cb45f..8cde2ea6784 100644 --- a/jstests/core/profile_update.js +++ b/jstests/core/profile_update.js @@ -3,124 +3,124 @@ // Confirms that profiled update execution contains all expected metrics with proper values. (function() { - "use strict"; - - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. - - // Setup test db and collection. - var testDB = db.getSiblingDB("profile_update"); - assert.commandWorked(testDB.dropDatabase()); - var coll = testDB.getCollection("test"); - - testDB.setProfilingLevel(2); - - // - // Confirm metrics for single document update. - // - var i; - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); - - assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}})); - - var profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); - assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm metrics for parameters that require "commands" mode. - // - - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0]})); - - assert.writeOK(coll.update( - {_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]})); - - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); - assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj)); - } - - // - // Confirm metrics for multiple indexed document update. - // +"use strict"; + +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. + +// Setup test db and collection. 
+var testDB = db.getSiblingDB("profile_update"); +assert.commandWorked(testDB.dropDatabase()); +var coll = testDB.getCollection("test"); + +testDB.setProfilingLevel(2); + +// +// Confirm metrics for single document update. +// +var i; +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.writeOK(coll.update({a: {$gte: 2}}, {$set: {c: 1}, $inc: {a: -10}})); + +var profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "update", tojson(profileObj)); +assert.eq(profileObj.keysExamined, 1, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 1, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 1, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 1, tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("millis"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("numYield"), tojson(profileObj)); +assert(profileObj.hasOwnProperty("locks"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm metrics for parameters that require "commands" mode. +// + +if (db.getMongo().writeMode() === "commands") { coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); + assert.writeOK(coll.insert({_id: 0, a: [0]})); - assert.writeOK(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.keysExamined, 5, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 5, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 5, tojson(profileObj)); - assert.eq(profileObj.keysDeleted, 5, tojson(profileObj)); - assert.eq(profileObj.nMatched, 5, tojson(profileObj)); - assert.eq(profileObj.nModified, 5, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm metrics for insert on update with "upsert: true". 
- // - coll.drop(); - for (i = 0; i < 10; ++i) { - assert.writeOK(coll.insert({a: i})); - } - assert.commandWorked(coll.createIndex({a: 1})); + assert.writeOK(coll.update( + {_id: 0}, {$set: {"a.$[i]": 1}}, {collation: {locale: "fr"}, arrayFilters: [{i: 0}]})); - assert.writeOK(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true})); profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.command, - {q: {_id: "new value", a: 4}, u: {$inc: {b: 1}}, multi: false, upsert: true}, - tojson(profileObj)); - assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); - assert.eq(profileObj.docsExamined, 0, tojson(profileObj)); - assert.eq(profileObj.keysInserted, 2, tojson(profileObj)); - assert.eq(profileObj.nMatched, 0, tojson(profileObj)); - assert.eq(profileObj.nModified, 0, tojson(profileObj)); - assert.eq(profileObj.upsert, true, tojson(profileObj)); - assert.eq(profileObj.planSummary, "IXSCAN { _id: 1 }", tojson(profileObj)); - assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); - - // - // Confirm "fromMultiPlanner" metric. - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1})); - assert.commandWorked(coll.createIndex({b: 1})); - for (i = 0; i < 5; ++i) { - assert.writeOK(coll.insert({a: i, b: i})); - } - - assert.writeOK(coll.update({a: 3, b: 3}, {$set: {c: 1}})); - profileObj = getLatestProfilerEntry(testDB); - - assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); - assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + assert.eq(profileObj.ns, coll.getFullName(), tojson(profileObj)); + assert.eq(profileObj.op, "update", tojson(profileObj)); + assert.eq(profileObj.command.collation, {locale: "fr"}, tojson(profileObj)); + assert.eq(profileObj.command.arrayFilters, [{i: 0}], tojson(profileObj)); +} + +// +// Confirm metrics for multiple indexed document update. +// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.writeOK(coll.update({a: {$gte: 5}}, {$set: {c: 1}, $inc: {a: -10}}, {multi: true})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.keysExamined, 5, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 5, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 5, tojson(profileObj)); +assert.eq(profileObj.keysDeleted, 5, tojson(profileObj)); +assert.eq(profileObj.nMatched, 5, tojson(profileObj)); +assert.eq(profileObj.nModified, 5, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { a: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm metrics for insert on update with "upsert: true". 
+// +coll.drop(); +for (i = 0; i < 10; ++i) { + assert.writeOK(coll.insert({a: i})); +} +assert.commandWorked(coll.createIndex({a: 1})); + +assert.writeOK(coll.update({_id: "new value", a: 4}, {$inc: {b: 1}}, {upsert: true})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.command, + {q: {_id: "new value", a: 4}, u: {$inc: {b: 1}}, multi: false, upsert: true}, + tojson(profileObj)); +assert.eq(profileObj.keysExamined, 0, tojson(profileObj)); +assert.eq(profileObj.docsExamined, 0, tojson(profileObj)); +assert.eq(profileObj.keysInserted, 2, tojson(profileObj)); +assert.eq(profileObj.nMatched, 0, tojson(profileObj)); +assert.eq(profileObj.nModified, 0, tojson(profileObj)); +assert.eq(profileObj.upsert, true, tojson(profileObj)); +assert.eq(profileObj.planSummary, "IXSCAN { _id: 1 }", tojson(profileObj)); +assert(profileObj.execStats.hasOwnProperty("stage"), tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); + +// +// Confirm "fromMultiPlanner" metric. +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({b: 1})); +for (i = 0; i < 5; ++i) { + assert.writeOK(coll.insert({a: i, b: i})); +} + +assert.writeOK(coll.update({a: 3, b: 3}, {$set: {c: 1}})); +profileObj = getLatestProfilerEntry(testDB); + +assert.eq(profileObj.fromMultiPlanner, true, tojson(profileObj)); +assert.eq(profileObj.appName, "MongoDB Shell", tojson(profileObj)); })(); diff --git a/jstests/core/projection_dotted_paths.js b/jstests/core/projection_dotted_paths.js index e76feb7a2ee..5af357bde02 100644 --- a/jstests/core/projection_dotted_paths.js +++ b/jstests/core/projection_dotted_paths.js @@ -7,90 +7,89 @@ * when appropriate. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - let coll = db["projection_dotted_paths"]; - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1, "b.d": 1, c: 1})); - assert.writeOK(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1})); +let coll = db["projection_dotted_paths"]; +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, "b.c": 1, "b.d": 1, c: 1})); +assert.writeOK(coll.insert({_id: 1, a: 1, b: {c: 1, d: 1, e: 1}, c: 1, e: 1})); - // Project exactly the set of fields in the index. Verify that the projection is computed - // correctly and that the plan is covered. - let resultDoc = coll.findOne({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}); - assert.eq(resultDoc, {a: 1, b: {c: 1, d: 1}, c: 1}); - let explain = - coll.find({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Project exactly the set of fields in the index. Verify that the projection is computed +// correctly and that the plan is covered. +let resultDoc = coll.findOne({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}); +assert.eq(resultDoc, {a: 1, b: {c: 1, d: 1}, c: 1}); +let explain = coll.find({a: 1}, {_id: 0, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Project a subset of the indexed fields. Verify that the projection is computed correctly and - // that the plan is covered. 
- resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, c: 1}); - assert.eq(resultDoc, {b: {c: 1}, c: 1}); - explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Project a subset of the indexed fields. Verify that the projection is computed correctly and +// that the plan is covered. +resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, c: 1}); +assert.eq(resultDoc, {b: {c: 1}, c: 1}); +explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Project exactly the set of fields in the index but also include _id. Verify that the - // projection is computed correctly and that the plan cannot be covered. - resultDoc = coll.findOne({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}); - assert.eq(resultDoc, {_id: 1, a: 1, b: {c: 1, d: 1}, c: 1}); - explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner"); - explain = coll.find({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Project exactly the set of fields in the index but also include _id. Verify that the +// projection is computed correctly and that the plan cannot be covered. +resultDoc = coll.findOne({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}); +assert.eq(resultDoc, {_id: 1, a: 1, b: {c: 1, d: 1}, c: 1}); +explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner"); +explain = coll.find({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Project a not-indexed field that exists in the collection. The plan should not be covered. - resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}); - assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1}); - explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Project a not-indexed field that exists in the collection. The plan should not be covered. +resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}); +assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1}); +explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Project a not-indexed field that does not exist in the collection. The plan should not be - // covered. - resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}); - assert.eq(resultDoc, {b: {c: 1}, c: 1}); - explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Project a not-indexed field that does not exist in the collection. The plan should not be +// covered. 
+resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}); +assert.eq(resultDoc, {b: {c: 1}, c: 1}); +explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Verify that the correct projection is computed with an idhack query. - resultDoc = coll.findOne({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}); - assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1}); - explain = coll.find({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner"); - assert(isIdhack(db, explain.queryPlanner.winningPlan)); +// Verify that the correct projection is computed with an idhack query. +resultDoc = coll.findOne({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}); +assert.eq(resultDoc, {b: {c: 1, e: 1}, c: 1}); +explain = coll.find({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner"); +assert(isIdhack(db, explain.queryPlanner.winningPlan)); - // If we make a dotted path multikey, projections using that path cannot be covered. But - // projections which do not include the multikey path can still be covered. - assert.writeOK(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}})); +// If we make a dotted path multikey, projections using that path cannot be covered. But +// projections which do not include the multikey path can still be covered. +assert.writeOK(coll.insert({a: 2, b: {c: 1, d: [1, 2, 3]}})); - resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}); - assert.eq(resultDoc, {b: {c: 1, d: [1, 2, 3]}}); - explain = coll.find({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}); +assert.eq(resultDoc, {b: {c: 1, d: [1, 2, 3]}}); +explain = coll.find({a: 2}, {_id: 0, "b.c": 1, "b.d": 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1}); - assert.eq(resultDoc, {b: {c: 1}}); - explain = coll.find({a: 2}, {_id: 0, "b.c": 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - // Path-level multikey info allows for generating a covered plan. - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +resultDoc = coll.findOne({a: 2}, {_id: 0, "b.c": 1}); +assert.eq(resultDoc, {b: {c: 1}}); +explain = coll.find({a: 2}, {_id: 0, "b.c": 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +// Path-level multikey info allows for generating a covered plan. +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Verify that dotted projections work for multiple levels of nesting. - assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1})); - assert.writeOK(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}})); - resultDoc = coll.findOne({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}); - assert.eq(resultDoc, {x: {y: {y: 1, z: 1}, z: 1}}); - explain = coll.find({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}).explain("queryPlanner"); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Verify that dotted projections work for multiple levels of nesting. 
+assert.commandWorked(coll.createIndex({a: 1, "x.y.y": 1, "x.y.z": 1, "x.z": 1})); +assert.writeOK(coll.insert({a: 3, x: {y: {y: 1, f: 1, z: 1}, f: 1, z: 1}})); +resultDoc = coll.findOne({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}); +assert.eq(resultDoc, {x: {y: {y: 1, z: 1}, z: 1}}); +explain = coll.find({a: 3}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}).explain("queryPlanner"); +assert(isIxscan(db, explain.queryPlanner.winningPlan)); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // If projected nested paths do not exist in the indexed document, then they will get filled in - // with nulls. This is a bug tracked by SERVER-23229. - resultDoc = coll.findOne({a: 1}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}); - assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}}); +// If projected nested paths do not exist in the indexed document, then they will get filled in +// with nulls. This is a bug tracked by SERVER-23229. +resultDoc = coll.findOne({a: 1}, {_id: 0, "x.y.y": 1, "x.y.z": 1, "x.z": 1}); +assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}}); }()); diff --git a/jstests/core/push2.js b/jstests/core/push2.js index c8d8e7be64c..10669aa2581 100644 --- a/jstests/core/push2.js +++ b/jstests/core/push2.js @@ -1,22 +1,22 @@ (function() { - t = db.push2; - t.drop(); +t = db.push2; +t.drop(); - t.save({_id: 1, a: []}); +t.save({_id: 1, a: []}); - s = new Array(700000).toString(); +s = new Array(700000).toString(); - gotError = null; +gotError = null; - for (x = 0; x < 100; x++) { - print(x + " pushes"); - var res = t.update({}, {$push: {a: s}}); - gotError = res.hasWriteError(); - if (gotError) - break; - } +for (x = 0; x < 100; x++) { + print(x + " pushes"); + var res = t.update({}, {$push: {a: s}}); + gotError = res.hasWriteError(); + if (gotError) + break; +} - assert(gotError, "should have gotten error"); +assert(gotError, "should have gotten error"); - t.drop(); +t.drop(); })(); diff --git a/jstests/core/query_hash_stability.js b/jstests/core/query_hash_stability.js index 14ae20fdb98..4efa9b74e4e 100644 --- a/jstests/core/query_hash_stability.js +++ b/jstests/core/query_hash_stability.js @@ -3,54 +3,55 @@ * across catalog changes. */ (function() { - "use strict"; - load('jstests/libs/fixture_helpers.js'); // For and isMongos(). - - const collName = "query_hash_stability"; - const coll = db[collName]; - coll.drop(); - // Be sure the collection exists. - assert.commandWorked(coll.insert({x: 5})); - - function getPlanCacheKeyFromExplain(explainRes) { - const hash = FixtureHelpers.isMongos(db) - ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey - : explainRes.queryPlanner.planCacheKey; - assert.eq(typeof(hash), "string"); - return hash; - } - - function getQueryHashFromExplain(explainRes) { - const hash = FixtureHelpers.isMongos(db) - ? explainRes.queryPlanner.winningPlan.shards[0].queryHash - : explainRes.queryPlanner.queryHash; - assert.eq(typeof(hash), "string"); - return hash; - } - - const query = {x: 3}; - - const initialExplain = coll.find(query).explain(); - - // Add a sparse index. - assert.commandWorked(coll.createIndex({x: 1}, {sparse: true})); - - const withIndexExplain = coll.find(query).explain(); - - // 'queryHash' shouldn't change across catalog changes. - assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(withIndexExplain)); - // We added an index so the plan cache key changed. - assert.neq(getPlanCacheKeyFromExplain(initialExplain), - getPlanCacheKeyFromExplain(withIndexExplain)); - - // Drop the index. 
-    assert.commandWorked(coll.dropIndex({x: 1}));
-    const postDropExplain = coll.find(query).explain();
-
-    // 'queryHash' shouldn't change across catalog changes.
-    assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(postDropExplain));
-
-    // The 'planCacheKey' should be the same as what it was before we dropped the index.
-    assert.eq(getPlanCacheKeyFromExplain(initialExplain),
-              getPlanCacheKeyFromExplain(postDropExplain));
+"use strict";
+load('jstests/libs/fixture_helpers.js');  // For FixtureHelpers.isMongos().
+
+const collName = "query_hash_stability";
+const coll = db[collName];
+coll.drop();
+// Be sure the collection exists.
+assert.commandWorked(coll.insert({x: 5}));
+
+function getPlanCacheKeyFromExplain(explainRes) {
+    const hash = FixtureHelpers.isMongos(db)
+        ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
+        : explainRes.queryPlanner.planCacheKey;
+    assert.eq(typeof (hash), "string");
+    return hash;
+}
+
+function getQueryHashFromExplain(explainRes) {
+    const hash = FixtureHelpers.isMongos(db)
+        ? explainRes.queryPlanner.winningPlan.shards[0].queryHash
+        : explainRes.queryPlanner.queryHash;
+    assert.eq(typeof (hash), "string");
+    return hash;
+}
+
+const query = {
+    x: 3
+};
+
+const initialExplain = coll.find(query).explain();
+
+// Add a sparse index.
+assert.commandWorked(coll.createIndex({x: 1}, {sparse: true}));
+
+const withIndexExplain = coll.find(query).explain();
+
+// 'queryHash' shouldn't change across catalog changes.
+assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(withIndexExplain));
+// We added an index so the plan cache key changed.
+assert.neq(getPlanCacheKeyFromExplain(initialExplain),
+           getPlanCacheKeyFromExplain(withIndexExplain));
+
+// Drop the index.
+assert.commandWorked(coll.dropIndex({x: 1}));
+const postDropExplain = coll.find(query).explain();
+
+// 'queryHash' shouldn't change across catalog changes.
+assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(postDropExplain));
+
+// The 'planCacheKey' should be the same as what it was before we dropped the index.
+assert.eq(getPlanCacheKeyFromExplain(initialExplain), getPlanCacheKeyFromExplain(postDropExplain)); })(); diff --git a/jstests/core/queryoptimizer3.js b/jstests/core/queryoptimizer3.js index 277ad738ce1..9fa0585991a 100644 --- a/jstests/core/queryoptimizer3.js +++ b/jstests/core/queryoptimizer3.js @@ -13,57 +13,57 @@ // ] (function() { - 'use strict'; +'use strict'; - var coll = db.jstests_queryoptimizer3; +var coll = db.jstests_queryoptimizer3; - var shellWaitHandle = startParallelShell(function() { - for (var i = 0; i < 400; ++i) { - sleep(50); - try { - db.jstests_queryoptimizer3.drop(); - } catch (e) { - if (e.code === ErrorCodes.BackgroundOperationInProgressForNamespace) { - print("Background operation temporarily in progress while attempting to drop " + - "collection."); - continue; - } - throw e; +var shellWaitHandle = startParallelShell(function() { + for (var i = 0; i < 400; ++i) { + sleep(50); + try { + db.jstests_queryoptimizer3.drop(); + } catch (e) { + if (e.code === ErrorCodes.BackgroundOperationInProgressForNamespace) { + print("Background operation temporarily in progress while attempting to drop " + + "collection."); + continue; } + throw e; } - }); + } +}); - for (var i = 0; i < 100; ++i) { - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); - assert.commandWorked(coll.ensureIndex({b: 1})); +for (var i = 0; i < 100; ++i) { + coll.drop(); + assert.commandWorked(coll.ensureIndex({a: 1})); + assert.commandWorked(coll.ensureIndex({b: 1})); - var bulk = coll.initializeUnorderedBulkOp(); - for (var j = 0; j < 100; ++j) { - bulk.insert({a: j, b: j}); - } - assert.commandWorked(bulk.execute()); + var bulk = coll.initializeUnorderedBulkOp(); + for (var j = 0; j < 100; ++j) { + bulk.insert({a: j, b: j}); + } + assert.commandWorked(bulk.execute()); - try { - var m = i % 5; - if (m == 0) { - coll.count({a: {$gte: 0}, b: {$gte: 0}}); - } else if (m == 1) { - coll.find({a: {$gte: 0}, b: {$gte: 0}}).itcount(); - } else if (m == 2) { - coll.remove({a: {$gte: 0}, b: {$gte: 0}}); - } else if (m == 3) { - coll.update({a: {$gte: 0}, b: {$gte: 0}}, {}); - } else if (m == 4) { - coll.distinct('x', {a: {$gte: 0}, b: {$gte: 0}}); - } - } catch (e) { - print("Op killed during yield: " + e.message); + try { + var m = i % 5; + if (m == 0) { + coll.count({a: {$gte: 0}, b: {$gte: 0}}); + } else if (m == 1) { + coll.find({a: {$gte: 0}, b: {$gte: 0}}).itcount(); + } else if (m == 2) { + coll.remove({a: {$gte: 0}, b: {$gte: 0}}); + } else if (m == 3) { + coll.update({a: {$gte: 0}, b: {$gte: 0}}, {}); + } else if (m == 4) { + coll.distinct('x', {a: {$gte: 0}, b: {$gte: 0}}); } + } catch (e) { + print("Op killed during yield: " + e.message); } +} - shellWaitHandle(); +shellWaitHandle(); - // Ensure that the server is still responding - assert.commandWorked(db.runCommand({isMaster: 1})); +// Ensure that the server is still responding +assert.commandWorked(db.runCommand({isMaster: 1})); })(); diff --git a/jstests/core/read_after_optime.js b/jstests/core/read_after_optime.js index 33c5594d742..15ca380de47 100644 --- a/jstests/core/read_after_optime.js +++ b/jstests/core/read_after_optime.js @@ -1,14 +1,14 @@ // Test that attempting to read after optime fails if replication is not enabled. 
(function() { - "use strict"; +"use strict"; - var currentTime = new Date(); +var currentTime = new Date(); - var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0); +var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0); - assert.commandFailedWithCode( - db.runCommand( - {find: 'user', filter: {x: 1}, readConcern: {afterOpTime: {ts: futureOpTime, t: 0}}}), - [ErrorCodes.NotAReplicaSet, ErrorCodes.NotImplemented]); +assert.commandFailedWithCode( + db.runCommand( + {find: 'user', filter: {x: 1}, readConcern: {afterOpTime: {ts: futureOpTime, t: 0}}}), + [ErrorCodes.NotAReplicaSet, ErrorCodes.NotImplemented]); })(); diff --git a/jstests/core/record_store_count.js b/jstests/core/record_store_count.js index 2748b451031..61a1680fa94 100644 --- a/jstests/core/record_store_count.js +++ b/jstests/core/record_store_count.js @@ -7,78 +7,81 @@ load("jstests/libs/analyze_plan.js"); // For 'planHasStage'. load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded. (function() { - "use strict"; +"use strict"; - var coll = db.record_store_count; - coll.drop(); +var coll = db.record_store_count; +coll.drop(); - assert.writeOK(coll.insert({x: 0})); - assert.writeOK(coll.insert({x: 1})); +assert.writeOK(coll.insert({x: 0})); +assert.writeOK(coll.insert({x: 1})); - assert.commandWorked(coll.ensureIndex({x: 1})); +assert.commandWorked(coll.ensureIndex({x: 1})); - // - // Logically empty predicates should use the record store's count. - // - // If the collection is sharded, however, then we can't use fast count, since we need to perform - // shard filtering to avoid counting data that is not logically owned by the shard. - // - var explain = coll.explain().count({}); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN")); - if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) { - assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT")); - } +// +// Logically empty predicates should use the record store's count. +// +// If the collection is sharded, however, then we can't use fast count, since we need to perform +// shard filtering to avoid counting data that is not logically owned by the shard. +// +var explain = coll.explain().count({}); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN")); +if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) { + assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT")); +} - explain = coll.explain().count({$comment: "hi"}); - assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN")); - if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) { - assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT")); - } +explain = coll.explain().count({$comment: "hi"}); +assert(!planHasStage(db, explain.queryPlanner.winningPlan, "COLLSCAN")); +if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) { + assert(planHasStage(db, explain.queryPlanner.winningPlan, "RECORD_STORE_FAST_COUNT")); +} - // - // A non-empty query predicate should prevent the use of the record store's count. - // +// +// A non-empty query predicate should prevent the use of the record store's count. 
+//

-    function checkPlan(plan, expectedStages, unexpectedStages) {
-        for (let stage of expectedStages) {
-            assert(planHasStage(db, plan, stage));
-        }
-        for (let stage of unexpectedStages) {
-            assert(!planHasStage(db, plan, stage));
-        }
+function checkPlan(plan, expectedStages, unexpectedStages) {
+    for (let stage of expectedStages) {
+        assert(planHasStage(db, plan, stage));
+    }
+    for (let stage of unexpectedStages) {
+        assert(!planHasStage(db, plan, stage));
     }
+}

-    function testExplainAndExpectStage({expectedStages, unexpectedStages, hintIndex}) {
-        explain = coll.explain().find({x: 0}).hint(hintIndex).count();
-        checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
+function testExplainAndExpectStage({expectedStages, unexpectedStages, hintIndex}) {
+    explain = coll.explain().find({x: 0}).hint(hintIndex).count();
+    checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);

-        explain = coll.explain().find({x: 0, $comment: "hi"}).hint(hintIndex).count();
-        checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
-    }
+    explain = coll.explain().find({x: 0, $comment: "hi"}).hint(hintIndex).count();
+    checkPlan(explain.queryPlanner.winningPlan, expectedStages, unexpectedStages);
+}

-    if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
-        // In an unsharded collection we can use the COUNT_SCAN stage.
-        testExplainAndExpectStage(
-            {expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}});
-        return;
-    }
+if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) {
+    // In an unsharded collection we can use the COUNT_SCAN stage.
+    testExplainAndExpectStage(
+        {expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}});
+    return;
+}

-    // The remainder of the test is only relevant for sharded clusters.
+// The remainder of the test is only relevant for sharded clusters.

-    // Without an index on the shard key, the entire document will have to be fetched.
-    testExplainAndExpectStage({
-        expectedStages: ["COUNT", "SHARDING_FILTER", "FETCH"],
-        unexpectedStages: [],
-        hintIndex: {x: 1}
-    });
+// Without an index on the shard key, the entire document will have to be fetched.
+testExplainAndExpectStage({
+    expectedStages: ["COUNT", "SHARDING_FILTER", "FETCH"],
+    unexpectedStages: [],
+    hintIndex: {x: 1}
+});

-    // Add an index which includes the shard key. This means the FETCH should no longer be necessary
-    // since the SHARDING_FILTER can get the shard key straight from the index.
-    const kNewIndexSpec = {x: 1, _id: 1};
-    assert.commandWorked(coll.ensureIndex(kNewIndexSpec));
-    testExplainAndExpectStage({
-        expectedStages: ["COUNT", "SHARDING_FILTER"],
-        unexpectedStages: ["FETCH"],
-        hintIndex: kNewIndexSpec
-    });
+// Add an index which includes the shard key. This means the FETCH should no longer be necessary
+// since the SHARDING_FILTER can get the shard key straight from the index.
+const kNewIndexSpec = { + x: 1, + _id: 1 +}; +assert.commandWorked(coll.ensureIndex(kNewIndexSpec)); +testExplainAndExpectStage({ + expectedStages: ["COUNT", "SHARDING_FILTER"], + unexpectedStages: ["FETCH"], + hintIndex: kNewIndexSpec +}); })(); diff --git a/jstests/core/recursion.js b/jstests/core/recursion.js index 6f6e5c906af..617a51edccd 100644 --- a/jstests/core/recursion.js +++ b/jstests/core/recursion.js @@ -7,29 +7,28 @@ // ] (function() { - "use strict"; +"use strict"; - db.recursion.drop(); +db.recursion.drop(); - // Make sure the shell doesn't blow up - function shellRecursion() { - shellRecursion.apply(); - } - assert.throws(shellRecursion); +// Make sure the shell doesn't blow up +function shellRecursion() { + shellRecursion.apply(); +} +assert.throws(shellRecursion); - // Make sure mapReduce doesn't blow up - function mapReduceRecursion() { - db.recursion.mapReduce( - function() { - (function recursion() { - recursion.apply(); - })(); - }, - function() {}, - {out: 'inline'}); - } +// Make sure mapReduce doesn't blow up +function mapReduceRecursion() { + db.recursion.mapReduce( + function() { + (function recursion() { + recursion.apply(); + })(); + }, + function() {}, + {out: 'inline'}); +} - db.recursion.insert({}); - assert.commandFailedWithCode(assert.throws(mapReduceRecursion), - ErrorCodes.JSInterpreterFailure); +db.recursion.insert({}); +assert.commandFailedWithCode(assert.throws(mapReduceRecursion), ErrorCodes.JSInterpreterFailure); }()); diff --git a/jstests/core/regex.js b/jstests/core/regex.js index 1c6a9d6a3bb..488d41f41d0 100644 --- a/jstests/core/regex.js +++ b/jstests/core/regex.js @@ -1,83 +1,85 @@ (function() { - 'use strict'; - - const t = db.jstests_regex; - - const isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - const isMongos = (isMaster.msg === "isdbgrid"); - - t.drop(); - assert.writeOK(t.save({a: "bcd"})); - assert.eq(1, t.count({a: /b/}), "A"); - assert.eq(1, t.count({a: /bc/}), "B"); - assert.eq(1, t.count({a: /bcd/}), "C"); - assert.eq(0, t.count({a: /bcde/}), "D"); - - t.drop(); - assert.writeOK(t.save({a: {b: "cde"}})); - assert.eq(1, t.count({'a.b': /de/}), "E"); - - t.drop(); - assert.writeOK(t.save({a: {b: ["cde"]}})); - assert.eq(1, t.count({'a.b': /de/}), "F"); - - t.drop(); - assert.writeOK(t.save({a: [{b: "cde"}]})); - assert.eq(1, t.count({'a.b': /de/}), "G"); - - t.drop(); - assert.writeOK(t.save({a: [{b: ["cde"]}]})); - assert.eq(1, t.count({'a.b': /de/}), "H"); - - // - // Confirm match and explain serialization for $elemMatch with $regex. - // - t.drop(); - assert.writeOK(t.insert({x: ["abc"]})); - - const query = {x: {$elemMatch: {$regex: 'ABC', $options: 'i'}}}; - assert.eq(1, t.count(query)); - - const result = t.find(query).explain(); - assert.commandWorked(result); - - if (!isMongos) { - assert(result.hasOwnProperty("queryPlanner")); - assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result)); - assert.eq(result.queryPlanner.parsedQuery, query); - } - - // - // Disallow embedded null bytes when using $regex syntax. - // - t.drop(); - assert.throws(function() { - t.find({a: {$regex: "a\0b", $options: "i"}}).itcount(); - }); - assert.throws(function() { - t.find({a: {$regex: "ab", $options: "i\0"}}).itcount(); - }); - assert.throws(function() { - t.find({key: {$regex: 'abcd\0xyz'}}).explain(); - }); - - // - // Confirm $options and mode specified in $regex are not allowed to be specified together. 
- // - t.drop(); - assert.commandWorked(t.insert({x: ["abc"]})); - - let regexFirst = assert.throws(() => t.find({x: {$regex: /ab/i, $options: 's'}}).itcount()); - assert.commandFailedWithCode(regexFirst, 51075); - - let optsFirst = assert.throws(() => t.find({x: {$options: 's', $regex: /ab/i}}).itcount()); - assert.commandFailedWithCode(optsFirst, 51074); - - t.drop(); - assert.commandWorked(t.save({x: ["abc"]})); - - assert.eq(1, t.count({x: {$regex: /ABC/i}})); - assert.eq(1, t.count({x: {$regex: /ABC/, $options: 'i'}})); - assert.eq(1, t.count({x: {$options: 'i', $regex: /ABC/}})); +'use strict'; + +const t = db.jstests_regex; + +const isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +const isMongos = (isMaster.msg === "isdbgrid"); + +t.drop(); +assert.writeOK(t.save({a: "bcd"})); +assert.eq(1, t.count({a: /b/}), "A"); +assert.eq(1, t.count({a: /bc/}), "B"); +assert.eq(1, t.count({a: /bcd/}), "C"); +assert.eq(0, t.count({a: /bcde/}), "D"); + +t.drop(); +assert.writeOK(t.save({a: {b: "cde"}})); +assert.eq(1, t.count({'a.b': /de/}), "E"); + +t.drop(); +assert.writeOK(t.save({a: {b: ["cde"]}})); +assert.eq(1, t.count({'a.b': /de/}), "F"); + +t.drop(); +assert.writeOK(t.save({a: [{b: "cde"}]})); +assert.eq(1, t.count({'a.b': /de/}), "G"); + +t.drop(); +assert.writeOK(t.save({a: [{b: ["cde"]}]})); +assert.eq(1, t.count({'a.b': /de/}), "H"); + +// +// Confirm match and explain serialization for $elemMatch with $regex. +// +t.drop(); +assert.writeOK(t.insert({x: ["abc"]})); + +const query = { + x: {$elemMatch: {$regex: 'ABC', $options: 'i'}} +}; +assert.eq(1, t.count(query)); + +const result = t.find(query).explain(); +assert.commandWorked(result); + +if (!isMongos) { + assert(result.hasOwnProperty("queryPlanner")); + assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result)); + assert.eq(result.queryPlanner.parsedQuery, query); +} + +// +// Disallow embedded null bytes when using $regex syntax. +// +t.drop(); +assert.throws(function() { + t.find({a: {$regex: "a\0b", $options: "i"}}).itcount(); +}); +assert.throws(function() { + t.find({a: {$regex: "ab", $options: "i\0"}}).itcount(); +}); +assert.throws(function() { + t.find({key: {$regex: 'abcd\0xyz'}}).explain(); +}); + +// +// Confirm $options and mode specified in $regex are not allowed to be specified together. 
+// +t.drop(); +assert.commandWorked(t.insert({x: ["abc"]})); + +let regexFirst = assert.throws(() => t.find({x: {$regex: /ab/i, $options: 's'}}).itcount()); +assert.commandFailedWithCode(regexFirst, 51075); + +let optsFirst = assert.throws(() => t.find({x: {$options: 's', $regex: /ab/i}}).itcount()); +assert.commandFailedWithCode(optsFirst, 51074); + +t.drop(); +assert.commandWorked(t.save({x: ["abc"]})); + +assert.eq(1, t.count({x: {$regex: /ABC/i}})); +assert.eq(1, t.count({x: {$regex: /ABC/, $options: 'i'}})); +assert.eq(1, t.count({x: {$options: 'i', $regex: /ABC/}})); })(); diff --git a/jstests/core/regex5.js b/jstests/core/regex5.js index 6d11fce5578..69537e149e5 100644 --- a/jstests/core/regex5.js +++ b/jstests/core/regex5.js @@ -15,7 +15,6 @@ a = /.*b.*c/; x = /.*y.*/; doit = function() { - assert.eq(1, t.find({x: a}).count(), "A"); assert.eq(2, t.find({x: x}).count(), "B"); assert.eq(2, t.find({x: {$in: [x]}}).count(), "C"); // SERVER-322 diff --git a/jstests/core/regex_error.js b/jstests/core/regex_error.js index a6deb56c460..19e191d754c 100644 --- a/jstests/core/regex_error.js +++ b/jstests/core/regex_error.js @@ -2,14 +2,13 @@ * Test that the server errors when given an invalid regex. */ (function() { - const coll = db.regex_error; - coll.drop(); +const coll = db.regex_error; +coll.drop(); - // Run some invalid regexes. - assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "[)"}}}), 51091); - assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "ab\0c"}}}), - ErrorCodes.BadValue); - assert.commandFailedWithCode( - coll.runCommand("find", {filter: {a: {$regex: "ab", $options: "\0i"}}}), - ErrorCodes.BadValue); +// Run some invalid regexes. +assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "[)"}}}), 51091); +assert.commandFailedWithCode(coll.runCommand("find", {filter: {a: {$regex: "ab\0c"}}}), + ErrorCodes.BadValue); +assert.commandFailedWithCode( + coll.runCommand("find", {filter: {a: {$regex: "ab", $options: "\0i"}}}), ErrorCodes.BadValue); })(); diff --git a/jstests/core/regex_limit.js b/jstests/core/regex_limit.js index 71a8c4e915c..31f72b758f4 100644 --- a/jstests/core/regex_limit.js +++ b/jstests/core/regex_limit.js @@ -2,25 +2,25 @@ * Test the behavior of very, very long regex patterns. */ (function() { - "use strict"; +"use strict"; - const coll = db.regex_limit; - coll.drop(); +const coll = db.regex_limit; +coll.drop(); - const kMaxRegexPatternLen = 32761; +const kMaxRegexPatternLen = 32761; - // Populate the collection with a document containing a very long string. - assert.commandWorked(coll.insert({z: "c".repeat(100000)})); +// Populate the collection with a document containing a very long string. +assert.commandWorked(coll.insert({z: "c".repeat(100000)})); - // Test that a regex exactly at the maximum allowable pattern length can find a document. - const patternMaxLen = "c".repeat(kMaxRegexPatternLen); - assert.eq(1, coll.find({z: {$regex: patternMaxLen}}).itcount()); - assert.eq(1, coll.find({z: {$in: [new RegExp(patternMaxLen)]}}).itcount()); +// Test that a regex exactly at the maximum allowable pattern length can find a document. +const patternMaxLen = "c".repeat(kMaxRegexPatternLen); +assert.eq(1, coll.find({z: {$regex: patternMaxLen}}).itcount()); +assert.eq(1, coll.find({z: {$in: [new RegExp(patternMaxLen)]}}).itcount()); - // Test that a regex pattern exceeding the limit fails. 
- const patternTooLong = "c".repeat(kMaxRegexPatternLen + 1); - assert.commandFailedWithCode(coll.runCommand("find", {filter: {z: {$regex: patternTooLong}}}), - 51091); - assert.commandFailedWithCode( - coll.runCommand("find", {filter: {z: {$in: [new RegExp(patternTooLong)]}}}), 51091); +// Test that a regex pattern exceeding the limit fails. +const patternTooLong = "c".repeat(kMaxRegexPatternLen + 1); +assert.commandFailedWithCode(coll.runCommand("find", {filter: {z: {$regex: patternTooLong}}}), + 51091); +assert.commandFailedWithCode( + coll.runCommand("find", {filter: {z: {$in: [new RegExp(patternTooLong)]}}}), 51091); }()); diff --git a/jstests/core/regex_unicode.js b/jstests/core/regex_unicode.js index 32a3d177831..2befd6f700c 100644 --- a/jstests/core/regex_unicode.js +++ b/jstests/core/regex_unicode.js @@ -2,113 +2,122 @@ * Test regexes with various Unicode options. */ (function() { - "use strict"; - - const coll = db.getCollection("regex_unicode"); - coll.drop(); - - // Populate the collection with strings containing ASCII and non-ASCII characters. - let docAllAscii = {_id: 0, text: "kyle"}; - let docNoAscii = {_id: 1, text: "ë°•ì •ìˆ˜"}; - let docMixed = {_id: 2, text: "suárez"}; - [docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc))); - - /** - * Helper function that asserts that a find command with a filter on the "text" field using - * 'regex' returns 'expected' when sorting by _id ascending. - */ - function assertFindResultsEq(regex, expected) { - const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray(); - const errfn = - `Regex query "${regex}" returned ${tojson(res)} ` + `but expected ${tojson(expected)}`; - assert.eq(res, expected, errfn); - } - - // Sanity check on exact characters. - assertFindResultsEq("y", [docAllAscii]); - assertFindResultsEq("e", [docAllAscii, docMixed]); - assertFindResultsEq("á", [docMixed]); - assertFindResultsEq("ì •", [docNoAscii]); - - // Test that the (*UTF) and (*UTF8) options are accepted. - assertFindResultsEq("(*UTF)e", [docAllAscii, docMixed]); - assertFindResultsEq("(*UTF)á", [docMixed]); - assertFindResultsEq("(*UTF)ì •", [docNoAscii]); - assertFindResultsEq("(*UTF8)e", [docAllAscii, docMixed]); - assertFindResultsEq("(*UTF8)á", [docMixed]); - assertFindResultsEq("(*UTF8)ì •", [docNoAscii]); - - // Test that regexes support Unicode character properties. - assertFindResultsEq(String.raw `\p{Latin}`, [docAllAscii, docMixed]); - assertFindResultsEq(String.raw `^\p{Latin}+$`, [docAllAscii, docMixed]); - assertFindResultsEq(String.raw `\p{Hangul}`, [docNoAscii]); - assertFindResultsEq(String.raw `^\p{Hangul}+$`, [docNoAscii]); - assertFindResultsEq(String.raw `^\p{L}+$`, [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq(String.raw `^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]); - - // Tests for the '\w' character type, which matches any "word" character. In the default mode, - // characters outside of the ASCII code point range are excluded. - - // An unanchored regex should match the two documents that contain at least one ASCII character. - assertFindResultsEq(String.raw `\w`, [docAllAscii, docMixed]); - - // This anchored regex will only match the document with exclusively ASCII characters, since the - // Unicode character in the mixed document will prevent it from being considered all "word" - // characters. 
- assertFindResultsEq(String.raw `^\w+$`, [docAllAscii]); - - // When the (*UCP) option is specified, Unicode "word" characters are included in the '\w' - // character type, so all three documents should match. - assertFindResultsEq(String.raw `(*UCP)\w`, [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq(String.raw `(*UCP)^\w+$`, [docAllAscii, docNoAscii, docMixed]); - - // By default, the [:alpha:] character class matches ASCII alphabetic characters. - assertFindResultsEq("[[:alpha:]]", [docAllAscii, docMixed]); - assertFindResultsEq("^[[:alpha:]]+$", [docAllAscii]); - - // When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode - // alphabetic characters. - assertFindResultsEq("(*UCP)[[:alpha:]]", [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq("(*UCP)^[[:alpha:]]+$", [docAllAscii, docNoAscii, docMixed]); - - // Drop the collection and repopulate it with numerical characters. - coll.drop(); - docAllAscii = {_id: 0, text: "02191996"}; - docNoAscii = {_id: 1, text: "༢༣༤༥"}; - docMixed = {_id: 2, text: "9à©àªà¬à¯6"}; - [docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc))); - - // Sanity check on exact characters. - assertFindResultsEq("1", [docAllAscii]); - assertFindResultsEq("9", [docAllAscii, docMixed]); - assertFindResultsEq("àª", [docMixed]); - assertFindResultsEq("༣", [docNoAscii]); - - // Test that the regexes are matched by the numeric Unicode character property. - assertFindResultsEq(String.raw `^\p{N}+$`, [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq(String.raw `^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]); - - // Tests for the '\d' character type, which matches any "digit" character. In the default mode, - // characters outside of the ASCII code point range are excluded. - // An unanchored regex should match the two documents that contain at least one ASCII character. - assertFindResultsEq(String.raw `\d`, [docAllAscii, docMixed]); - - // This anchored regex will only match the document with exclusively ASCII characters, since the - // Unicode character in the mixed document will prevent it from being considered all "digit" - // characters. - assertFindResultsEq(String.raw `^\d+$`, [docAllAscii]); - - // When the (*UCP) option is specified, Unicode "digit" characters are included in the '\d' - // character type, so all three documents should match. - assertFindResultsEq(String.raw `(*UCP)\d`, [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq(String.raw `(*UCP)^\d+$`, [docAllAscii, docNoAscii, docMixed]); - - // By default, the [:digit:] character class matches ASCII decimal digit characters. - assertFindResultsEq("[[:digit:]]", [docAllAscii, docMixed]); - assertFindResultsEq("^[[:digit:]]+$", [docAllAscii]); - - // When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode - // decimal digit characters. - assertFindResultsEq("(*UCP)[[:digit:]]", [docAllAscii, docNoAscii, docMixed]); - assertFindResultsEq("(*UCP)^[[:digit:]]+$", [docAllAscii, docNoAscii, docMixed]); +"use strict"; + +const coll = db.getCollection("regex_unicode"); +coll.drop(); + +// Populate the collection with strings containing ASCII and non-ASCII characters. 
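+// (One document is all-ASCII, one is all-Hangul, and one mixes ASCII letters with an accented
+// Latin character, so each query below can tell the three cases apart.)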
+let docAllAscii = {_id: 0, text: "kyle"};
+let docNoAscii = {_id: 1, text: "박정수"};
+let docMixed = {_id: 2, text: "suárez"};
+[docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
+
+/**
+ * Helper function that asserts that a find command with a filter on the "text" field using
+ * 'regex' returns 'expected' when sorting by _id ascending.
+ */
+function assertFindResultsEq(regex, expected) {
+    const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray();
+    const errfn = `Regex query "${regex}" returned ${tojson(res)} ` +
+        `but expected ${tojson(expected)}`;
+    assert.eq(res, expected, errfn);
+}
+
+// Sanity check on exact characters.
+assertFindResultsEq("y", [docAllAscii]);
+assertFindResultsEq("e", [docAllAscii, docMixed]);
+assertFindResultsEq("á", [docMixed]);
+assertFindResultsEq("정", [docNoAscii]);
+
+// Test that the (*UTF) and (*UTF8) options are accepted.
+assertFindResultsEq("(*UTF)e", [docAllAscii, docMixed]);
+assertFindResultsEq("(*UTF)á", [docMixed]);
+assertFindResultsEq("(*UTF)정", [docNoAscii]);
+assertFindResultsEq("(*UTF8)e", [docAllAscii, docMixed]);
+assertFindResultsEq("(*UTF8)á", [docMixed]);
+assertFindResultsEq("(*UTF8)정", [docNoAscii]);
+
+// Test that regexes support Unicode character properties.
+assertFindResultsEq(String.raw`\p{Latin}`, [docAllAscii, docMixed]);
+assertFindResultsEq(String.raw`^\p{Latin}+$`, [docAllAscii, docMixed]);
+assertFindResultsEq(String.raw`\p{Hangul}`, [docNoAscii]);
+assertFindResultsEq(String.raw`^\p{Hangul}+$`, [docNoAscii]);
+assertFindResultsEq(String.raw`^\p{L}+$`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// Tests for the '\w' character type, which matches any "word" character. In the default mode,
+// characters outside of the ASCII code point range are excluded.
+
+// An unanchored regex should match the two documents that contain at least one ASCII character.
+assertFindResultsEq(String.raw`\w`, [docAllAscii, docMixed]);
+
+// This anchored regex will only match the document with exclusively ASCII characters, since the
+// Unicode character in the mixed document will prevent it from being considered all "word"
+// characters.
+assertFindResultsEq(String.raw`^\w+$`, [docAllAscii]);
+
+// When the (*UCP) option is specified, Unicode "word" characters are included in the '\w'
+// character type, so all three documents should match.
+assertFindResultsEq(String.raw`(*UCP)\w`, [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq(String.raw`(*UCP)^\w+$`, [docAllAscii, docNoAscii, docMixed]);
+
+// By default, the [:alpha:] character class matches ASCII alphabetic characters.
+assertFindResultsEq("[[:alpha:]]", [docAllAscii, docMixed]);
+assertFindResultsEq("^[[:alpha:]]+$", [docAllAscii]);
+
+// When the (*UCP) option is specified, [:alpha:] becomes \p{L} and matches all Unicode
+// alphabetic characters.
+assertFindResultsEq("(*UCP)[[:alpha:]]", [docAllAscii, docNoAscii, docMixed]);
+assertFindResultsEq("(*UCP)^[[:alpha:]]+$", [docAllAscii, docNoAscii, docMixed]);
+
+// Drop the collection and repopulate it with numerical characters.
+coll.drop();
+docAllAscii = {
+    _id: 0,
+    text: "02191996"
+};
+docNoAscii = {
+    _id: 1,
+    text: "༢༣༤༥"
+};
+docMixed = {
+    _id: 2,
+    text: "9à©àªà¬à¯6"
+};
+[docAllAscii, docNoAscii, docMixed].forEach((doc) => assert.commandWorked(coll.insert(doc)));
+
+// Sanity check on exact characters.
+assertFindResultsEq("1", [docAllAscii]); +assertFindResultsEq("9", [docAllAscii, docMixed]); +assertFindResultsEq("àª", [docMixed]); +assertFindResultsEq("༣", [docNoAscii]); + +// Test that the regexes are matched by the numeric Unicode character property. +assertFindResultsEq(String.raw`^\p{N}+$`, [docAllAscii, docNoAscii, docMixed]); +assertFindResultsEq(String.raw`^\p{Xan}+$`, [docAllAscii, docNoAscii, docMixed]); + +// Tests for the '\d' character type, which matches any "digit" character. In the default mode, +// characters outside of the ASCII code point range are excluded. +// An unanchored regex should match the two documents that contain at least one ASCII character. +assertFindResultsEq(String.raw`\d`, [docAllAscii, docMixed]); + +// This anchored regex will only match the document with exclusively ASCII characters, since the +// Unicode character in the mixed document will prevent it from being considered all "digit" +// characters. +assertFindResultsEq(String.raw`^\d+$`, [docAllAscii]); + +// When the (*UCP) option is specified, Unicode "digit" characters are included in the '\d' +// character type, so all three documents should match. +assertFindResultsEq(String.raw`(*UCP)\d`, [docAllAscii, docNoAscii, docMixed]); +assertFindResultsEq(String.raw`(*UCP)^\d+$`, [docAllAscii, docNoAscii, docMixed]); + +// By default, the [:digit:] character class matches ASCII decimal digit characters. +assertFindResultsEq("[[:digit:]]", [docAllAscii, docMixed]); +assertFindResultsEq("^[[:digit:]]+$", [docAllAscii]); + +// When the (*UCP) option is specified, [:digit:] becomes \p{N} and matches all Unicode +// decimal digit characters. +assertFindResultsEq("(*UCP)[[:digit:]]", [docAllAscii, docNoAscii, docMixed]); +assertFindResultsEq("(*UCP)^[[:digit:]]+$", [docAllAscii, docNoAscii, docMixed]); }()); diff --git a/jstests/core/regex_util.js b/jstests/core/regex_util.js index b0c7791b6c1..7d87ac5f283 100644 --- a/jstests/core/regex_util.js +++ b/jstests/core/regex_util.js @@ -1,26 +1,26 @@ // Tests for RegExp.escape (function() { - var TEST_STRINGS = [ - "[db]", - "{ab}", - "<c2>", - "(abc)", - "^first^", - "&addr", - "k@10gen.com", - "#4", - "!b", - "<>3", - "****word+", - "\t| |\n\r", - "Mongo-db", - "[{(<>)}]!@#%^&*+\\" - ]; +var TEST_STRINGS = [ + "[db]", + "{ab}", + "<c2>", + "(abc)", + "^first^", + "&addr", + "k@10gen.com", + "#4", + "!b", + "<>3", + "****word+", + "\t| |\n\r", + "Mongo-db", + "[{(<>)}]!@#%^&*+\\" +]; - TEST_STRINGS.forEach(function(str) { - var escaped = RegExp.escape(str); - var regex = new RegExp(escaped); - assert(regex.test(str), "Wrong escape for " + str); - }); +TEST_STRINGS.forEach(function(str) { + var escaped = RegExp.escape(str); + var regex = new RegExp(escaped); + assert(regex.test(str), "Wrong escape for " + str); +}); })(); diff --git a/jstests/core/regex_verbs.js b/jstests/core/regex_verbs.js index 92a03af1b4d..52ac9bb07bf 100644 --- a/jstests/core/regex_verbs.js +++ b/jstests/core/regex_verbs.js @@ -2,48 +2,54 @@ * Tests regular expressions and the use of various UCP verbs. */ (function() { - "use strict"; - - const coll = db.getCollection("regex_backtracking_verbs"); - coll.drop(); - - const docA = {_id: 0, text: "a"}; - const docB = {_id: 1, text: "b"}; - [docA, docB].forEach(doc => assert.commandWorked(coll.insert(doc))); - - /** - * Helper function that asserts that a find command with a filter on the "text" field using - * 'regex' returns 'expected' when sorting by _id ascending. 
- */ - function assertFindResultsEq(regex, expected) { - const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray(); - const errfn = `Regex query ${tojson(regex)} returned ${tojson(res)} ` + - `but expected ${tojson(expected)}`; - assert.eq(res, expected, errfn); - } - - const assertMatchesEverything = (regex) => assertFindResultsEq(regex, [docA, docB]); - const assertMatchesNothing = (regex) => assertFindResultsEq(regex, []); - - // On encountering FAIL, the pattern immediately does not match. - assertMatchesNothing("(*FAIL)"); - assertMatchesNothing("a(*FAIL)"); - assertMatchesNothing("(*FAIL)b"); - - // On encountering ACCEPT, the pattern immediately matches. - assertMatchesEverything("(*ACCEPT)"); - assertMatchesEverything("(*ACCEPT)a"); - assertMatchesEverything("(*ACCEPT)c"); - assertFindResultsEq("b(*ACCEPT)", [docB]); - - // The following tests simply assert that the backtracking verbs are accepted and do not - // influence matching. - ["COMMIT", "PRUNE", "PRUNE:FOO", "SKIP", "SKIP:BAR", "THEN", "THEN:BAZ"].forEach(verb => { - // Verb by itself is the same as an empty regex and matches everything. - assertMatchesEverything(`(*${verb})`); - - // Verb with pattern does not affect the "matchiness" of the pattern. - assertFindResultsEq(`(*${verb})a`, [docA]); - assertFindResultsEq(`(*${verb})[Bb]`, [docB]); - }); +"use strict"; + +const coll = db.getCollection("regex_backtracking_verbs"); +coll.drop(); + +const docA = { + _id: 0, + text: "a" +}; +const docB = { + _id: 1, + text: "b" +}; +[docA, docB].forEach(doc => assert.commandWorked(coll.insert(doc))); + +/** + * Helper function that asserts that a find command with a filter on the "text" field using + * 'regex' returns 'expected' when sorting by _id ascending. + */ +function assertFindResultsEq(regex, expected) { + const res = coll.find({text: {$regex: regex}}).sort({_id: 1}).toArray(); + const errfn = `Regex query ${tojson(regex)} returned ${tojson(res)} ` + + `but expected ${tojson(expected)}`; + assert.eq(res, expected, errfn); +} + +const assertMatchesEverything = (regex) => assertFindResultsEq(regex, [docA, docB]); +const assertMatchesNothing = (regex) => assertFindResultsEq(regex, []); + +// On encountering FAIL, the pattern immediately does not match. +assertMatchesNothing("(*FAIL)"); +assertMatchesNothing("a(*FAIL)"); +assertMatchesNothing("(*FAIL)b"); + +// On encountering ACCEPT, the pattern immediately matches. +assertMatchesEverything("(*ACCEPT)"); +assertMatchesEverything("(*ACCEPT)a"); +assertMatchesEverything("(*ACCEPT)c"); +assertFindResultsEq("b(*ACCEPT)", [docB]); + +// The following tests simply assert that the backtracking verbs are accepted and do not +// influence matching. +["COMMIT", "PRUNE", "PRUNE:FOO", "SKIP", "SKIP:BAR", "THEN", "THEN:BAZ"].forEach(verb => { + // Verb by itself is the same as an empty regex and matches everything. + assertMatchesEverything(`(*${verb})`); + + // Verb with pattern does not affect the "matchiness" of the pattern. 
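+    // (e.g. `(*COMMIT)a` should still match only docA; these verbs only constrain backtracking,
+    // not which strings the pattern accepts.)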
+ assertFindResultsEq(`(*${verb})a`, [docA]); + assertFindResultsEq(`(*${verb})[Bb]`, [docB]); +}); }()); diff --git a/jstests/core/remove2.js b/jstests/core/remove2.js index 601684f5041..50fe507c134 100644 --- a/jstests/core/remove2.js +++ b/jstests/core/remove2.js @@ -3,45 +3,45 @@ // remove2.js // a unit test for db remove (function() { - "use strict"; - - const t = db.removetest2; - - function f() { - t.save({ - x: [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6], - z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - }); - t.save({x: 9}); - t.save({x: 1}); - - t.remove({x: 3}); - - assert(t.findOne({x: 3}) == null); - assert(t.validate().valid); - } - - function g() { - t.save({x: [3, 4, 5, 6], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}); - t.save({x: [7, 8, 9], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}); - - const res = t.remove({x: {$gte: 3}}); - - assert.writeOK(res); - assert(t.findOne({x: 3}) == null); - assert(t.findOne({x: 8}) == null); - assert(t.validate().valid); - } - - t.drop(); - f(); - t.drop(); - g(); - - t.ensureIndex({x: 1}); - t.remove({}); - f(); - t.drop(); - t.ensureIndex({x: 1}); - g(); +"use strict"; + +const t = db.removetest2; + +function f() { + t.save({ + x: [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6], + z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + }); + t.save({x: 9}); + t.save({x: 1}); + + t.remove({x: 3}); + + assert(t.findOne({x: 3}) == null); + assert(t.validate().valid); +} + +function g() { + t.save({x: [3, 4, 5, 6], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}); + t.save({x: [7, 8, 9], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}); + + const res = t.remove({x: {$gte: 3}}); + + assert.writeOK(res); + assert(t.findOne({x: 3}) == null); + assert(t.findOne({x: 8}) == null); + assert(t.validate().valid); +} + +t.drop(); +f(); +t.drop(); +g(); + +t.ensureIndex({x: 1}); +t.remove({}); +f(); +t.drop(); +t.ensureIndex({x: 1}); +g(); })(); diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js index 8762e3944ff..888625764ec 100644 --- a/jstests/core/remove9.js +++ b/jstests/core/remove9.js @@ -7,32 +7,32 @@ // SERVER-2009 Count odd numbered entries while updating and deleting even numbered entries. 
(function() { - "use strict"; +"use strict"; - const t = db.jstests_remove9; - t.drop(); - t.ensureIndex({i: 1}); - - const bulk = t.initializeUnorderedBulkOp(); - for (let i = 0; i < 1000; ++i) { - bulk.insert({i: i}); - } - assert.writeOK(bulk.execute()); +const t = db.jstests_remove9; +t.drop(); +t.ensureIndex({i: 1}); - const s = startParallelShell(function() { - const t = db.jstests_remove9; - Random.setRandomSeed(); - for (let j = 0; j < 5000; ++j) { - const i = Random.randInt(499) * 2; - t.update({i: i}, {$set: {i: 2000}}); - t.remove({i: 2000}); - t.save({i: i}); - } - }); +const bulk = t.initializeUnorderedBulkOp(); +for (let i = 0; i < 1000; ++i) { + bulk.insert({i: i}); +} +assert.writeOK(bulk.execute()); - for (let i = 0; i < 1000; ++i) { - assert.eq(500, t.find({i: {$gte: 0, $mod: [2, 1]}}).hint({i: 1}).itcount()); +const s = startParallelShell(function() { + const t = db.jstests_remove9; + Random.setRandomSeed(); + for (let j = 0; j < 5000; ++j) { + const i = Random.randInt(499) * 2; + t.update({i: i}, {$set: {i: 2000}}); + t.remove({i: 2000}); + t.save({i: i}); } +}); + +for (let i = 0; i < 1000; ++i) { + assert.eq(500, t.find({i: {$gte: 0, $mod: [2, 1]}}).hint({i: 1}).itcount()); +} - s(); +s(); })(); diff --git a/jstests/core/remove_undefined.js b/jstests/core/remove_undefined.js index c0c031a5763..6b97cc5d053 100644 --- a/jstests/core/remove_undefined.js +++ b/jstests/core/remove_undefined.js @@ -1,32 +1,35 @@ // @tags: [requires_non_retryable_writes, requires_fastcount] (function() { - "use strict"; +"use strict"; - const coll = db.remove_undefined; - coll.drop(); +const coll = db.remove_undefined; +coll.drop(); - assert.writeOK(coll.insert({_id: 1})); - assert.writeOK(coll.insert({_id: 2})); - assert.writeOK(coll.insert({_id: null})); +assert.writeOK(coll.insert({_id: 1})); +assert.writeOK(coll.insert({_id: 2})); +assert.writeOK(coll.insert({_id: null})); - const obj = {foo: 1, nullElem: null}; +const obj = { + foo: 1, + nullElem: null +}; - coll.remove({x: obj.bar}); - assert.eq(3, coll.count()); +coll.remove({x: obj.bar}); +assert.eq(3, coll.count()); - coll.remove({x: undefined}); - assert.eq(3, coll.count()); +coll.remove({x: undefined}); +assert.eq(3, coll.count()); - assert.writeErrorWithCode(coll.remove({_id: obj.bar}), ErrorCodes.BadValue); - assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue); +assert.writeErrorWithCode(coll.remove({_id: obj.bar}), ErrorCodes.BadValue); +assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue); - coll.remove({_id: obj.nullElem}); - assert.eq(2, coll.count()); +coll.remove({_id: obj.nullElem}); +assert.eq(2, coll.count()); - assert.writeOK(coll.insert({_id: null})); - assert.eq(3, coll.count()); +assert.writeOK(coll.insert({_id: null})); +assert.eq(3, coll.count()); - assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue); - assert.eq(3, coll.count()); +assert.writeErrorWithCode(coll.remove({_id: undefined}), ErrorCodes.BadValue); +assert.eq(3, coll.count()); })(); diff --git a/jstests/core/removea.js b/jstests/core/removea.js index 082833b503a..ee914662d92 100644 --- a/jstests/core/removea.js +++ b/jstests/core/removea.js @@ -2,31 +2,31 @@ // Test removal of a substantial proportion of inserted documents. (function() { - "use strict"; +"use strict"; - const t = db.jstests_removea; +const t = db.jstests_removea; - Random.setRandomSeed(); +Random.setRandomSeed(); - for (let v = 0; v < 2; ++v) { // Try each index version. 
- t.drop(); - t.ensureIndex({a: 1}, {v: v}); - const S = 100; - const B = 100; - for (let x = 0; x < S; x++) { - let batch = []; - for (let y = 0; y < B; y++) { - let i = y + (B * x); - batch.push({a: i}); - } - assert.writeOK(t.insert(batch)); +for (let v = 0; v < 2; ++v) { // Try each index version. + t.drop(); + t.ensureIndex({a: 1}, {v: v}); + const S = 100; + const B = 100; + for (let x = 0; x < S; x++) { + let batch = []; + for (let y = 0; y < B; y++) { + let i = y + (B * x); + batch.push({a: i}); } - assert.eq(t.count(), S * B); + assert.writeOK(t.insert(batch)); + } + assert.eq(t.count(), S * B); - let toDrop = []; - for (let i = 0; i < S * B; ++i) { - toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored. - } - assert.writeOK(t.remove({a: {$in: toDrop}})); + let toDrop = []; + for (let i = 0; i < S * B; ++i) { + toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored. } + assert.writeOK(t.remove({a: {$in: toDrop}})); +} })(); diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js index 4cf00d46ffa..eeed0fc30bc 100644 --- a/jstests/core/removeb.js +++ b/jstests/core/removeb.js @@ -7,57 +7,57 @@ // Test removal of Records that have been reused since the remove operation began. SERVER-5198 (function() { - "use strict"; +"use strict"; - const t = db.jstests_removeb; - t.drop(); +const t = db.jstests_removeb; +t.drop(); - t.ensureIndex({a: 1}); +t.ensureIndex({a: 1}); - // Make the index multikey to trigger cursor dedup checking. - t.insert({a: [-1, -2]}); - t.remove({}); +// Make the index multikey to trigger cursor dedup checking. +t.insert({a: [-1, -2]}); +t.remove({}); - const insertDocs = function(collection, nDocs) { - print("Bulk inserting " + nDocs + " documents"); +const insertDocs = function(collection, nDocs) { + print("Bulk inserting " + nDocs + " documents"); - const bulk = collection.initializeUnorderedBulkOp(); - for (let i = 0; i < nDocs; ++i) { - bulk.insert({a: i}); - } + const bulk = collection.initializeUnorderedBulkOp(); + for (let i = 0; i < nDocs; ++i) { + bulk.insert({a: i}); + } - assert.writeOK(bulk.execute()); + assert.writeOK(bulk.execute()); - print("Bulk insert " + nDocs + " documents completed"); - }; + print("Bulk insert " + nDocs + " documents completed"); +}; - insertDocs(t, 20000); +insertDocs(t, 20000); - const p = startParallelShell(function() { - // Wait until the remove operation (below) begins running. - while (db.jstests_removeb.count() === 20000) { - } +const p = startParallelShell(function() { + // Wait until the remove operation (below) begins running. + while (db.jstests_removeb.count() === 20000) { + } - // Insert documents with increasing 'a' values. These inserted documents may - // reuse Records freed by the remove operation in progress and will be - // visited by the remove operation if it has not completed. - for (let i = 20000; i < 40000; i += 100) { - const bulk = db.jstests_removeb.initializeUnorderedBulkOp(); - for (let j = 0; j < 100; ++j) { - bulk.insert({a: i + j}); - } - assert.writeOK(bulk.execute()); - if (i % 1000 === 0) { - print(i - 20000 + " of second set of 20000 documents inserted"); - } + // Insert documents with increasing 'a' values. These inserted documents may + // reuse Records freed by the remove operation in progress and will be + // visited by the remove operation if it has not completed. 
+ for (let i = 20000; i < 40000; i += 100) { + const bulk = db.jstests_removeb.initializeUnorderedBulkOp(); + for (let j = 0; j < 100; ++j) { + bulk.insert({a: i + j}); + } + assert.writeOK(bulk.execute()); + if (i % 1000 === 0) { + print(i - 20000 + " of second set of 20000 documents inserted"); } - }); + } +}); - // Remove using the a:1 index in ascending direction. - var res = t.remove({a: {$gte: 0}}); - assert(!res.hasWriteError(), 'The remove operation failed.'); +// Remove using the a:1 index in ascending direction. +var res = t.remove({a: {$gte: 0}}); +assert(!res.hasWriteError(), 'The remove operation failed.'); - p(); +p(); - t.drop(); +t.drop(); })(); diff --git a/jstests/core/rename6.js b/jstests/core/rename6.js index dbdf677a811..faa36a448dd 100644 --- a/jstests/core/rename6.js +++ b/jstests/core/rename6.js @@ -6,33 +6,33 @@ // @tags: [requires_non_retryable_commands, assumes_unsharded_collection] (function() { - 'use strict'; +'use strict'; - const testDB = db.getSiblingDB("test"); - const c = "rename2c"; - const dbc = testDB.getCollection(c); - const d = "dest4567890123456789012345678901234567890123456789012345678901234567890"; - const dbd = testDB.getCollection(d); +const testDB = db.getSiblingDB("test"); +const c = "rename2c"; +const dbc = testDB.getCollection(c); +const d = "dest4567890123456789012345678901234567890123456789012345678901234567890"; +const dbd = testDB.getCollection(d); - dbc.drop(); - dbd.drop(); +dbc.drop(); +dbd.drop(); - dbc.ensureIndex({ - "name": 1, - "date": 1, - "time": 1, - "renameCollection": 1, - "mongodb": 1, - "testing": 1, - "data": 1 - }); +dbc.ensureIndex({ + "name": 1, + "date": 1, + "time": 1, + "renameCollection": 1, + "mongodb": 1, + "testing": 1, + "data": 1 +}); - // Checking for the newly created index and the _id index in original collection. - assert.eq(2, dbc.getIndexes().length, "Long Rename Init"); - // Should succeed in renaming collection as the long index namespace is acceptable. - assert.commandWorked(dbc.renameCollection(d), "Long Rename Exec"); - // Since we succeeded we should have the 2 indexes moved and no indexes under the old collection - // name. - assert.eq(0, dbc.getIndexes().length, "Long Rename Result 1"); - assert.eq(2, dbd.getIndexes().length, "Long Rename Result 2"); +// Checking for the newly created index and the _id index in original collection. +assert.eq(2, dbc.getIndexes().length, "Long Rename Init"); +// Should succeed in renaming collection as the long index namespace is acceptable. +assert.commandWorked(dbc.renameCollection(d), "Long Rename Exec"); +// Since we succeeded we should have the 2 indexes moved and no indexes under the old collection +// name. +assert.eq(0, dbc.getIndexes().length, "Long Rename Result 1"); +assert.eq(2, dbd.getIndexes().length, "Long Rename Result 2"); })(); diff --git a/jstests/core/rename_change_target_type.js b/jstests/core/rename_change_target_type.js index 25fbcfb0f01..859e1add0b2 100644 --- a/jstests/core/rename_change_target_type.js +++ b/jstests/core/rename_change_target_type.js @@ -1,15 +1,15 @@ // Test that a rename that overwrites its destination with an equivalent value of a different type // updates the type of the destination (SERVER-32109). 
(function() { - "use strict"; +"use strict"; - let coll = db.rename_change_target_type; - coll.drop(); +let coll = db.rename_change_target_type; +coll.drop(); - assert.writeOK(coll.insert({to: NumberLong(100), from: 100})); - assert.writeOK(coll.update({}, {$rename: {from: "to"}})); +assert.writeOK(coll.insert({to: NumberLong(100), from: 100})); +assert.writeOK(coll.update({}, {$rename: {from: "to"}})); - let aggResult = coll.aggregate([{$project: {toType: {$type: "$to"}}}]).toArray(); - assert.eq(aggResult.length, 1); - assert.eq(aggResult[0].toType, "double", "Incorrect type resulting from $rename"); +let aggResult = coll.aggregate([{$project: {toType: {$type: "$to"}}}]).toArray(); +assert.eq(aggResult.length, 1); +assert.eq(aggResult[0].toType, "double", "Incorrect type resulting from $rename"); })(); diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js index 19bd0f9f27c..bf254537239 100644 --- a/jstests/core/restart_catalog.js +++ b/jstests/core/restart_catalog.js @@ -12,127 +12,128 @@ * ] */ (function() { - "use strict"; - - // Only run this test if the storage engine is "wiredTiger" or "inMemory". - const acceptedStorageEngines = ["wiredTiger", "inMemory"]; - const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger"; - if (!acceptedStorageEngines.includes(currentStorageEngine)) { - jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine + - " storage engine"); - return; - } - - // Helper function for sorting documents in JavaScript. - function sortOnId(doc1, doc2) { - return bsonWoCompare({_: doc1._id}, {_: doc2._id}); - } - - const testDB = db.getSiblingDB("restart_catalog"); - const artistsColl = testDB.getCollection("artists"); - const songsColl = testDB.getCollection("songs"); - artistsColl.drop(); - songsColl.drop(); - - // Populate some data into the collection. - const artists = [ - {_id: "beyonce"}, - {_id: "fenech-soler"}, - {_id: "gallant"}, - ]; - for (let artist of artists) { - assert.commandWorked(artistsColl.insert(artist)); - } - - const songs = [ - {_id: "flawless", artist: "beyonce", sales: 5000}, - {_id: "conversation", artist: "fenech-soler", sales: 75.5}, - {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0}, - {_id: "miyazaki", artist: "gallant", sales: 400.3}, - {_id: "percogesic", artist: "gallant", sales: 550.8}, - {_id: "shotgun", artist: "gallant", sales: 300.0}, - ]; - for (let song of songs) { - assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}})); - } - - // Perform some queries. - function assertQueriesFindExpectedData() { - assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists); - assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId)); - - const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId); - assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(), - songsWithLotsOfSales); - - const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId); - assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(), - songsByGallant); - - const initialValue = 0; - const totalSales = songs.reduce((total, song) => total + song.sales, initialValue); - assert.eq(songsColl - .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}], - {readConcern: {level: "majority"}}) - .toArray(), - [{_id: null, totalSales: totalSales}]); - } - assertQueriesFindExpectedData(); - - // Remember what indexes are present, then restart the catalog. 
- const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId); - const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId); - assert.commandWorked(db.adminCommand({restartCatalog: 1})); - - // Access the query plan cache. (This makes no assumptions about the state of the plan cache - // after restart; however, the database definitely should not crash.) - [songsColl, artistsColl].forEach(coll => { - assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}})); - assert.commandWorked(coll.runCommand("planCacheListQueryShapes")); - assert.commandWorked(coll.runCommand("planCacheClear")); - }); - - // Verify that the data in the collections has not changed. - assertQueriesFindExpectedData(); - - // Verify that both collections have the same indexes as prior to the restart. - const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId); - assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart); - const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId); - assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart); - - // Create new indexes and run more queries. - assert.commandWorked(songsColl.createIndex({sales: 1})); - assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1})); - assertQueriesFindExpectedData(); - - // Modify an existing collection. - assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}})); - assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure); - - // Perform another write, implicitly creating a new collection and database. - const secondTestDB = db.getSiblingDB("restart_catalog_2"); - const foodColl = secondTestDB.getCollection("food"); - foodColl.drop(); - const doc = {_id: "apple", category: "fruit"}; - assert.commandWorked(foodColl.insert(doc)); - assert.eq(foodColl.find().toArray(), [doc]); - - // Build a new index on the new collection. - assert.commandWorked(foodColl.createIndex({category: -1})); - assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]); - - // The restartCatalog command kills all cursors. Test that a getMore on a cursor that existed - // during restartCatalog fails with the appropriate error code. We insert a second document so - // that we can make a query happen in two batches. - assert.commandWorked(foodColl.insert({_id: "orange"})); - let cursorResponse = assert.commandWorked( - secondTestDB.runCommand({find: foodColl.getName(), filter: {}, batchSize: 1})); - assert.eq(cursorResponse.cursor.firstBatch.length, 1); - assert.neq(cursorResponse.cursor.id, 0); - assert.commandWorked(secondTestDB.adminCommand({restartCatalog: 1})); - assert.commandFailedWithCode( - secondTestDB.runCommand( - {getMore: cursorResponse.cursor.id, collection: foodColl.getName()}), - ErrorCodes.QueryPlanKilled); +"use strict"; + +// Only run this test if the storage engine is "wiredTiger" or "inMemory". +const acceptedStorageEngines = ["wiredTiger", "inMemory"]; +const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger"; +if (!acceptedStorageEngines.includes(currentStorageEngine)) { + jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine + + " storage engine"); + return; +} + +// Helper function for sorting documents in JavaScript. 
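+// (bsonWoCompare returns a negative, zero, or positive value, so this comparator can be passed
+// directly to Array.prototype.sort to order documents by _id using BSON comparison semantics.)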
+function sortOnId(doc1, doc2) { + return bsonWoCompare({_: doc1._id}, {_: doc2._id}); +} + +const testDB = db.getSiblingDB("restart_catalog"); +const artistsColl = testDB.getCollection("artists"); +const songsColl = testDB.getCollection("songs"); +artistsColl.drop(); +songsColl.drop(); + +// Populate some data into the collection. +const artists = [ + {_id: "beyonce"}, + {_id: "fenech-soler"}, + {_id: "gallant"}, +]; +for (let artist of artists) { + assert.commandWorked(artistsColl.insert(artist)); +} + +const songs = [ + {_id: "flawless", artist: "beyonce", sales: 5000}, + {_id: "conversation", artist: "fenech-soler", sales: 75.5}, + {_id: "kaleidoscope", artist: "fenech-soler", sales: 30.0}, + {_id: "miyazaki", artist: "gallant", sales: 400.3}, + {_id: "percogesic", artist: "gallant", sales: 550.8}, + {_id: "shotgun", artist: "gallant", sales: 300.0}, +]; +for (let song of songs) { + assert.commandWorked(songsColl.insert(song, {writeConcern: {w: "majority"}})); +} + +// Perform some queries. +function assertQueriesFindExpectedData() { + assert.eq(artistsColl.find().sort({_id: 1}).toArray(), artists); + assert.eq(songsColl.find().sort({_id: 1}).toArray(), songs.sort(sortOnId)); + + const songsWithLotsOfSales = songs.filter(song => song.sales > 500).sort(sortOnId); + assert.eq(songsColl.find({sales: {$gt: 500}}).sort({_id: 1}).toArray(), songsWithLotsOfSales); + + const songsByGallant = songs.filter(song => song.artist === "gallant").sort(sortOnId); + assert.eq(songsColl.aggregate([{$match: {artist: "gallant"}}, {$sort: {_id: 1}}]).toArray(), + songsByGallant); + + const initialValue = 0; + const totalSales = songs.reduce((total, song) => total + song.sales, initialValue); + assert.eq(songsColl + .aggregate([{$group: {_id: null, totalSales: {$sum: "$sales"}}}], + {readConcern: {level: "majority"}}) + .toArray(), + [{_id: null, totalSales: totalSales}]); +} +assertQueriesFindExpectedData(); + +// Remember what indexes are present, then restart the catalog. +const songIndexesBeforeRestart = songsColl.getIndexes().sort(sortOnId); +const artistIndexesBeforeRestart = artistsColl.getIndexes().sort(sortOnId); +assert.commandWorked(db.adminCommand({restartCatalog: 1})); + +// Access the query plan cache. (This makes no assumptions about the state of the plan cache +// after restart; however, the database definitely should not crash.) +[songsColl, artistsColl].forEach(coll => { + assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}})); + assert.commandWorked(coll.runCommand("planCacheListQueryShapes")); + assert.commandWorked(coll.runCommand("planCacheClear")); +}); + +// Verify that the data in the collections has not changed. +assertQueriesFindExpectedData(); + +// Verify that both collections have the same indexes as prior to the restart. +const songIndexesAfterRestart = songsColl.getIndexes().sort(sortOnId); +assert.eq(songIndexesBeforeRestart, songIndexesAfterRestart); +const artistIndexesAfterRestart = artistsColl.getIndexes().sort(sortOnId); +assert.eq(artistIndexesBeforeRestart, artistIndexesAfterRestart); + +// Create new indexes and run more queries. +assert.commandWorked(songsColl.createIndex({sales: 1})); +assert.commandWorked(songsColl.createIndex({artist: 1, sales: 1})); +assertQueriesFindExpectedData(); + +// Modify an existing collection. 
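+// (The collMod below installs a validator requiring a string _id, so the numeric insert that
+// follows is expected to fail with DocumentValidationFailure.)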
+assert.commandWorked(artistsColl.runCommand("collMod", {validator: {_id: {$type: "string"}}})); +assert.writeErrorWithCode(artistsColl.insert({_id: 7}), ErrorCodes.DocumentValidationFailure); + +// Perform another write, implicitly creating a new collection and database. +const secondTestDB = db.getSiblingDB("restart_catalog_2"); +const foodColl = secondTestDB.getCollection("food"); +foodColl.drop(); +const doc = { + _id: "apple", + category: "fruit" +}; +assert.commandWorked(foodColl.insert(doc)); +assert.eq(foodColl.find().toArray(), [doc]); + +// Build a new index on the new collection. +assert.commandWorked(foodColl.createIndex({category: -1})); +assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]); + +// The restartCatalog command kills all cursors. Test that a getMore on a cursor that existed +// during restartCatalog fails with the appropriate error code. We insert a second document so +// that we can make a query happen in two batches. +assert.commandWorked(foodColl.insert({_id: "orange"})); +let cursorResponse = assert.commandWorked( + secondTestDB.runCommand({find: foodColl.getName(), filter: {}, batchSize: 1})); +assert.eq(cursorResponse.cursor.firstBatch.length, 1); +assert.neq(cursorResponse.cursor.id, 0); +assert.commandWorked(secondTestDB.adminCommand({restartCatalog: 1})); +assert.commandFailedWithCode( + secondTestDB.runCommand({getMore: cursorResponse.cursor.id, collection: foodColl.getName()}), + ErrorCodes.QueryPlanKilled); }()); diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js index 38843eaf0a3..26dd01082b6 100644 --- a/jstests/core/return_key.js +++ b/jstests/core/return_key.js @@ -9,78 +9,76 @@ load("jstests/libs/analyze_plan.js"); (function() { - 'use strict'; +'use strict'; - var results; - var explain; +var results; +var explain; - var coll = db.jstests_returnkey; - coll.drop(); +var coll = db.jstests_returnkey; +coll.drop(); - assert.writeOK(coll.insert({a: 1, b: 3})); - assert.writeOK(coll.insert({a: 2, b: 2})); - assert.writeOK(coll.insert({a: 3, b: 1})); +assert.writeOK(coll.insert({a: 1, b: 3})); +assert.writeOK(coll.insert({a: 2, b: 2})); +assert.writeOK(coll.insert({a: 3, b: 1})); - assert.commandWorked(coll.ensureIndex({a: 1})); - assert.commandWorked(coll.ensureIndex({b: 1})); +assert.commandWorked(coll.ensureIndex({a: 1})); +assert.commandWorked(coll.ensureIndex({b: 1})); - // Basic returnKey. - results = coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray(); - assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]); - results = coll.find().hint({a: 1}).sort({a: -1}).returnKey().toArray(); - assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); +// Basic returnKey. +results = coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray(); +assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]); +results = coll.find().hint({a: 1}).sort({a: -1}).returnKey().toArray(); +assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); - // Check that the plan is covered. - explain = coll.find().hint({a: 1}).sort({a: 1}).returnKey().explain(); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - explain = coll.find().hint({a: 1}).sort({a: -1}).returnKey().explain(); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Check that the plan is covered. +explain = coll.find().hint({a: 1}).sort({a: 1}).returnKey().explain(); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +explain = coll.find().hint({a: 1}).sort({a: -1}).returnKey().explain(); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // returnKey with an in-memory sort. 
- results = coll.find().hint({a: 1}).sort({b: 1}).returnKey().toArray(); - assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); - results = coll.find().hint({a: 1}).sort({b: -1}).returnKey().toArray(); - assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]); +// returnKey with an in-memory sort. +results = coll.find().hint({a: 1}).sort({b: 1}).returnKey().toArray(); +assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); +results = coll.find().hint({a: 1}).sort({b: -1}).returnKey().toArray(); +assert.eq(results, [{a: 1}, {a: 2}, {a: 3}]); - // Check that the plan is not covered. - explain = coll.find().hint({a: 1}).sort({b: 1}).returnKey().explain(); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - explain = coll.find().hint({a: 1}).sort({b: -1}).returnKey().explain(); - assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +// Check that the plan is not covered. +explain = coll.find().hint({a: 1}).sort({b: 1}).returnKey().explain(); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); +explain = coll.find().hint({a: 1}).sort({b: -1}).returnKey().explain(); +assert(!isIndexOnly(db, explain.queryPlanner.winningPlan)); - // returnKey takes precedence over other a regular inclusion projection. Should still be - // covered. - results = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); - assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); - explain = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().explain(); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// returnKey takes precedence over other a regular inclusion projection. Should still be +// covered. +results = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); +assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); +explain = coll.find({}, {b: 1}).hint({a: 1}).sort({a: -1}).returnKey().explain(); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // returnKey takes precedence over other a regular exclusion projection. Should still be - // covered. - results = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); - assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); - explain = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().explain(); - assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); +// returnKey takes precedence over other a regular exclusion projection. Should still be +// covered. +results = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); +assert.eq(results, [{a: 3}, {a: 2}, {a: 1}]); +explain = coll.find({}, {a: 0}).hint({a: 1}).sort({a: -1}).returnKey().explain(); +assert(isIndexOnly(db, explain.queryPlanner.winningPlan)); - // Unlike other projections, sortKey meta-projection can co-exist with returnKey. - results = - coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); - assert.eq(results, [{a: 3, c: {'': 3}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 1}}]); +// Unlike other projections, sortKey meta-projection can co-exist with returnKey. +results = coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({a: -1}).returnKey().toArray(); +assert.eq(results, [{a: 3, c: {'': 3}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 1}}]); - // returnKey with sortKey $meta where there is an in-memory sort. - results = - coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({b: 1}).returnKey().toArray(); - assert.eq(results, [{a: 3, c: {'': 1}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 3}}]); +// returnKey with sortKey $meta where there is an in-memory sort. 
+results = coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({b: 1}).returnKey().toArray(); +assert.eq(results, [{a: 3, c: {'': 1}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 3}}]); - // returnKey with multiple sortKey $meta projections. - results = coll.find({}, {c: {$meta: 'sortKey'}, d: {$meta: 'sortKey'}}) - .hint({a: 1}) - .sort({b: 1}) - .returnKey() - .toArray(); - assert.eq(results, [ - {a: 3, c: {'': 1}, d: {'': 1}}, - {a: 2, c: {'': 2}, d: {'': 2}}, - {a: 1, c: {'': 3}, d: {'': 3}} - ]); +// returnKey with multiple sortKey $meta projections. +results = coll.find({}, {c: {$meta: 'sortKey'}, d: {$meta: 'sortKey'}}) + .hint({a: 1}) + .sort({b: 1}) + .returnKey() + .toArray(); +assert.eq(results, [ + {a: 3, c: {'': 1}, d: {'': 1}}, + {a: 2, c: {'': 2}, d: {'': 2}}, + {a: 1, c: {'': 3}, d: {'': 3}} +]); })(); diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js index b0f1762acf9..f59ff425d52 100644 --- a/jstests/core/role_management_helpers.js +++ b/jstests/core/role_management_helpers.js @@ -34,121 +34,119 @@ function assertHasPrivilege(privilegeArray, privilege) { } } assert(false, - "Privilege " + tojson(privilege) + " not found in privilege array: " + - tojson(privilegeArray)); + "Privilege " + tojson(privilege) + + " not found in privilege array: " + tojson(privilegeArray)); } (function(db) { - var db = db.getSiblingDB("role_management_helpers"); - db.dropDatabase(); - db.dropAllRoles(); - - db.createRole({ - role: 'roleA', - roles: [], - privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}] - }); - db.createRole({role: 'roleB', privileges: [], roles: ["roleA"]}); - db.createRole({role: 'roleC', privileges: [], roles: []}); - - // Test getRole - var roleObj = db.getRole("roleA"); - assert.eq(0, roleObj.roles.length); - assert.eq(null, roleObj.privileges); - roleObj = db.getRole("roleA", {showPrivileges: true}); - assert.eq(1, roleObj.privileges.length); - assertHasPrivilege(roleObj.privileges, - {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); - roleObj = db.getRole("roleB", {showPrivileges: true}); - assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA - assertHasPrivilege(roleObj.inheritedPrivileges, - {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); - assert.eq(1, roleObj.roles.length); - assertHasRole(roleObj.roles, "roleA", db.getName()); - - // Test getRoles - var roles = db.getRoles(); - assert.eq(3, roles.length); - printjson(roles); - assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 'roleA'); - assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB'); - assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC'); - assert.eq(null, roles[0].inheritedPrivileges); - var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true}); - assert.eq(9, roles.length); - assert.neq(null, roles[0].inheritedPrivileges); - - // Granting roles to nonexistent role fails - assert.throws(function() { - db.grantRolesToRole("fakeRole", ['dbAdmin']); - }); - // Granting roles to built-in role fails - assert.throws(function() { - db.grantRolesToRole("readWrite", ['dbAdmin']); - }); - // Granting non-existant role fails - assert.throws(function() { - db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']); - }); - - roleObj = db.getRole("roleB", {showPrivileges: true}); - assert.eq(1, roleObj.inheritedPrivileges.length); - assert.eq(1, roleObj.roles.length); - 
assertHasRole(roleObj.roles, "roleA", db.getName()); - - // Granting a role you already have is no problem - db.grantRolesToRole("roleB", ['readWrite', 'roleC']); - roleObj = db.getRole("roleB", {showPrivileges: true}); - assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role - assert.eq(3, roleObj.roles.length); - assertHasRole(roleObj.roles, "readWrite", db.getName()); - assertHasRole(roleObj.roles, "roleA", db.getName()); - assertHasRole(roleObj.roles, "roleC", db.getName()); - - // Revoking roles the role doesn't have is fine - db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']); - roleObj = db.getRole("roleB", {showPrivileges: true}); - assert.eq(0, roleObj.inheritedPrivileges.length); - assert.eq(1, roleObj.roles.length); - assertHasRole(roleObj.roles, "roleC", db.getName()); - - // Privileges on the same resource get collapsed - db.grantPrivilegesToRole("roleA", [ - {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']}, - {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']} - ]); - roleObj = db.getRole("roleA", {showPrivileges: true}); - assert.eq(0, roleObj.roles.length); - assert.eq(2, roleObj.privileges.length); - assertHasPrivilege( - roleObj.privileges, - {resource: {db: db.getName(), collection: "foo"}, actions: ['find', 'insert']}); - assertHasPrivilege(roleObj.privileges, - {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']}); - - // Update role - db.updateRole("roleA", { - roles: ['roleB'], - privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}] - }); - roleObj = db.getRole("roleA", {showPrivileges: true}); - assert.eq(1, roleObj.roles.length); - assertHasRole(roleObj.roles, "roleB", db.getName()); - assert.eq(1, roleObj.privileges.length); - assertHasPrivilege(roleObj.privileges, - {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); - - // Test dropRole - db.dropRole('roleC'); - assert.eq(null, db.getRole('roleC')); - roleObj = db.getRole("roleB", {showPrivileges: true}); - assert.eq(0, roleObj.privileges.length); - assert.eq(0, roleObj.roles.length); - - // Test dropAllRoles - db.dropAllRoles(); - assert.eq(null, db.getRole('roleA')); - assert.eq(null, db.getRole('roleB')); - assert.eq(null, db.getRole('roleC')); - +var db = db.getSiblingDB("role_management_helpers"); +db.dropDatabase(); +db.dropAllRoles(); + +db.createRole({ + role: 'roleA', + roles: [], + privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}] +}); +db.createRole({role: 'roleB', privileges: [], roles: ["roleA"]}); +db.createRole({role: 'roleC', privileges: [], roles: []}); + +// Test getRole +var roleObj = db.getRole("roleA"); +assert.eq(0, roleObj.roles.length); +assert.eq(null, roleObj.privileges); +roleObj = db.getRole("roleA", {showPrivileges: true}); +assert.eq(1, roleObj.privileges.length); +assertHasPrivilege(roleObj.privileges, + {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); +roleObj = db.getRole("roleB", {showPrivileges: true}); +assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA +assertHasPrivilege(roleObj.inheritedPrivileges, + {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); +assert.eq(1, roleObj.roles.length); +assertHasRole(roleObj.roles, "roleA", db.getName()); + +// Test getRoles +var roles = db.getRoles(); +assert.eq(3, roles.length); +printjson(roles); +assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 
'roleA'); +assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB'); +assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC'); +assert.eq(null, roles[0].inheritedPrivileges); +var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true}); +assert.eq(9, roles.length); +assert.neq(null, roles[0].inheritedPrivileges); + +// Granting roles to nonexistent role fails +assert.throws(function() { + db.grantRolesToRole("fakeRole", ['dbAdmin']); +}); +// Granting roles to built-in role fails +assert.throws(function() { + db.grantRolesToRole("readWrite", ['dbAdmin']); +}); +// Granting non-existant role fails +assert.throws(function() { + db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']); +}); + +roleObj = db.getRole("roleB", {showPrivileges: true}); +assert.eq(1, roleObj.inheritedPrivileges.length); +assert.eq(1, roleObj.roles.length); +assertHasRole(roleObj.roles, "roleA", db.getName()); + +// Granting a role you already have is no problem +db.grantRolesToRole("roleB", ['readWrite', 'roleC']); +roleObj = db.getRole("roleB", {showPrivileges: true}); +assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role +assert.eq(3, roleObj.roles.length); +assertHasRole(roleObj.roles, "readWrite", db.getName()); +assertHasRole(roleObj.roles, "roleA", db.getName()); +assertHasRole(roleObj.roles, "roleC", db.getName()); + +// Revoking roles the role doesn't have is fine +db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']); +roleObj = db.getRole("roleB", {showPrivileges: true}); +assert.eq(0, roleObj.inheritedPrivileges.length); +assert.eq(1, roleObj.roles.length); +assertHasRole(roleObj.roles, "roleC", db.getName()); + +// Privileges on the same resource get collapsed +db.grantPrivilegesToRole("roleA", [ + {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']}, + {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']} +]); +roleObj = db.getRole("roleA", {showPrivileges: true}); +assert.eq(0, roleObj.roles.length); +assert.eq(2, roleObj.privileges.length); +assertHasPrivilege(roleObj.privileges, + {resource: {db: db.getName(), collection: "foo"}, actions: ['find', 'insert']}); +assertHasPrivilege(roleObj.privileges, + {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']}); + +// Update role +db.updateRole("roleA", { + roles: ['roleB'], + privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}] +}); +roleObj = db.getRole("roleA", {showPrivileges: true}); +assert.eq(1, roleObj.roles.length); +assertHasRole(roleObj.roles, "roleB", db.getName()); +assert.eq(1, roleObj.privileges.length); +assertHasPrivilege(roleObj.privileges, + {resource: {db: db.getName(), collection: "foo"}, actions: ['find']}); + +// Test dropRole +db.dropRole('roleC'); +assert.eq(null, db.getRole('roleC')); +roleObj = db.getRole("roleB", {showPrivileges: true}); +assert.eq(0, roleObj.privileges.length); +assert.eq(0, roleObj.roles.length); + +// Test dropAllRoles +db.dropAllRoles(); +assert.eq(null, db.getRole('roleA')); +assert.eq(null, db.getRole('roleB')); +assert.eq(null, db.getRole('roleC')); }(db)); diff --git a/jstests/core/rollback_index_drop.js b/jstests/core/rollback_index_drop.js index 3e7c3a97952..6f999f56209 100644 --- a/jstests/core/rollback_index_drop.js +++ b/jstests/core/rollback_index_drop.js @@ -5,31 +5,30 @@ // // @tags: [does_not_support_stepdowns, assumes_unsharded_collection] (function() { - "use strict"; +"use strict"; - 
const coll = db.rollback_index_drop; - coll.drop(); +const coll = db.rollback_index_drop; +coll.drop(); - assert.commandWorked(coll.insert([{a: 1}, {a: 2}, {a: 3}])); - assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.insert([{a: 1}, {a: 2}, {a: 3}])); +assert.commandWorked(coll.createIndex({a: 1})); - // Verify that the index has the expected set of keys. - assert.eq([{a: 1}, {a: 2}, {a: 3}], - coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray()); +// Verify that the index has the expected set of keys. +assert.eq([{a: 1}, {a: 2}, {a: 3}], coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray()); - // Run a dropIndexes command that attempts to drop both {a: 1} and an invalid index. This should - // cause the drop of {a: 1} to rollback, since the set of index drops happen atomically. - assert.commandFailedWithCode( - db.runCommand({dropIndexes: coll.getName(), index: ["a_1", "unknown"]}), - ErrorCodes.IndexNotFound); +// Run a dropIndexes command that attempts to drop both {a: 1} and an invalid index. This should +// cause the drop of {a: 1} to rollback, since the set of index drops happen atomically. +assert.commandFailedWithCode( + db.runCommand({dropIndexes: coll.getName(), index: ["a_1", "unknown"]}), + ErrorCodes.IndexNotFound); - // Verify that the {a: 1} index is still present in listIndexes output. - const indexList = coll.getIndexes(); - assert.neq(undefined, indexList.find((idx) => idx.name === "a_1"), indexList); +// Verify that the {a: 1} index is still present in listIndexes output. +const indexList = coll.getIndexes(); +assert.neq(undefined, indexList.find((idx) => idx.name === "a_1"), indexList); - // Write to the collection and ensure that the resulting set of index keys is correct. - assert.commandWorked(coll.update({a: 3}, {$inc: {a: 1}})); - assert.commandWorked(coll.insert({a: 5})); - assert.eq([{a: 1}, {a: 2}, {a: 4}, {a: 5}], - coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray()); +// Write to the collection and ensure that the resulting set of index keys is correct. 
+assert.commandWorked(coll.update({a: 3}, {$inc: {a: 1}})); +assert.commandWorked(coll.insert({a: 5})); +assert.eq([{a: 1}, {a: 2}, {a: 4}, {a: 5}], + coll.find().hint({a: 1}).sort({a: 1}).returnKey().toArray()); }()); diff --git a/jstests/core/server1470.js b/jstests/core/server1470.js index 040eda4228f..41f7bfea7e0 100644 --- a/jstests/core/server1470.js +++ b/jstests/core/server1470.js @@ -12,7 +12,7 @@ q = { }; t.update(q, {$set: {x: 1}}, true, true); ref = t.findOne().pic; -assert.eq("object", typeof(ref)); +assert.eq("object", typeof (ref)); assert.eq(q.pic["$ref"], ref["$ref"]); assert.eq(q.pic["$id"], ref["$id"]); diff --git a/jstests/core/server14747.js b/jstests/core/server14747.js index e75407a7fdf..c6d77e6adb4 100644 --- a/jstests/core/server14747.js +++ b/jstests/core/server14747.js @@ -3,15 +3,14 @@ (function() { - "use strict"; - var t = db.jstests_server14747; - - t.drop(); - t.ensureIndex({a: 1, b: 1}); - t.ensureIndex({a: 1, c: 1}); - t.insert({a: 1}); - for (var i = 0; i < 10; i++) { - t.find({a: 1}).explain(true); - } +"use strict"; +var t = db.jstests_server14747; +t.drop(); +t.ensureIndex({a: 1, b: 1}); +t.ensureIndex({a: 1, c: 1}); +t.insert({a: 1}); +for (var i = 0; i < 10; i++) { + t.find({a: 1}).explain(true); +} }()); diff --git a/jstests/core/server14753.js b/jstests/core/server14753.js index cd6ea309399..e8de183f2d7 100644 --- a/jstests/core/server14753.js +++ b/jstests/core/server14753.js @@ -3,17 +3,16 @@ (function() { - "use strict"; - var t = db.jstests_server14753; - - t.drop(); - t.ensureIndex({a: 1}); - t.ensureIndex({b: 1}); - for (var i = 0; i < 20; i++) { - t.insert({b: i}); - } - for (var i = 0; i < 20; i++) { - t.find({b: 1}).sort({a: 1}).next(); - } +"use strict"; +var t = db.jstests_server14753; +t.drop(); +t.ensureIndex({a: 1}); +t.ensureIndex({b: 1}); +for (var i = 0; i < 20; i++) { + t.insert({b: i}); +} +for (var i = 0; i < 20; i++) { + t.find({b: 1}).sort({a: 1}).next(); +} }()); diff --git a/jstests/core/server22053.js b/jstests/core/server22053.js index d295a72cc9b..d803c732b86 100644 --- a/jstests/core/server22053.js +++ b/jstests/core/server22053.js @@ -1,19 +1,19 @@ (function() { - "use strict"; - var t = db.jstests_server22053; +"use strict"; +var t = db.jstests_server22053; - /* eslint-disable no-sparse-arrays */ - var s0 = [, , 3, , , 6]; - t.coll.insert({mys: s0}); +/* eslint-disable no-sparse-arrays */ +var s0 = [, , 3, , , 6]; +t.coll.insert({mys: s0}); - var cur = t.coll.find(); - var doc = cur.next(); - assert.eq(6, doc['mys'].length); - assert.eq(undefined, doc['mys'][0]); - assert.eq(undefined, doc['mys'][1]); - assert.eq(3, doc['mys'][2]); - assert.eq(undefined, doc['mys'][3]); - assert.eq(undefined, doc['mys'][4]); - assert.eq(6, doc['mys'][5]); +var cur = t.coll.find(); +var doc = cur.next(); +assert.eq(6, doc['mys'].length); +assert.eq(undefined, doc['mys'][0]); +assert.eq(undefined, doc['mys'][1]); +assert.eq(3, doc['mys'][2]); +assert.eq(undefined, doc['mys'][3]); +assert.eq(undefined, doc['mys'][4]); +assert.eq(6, doc['mys'][5]); }());
\ No newline at end of file diff --git a/jstests/core/server25192.js b/jstests/core/server25192.js index e07cfdcf50c..a275c768f71 100644 --- a/jstests/core/server25192.js +++ b/jstests/core/server25192.js @@ -1,12 +1,12 @@ (function() { - "use strict"; +"use strict"; - var x = {}; +var x = {}; - assert.doesNotThrow(function() { - Object.extend(x, {a: null}, true); - }, [], "Extending an object with a null field does not throw"); +assert.doesNotThrow(function() { + Object.extend(x, {a: null}, true); +}, [], "Extending an object with a null field does not throw"); - assert.eq(x.a, null); +assert.eq(x.a, null); }()); diff --git a/jstests/core/set_param1.js b/jstests/core/set_param1.js index 7910c01aa96..6484f2241e2 100644 --- a/jstests/core/set_param1.js +++ b/jstests/core/set_param1.js @@ -48,78 +48,74 @@ assert.commandFailed( // Set multiple component log levels at once. (function() { - assert.commandWorked(db.adminCommand({ - "setParameter": 1, - logComponentVerbosity: { - verbosity: 2, - accessControl: {verbosity: 0}, - storage: {verbosity: 3, journal: {verbosity: 5}} - } - })); - - var result = - assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) - .logComponentVerbosity; - - assert.eq(2, result.verbosity); - assert.eq(0, result.accessControl.verbosity); - assert.eq(3, result.storage.verbosity); - assert.eq(5, result.storage.journal.verbosity); +assert.commandWorked(db.adminCommand({ + "setParameter": 1, + logComponentVerbosity: { + verbosity: 2, + accessControl: {verbosity: 0}, + storage: {verbosity: 3, journal: {verbosity: 5}} + } +})); + +var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) + .logComponentVerbosity; + +assert.eq(2, result.verbosity); +assert.eq(0, result.accessControl.verbosity); +assert.eq(3, result.storage.verbosity); +assert.eq(5, result.storage.journal.verbosity); })(); // Set multiple component log levels at once. // Unrecognized field names not mapping to a log component shall be rejected // No changes shall apply. (function() { - assert.commandFailed(db.adminCommand({ - "setParameter": 1, - logComponentVerbosity: { - verbosity: 6, - accessControl: {verbosity: 5}, - storage: {verbosity: 4, journal: {verbosity: 6}}, - NoSuchComponent: {verbosity: 2}, - extraField: 123 - } - })); - - var result = - assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) - .logComponentVerbosity; - - assert.eq(2, result.verbosity); - assert.eq(0, result.accessControl.verbosity); - assert.eq(3, result.storage.verbosity); - assert.eq(5, result.storage.journal.verbosity); +assert.commandFailed(db.adminCommand({ + "setParameter": 1, + logComponentVerbosity: { + verbosity: 6, + accessControl: {verbosity: 5}, + storage: {verbosity: 4, journal: {verbosity: 6}}, + NoSuchComponent: {verbosity: 2}, + extraField: 123 + } +})); + +var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) + .logComponentVerbosity; + +assert.eq(2, result.verbosity); +assert.eq(0, result.accessControl.verbosity); +assert.eq(3, result.storage.verbosity); +assert.eq(5, result.storage.journal.verbosity); })(); // Clear verbosity for default and journal. 
(function() { - assert.commandWorked(db.adminCommand({ - "setParameter": 1, - logComponentVerbosity: {verbosity: -1, storage: {journal: {verbosity: -1}}} - })); - - var result = - assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) - .logComponentVerbosity; - - assert.eq(0, result.verbosity); - assert.eq(0, result.accessControl.verbosity); - assert.eq(3, result.storage.verbosity); - assert.eq(-1, result.storage.journal.verbosity); +assert.commandWorked(db.adminCommand({ + "setParameter": 1, + logComponentVerbosity: {verbosity: -1, storage: {journal: {verbosity: -1}}} +})); + +var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) + .logComponentVerbosity; + +assert.eq(0, result.verbosity); +assert.eq(0, result.accessControl.verbosity); +assert.eq(3, result.storage.verbosity); +assert.eq(-1, result.storage.journal.verbosity); })(); // Set accessControl verbosity using numerical level instead of // subdocument with 'verbosity' field. (function() { - assert.commandWorked( - db.adminCommand({"setParameter": 1, logComponentVerbosity: {accessControl: 5}})); +assert.commandWorked( + db.adminCommand({"setParameter": 1, logComponentVerbosity: {accessControl: 5}})); - var result = - assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) - .logComponentVerbosity; +var result = assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1})) + .logComponentVerbosity; - assert.eq(5, result.accessControl.verbosity); +assert.eq(5, result.accessControl.verbosity); })(); // Restore old verbosity values. diff --git a/jstests/core/set_type_change.js b/jstests/core/set_type_change.js index 565da8be12e..5b06449dce4 100644 --- a/jstests/core/set_type_change.js +++ b/jstests/core/set_type_change.js @@ -8,21 +8,21 @@ * the document, including any relevant indices. */ (function() { - "use strict"; +"use strict"; - var coll = db.set_type_change; - coll.drop(); - assert.commandWorked(coll.ensureIndex({a: 1})); +var coll = db.set_type_change; +coll.drop(); +assert.commandWorked(coll.ensureIndex({a: 1})); - assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 2})); - var newVal = new NumberLong(2); - var res = coll.update({}, {$set: {a: newVal}}); - assert.eq(res.nMatched, 1); - if (coll.getMongo().writeMode() == "commands") - assert.eq(res.nModified, 1); +var newVal = new NumberLong(2); +var res = coll.update({}, {$set: {a: newVal}}); +assert.eq(res.nMatched, 1); +if (coll.getMongo().writeMode() == "commands") + assert.eq(res.nModified, 1); - // Make sure it actually changed the type. - var updated = coll.findOne(); - assert(updated.a instanceof NumberLong, "$set did not update type of value: " + updated.a); +// Make sure it actually changed the type. 
+var updated = coll.findOne(); +assert(updated.a instanceof NumberLong, "$set did not update type of value: " + updated.a); })(); diff --git a/jstests/core/shell_connection_strings.js b/jstests/core/shell_connection_strings.js index ff9aa727480..0cf2f3867d5 100644 --- a/jstests/core/shell_connection_strings.js +++ b/jstests/core/shell_connection_strings.js @@ -3,34 +3,33 @@ // uses_multiple_connections, // ] (function() { - 'use strict'; +'use strict'; - const mongod = new MongoURI(db.getMongo().host).servers[0]; - const host = mongod.host; - const port = mongod.port; +const mongod = new MongoURI(db.getMongo().host).servers[0]; +const host = mongod.host; +const port = mongod.port; - function testConnect(ok, ...args) { - const exitCode = runMongoProgram('mongo', '--eval', ';', ...args); - if (ok) { - assert.eq(exitCode, 0, "failed to connect with `" + args.join(' ') + "`"); - } else { - assert.neq( - exitCode, 0, "unexpectedly succeeded connecting with `" + args.join(' ') + "`"); - } +function testConnect(ok, ...args) { + const exitCode = runMongoProgram('mongo', '--eval', ';', ...args); + if (ok) { + assert.eq(exitCode, 0, "failed to connect with `" + args.join(' ') + "`"); + } else { + assert.neq(exitCode, 0, "unexpectedly succeeded connecting with `" + args.join(' ') + "`"); } +} - testConnect(true, `${host}:${port}`); - testConnect(true, `${host}:${port}/test`); - testConnect(true, `${host}:${port}/admin`); - testConnect(true, host, '--port', port); - testConnect(true, '--host', host, '--port', port, 'test'); - testConnect(true, '--host', host, '--port', port, 'admin'); - testConnect(true, `mongodb://${host}:${port}/test`); - testConnect(true, `mongodb://${host}:${port}/test?connectTimeoutMS=10000`); +testConnect(true, `${host}:${port}`); +testConnect(true, `${host}:${port}/test`); +testConnect(true, `${host}:${port}/admin`); +testConnect(true, host, '--port', port); +testConnect(true, '--host', host, '--port', port, 'test'); +testConnect(true, '--host', host, '--port', port, 'admin'); +testConnect(true, `mongodb://${host}:${port}/test`); +testConnect(true, `mongodb://${host}:${port}/test?connectTimeoutMS=10000`); - // if a full URI is provided, you cannot also specify host or port - testConnect(false, `${host}/test`, '--port', port); - testConnect(false, `mongodb://${host}:${port}/test`, '--port', port); - testConnect(false, `mongodb://${host}:${port}/test`, '--host', host); - testConnect(false, `mongodb://${host}:${port}/test`, '--host', host, '--port', port); +// if a full URI is provided, you cannot also specify host or port +testConnect(false, `${host}/test`, '--port', port); +testConnect(false, `mongodb://${host}:${port}/test`, '--port', port); +testConnect(false, `mongodb://${host}:${port}/test`, '--host', host); +testConnect(false, `mongodb://${host}:${port}/test`, '--host', host, '--port', port); })(); diff --git a/jstests/core/single_batch.js b/jstests/core/single_batch.js index ccf9f73362f..b06e5ce7aa5 100644 --- a/jstests/core/single_batch.js +++ b/jstests/core/single_batch.js @@ -1,21 +1,21 @@ // Test the "single batch" semantics of negative limit. (function() { - 'use strict'; +'use strict'; - var coll = db.jstests_single_batch; - coll.drop(); +var coll = db.jstests_single_batch; +coll.drop(); - // Approximately 1 MB. - var padding = new Array(1024 * 1024).join("x"); +// Approximately 1 MB. +var padding = new Array(1024 * 1024).join("x"); - // Insert ~20 MB of data. 
- for (var i = 0; i < 20; i++) { - assert.writeOK(coll.insert({_id: i, padding: padding})); - } +// Insert ~20 MB of data. +for (var i = 0; i < 20; i++) { + assert.writeOK(coll.insert({_id: i, padding: padding})); +} - // The limit is 18, but we should end up with fewer documents since 18 docs won't fit in a - // single 16 MB batch. - var numResults = coll.find().limit(-18).itcount(); - assert.lt(numResults, 18); - assert.gt(numResults, 0); +// The limit is 18, but we should end up with fewer documents since 18 docs won't fit in a +// single 16 MB batch. +var numResults = coll.find().limit(-18).itcount(); +assert.lt(numResults, 18); +assert.gt(numResults, 0); })(); diff --git a/jstests/core/sort1.js b/jstests/core/sort1.js index 50599ad340f..edd787306b0 100644 --- a/jstests/core/sort1.js +++ b/jstests/core/sort1.js @@ -1,70 +1,69 @@ (function() { - 'use strict'; +'use strict'; - var coll = db.sort1; - coll.drop(); +var coll = db.sort1; +coll.drop(); - coll.save({x: 3, z: 33}); - coll.save({x: 5, z: 33}); - coll.save({x: 2, z: 33}); - coll.save({x: 3, z: 33}); - coll.save({x: 1, z: 33}); +coll.save({x: 3, z: 33}); +coll.save({x: 5, z: 33}); +coll.save({x: 2, z: 33}); +coll.save({x: 3, z: 33}); +coll.save({x: 1, z: 33}); - for (var pass = 0; pass < 2; pass++) { - assert(coll.find().sort({x: 1})[0].x == 1); - assert(coll.find().sort({x: 1}).skip(1)[0].x == 2); - assert(coll.find().sort({x: -1})[0].x == 5); - assert(coll.find().sort({x: -1})[1].x == 3); - assert.eq(coll.find().sort({x: -1}).skip(0)[0].x, 5); - assert.eq(coll.find().sort({x: -1}).skip(1)[0].x, 3); - coll.ensureIndex({x: 1}); - } +for (var pass = 0; pass < 2; pass++) { + assert(coll.find().sort({x: 1})[0].x == 1); + assert(coll.find().sort({x: 1}).skip(1)[0].x == 2); + assert(coll.find().sort({x: -1})[0].x == 5); + assert(coll.find().sort({x: -1})[1].x == 3); + assert.eq(coll.find().sort({x: -1}).skip(0)[0].x, 5); + assert.eq(coll.find().sort({x: -1}).skip(1)[0].x, 3); + coll.ensureIndex({x: 1}); +} - assert(coll.validate().valid); +assert(coll.validate().valid); - coll.drop(); - coll.save({x: 'a'}); - coll.save({x: 'aba'}); - coll.save({x: 'zed'}); - coll.save({x: 'foo'}); +coll.drop(); +coll.save({x: 'a'}); +coll.save({x: 'aba'}); +coll.save({x: 'zed'}); +coll.save({x: 'foo'}); - for (var pass = 0; pass < 2; pass++) { - assert.eq("a", coll.find().sort({'x': 1}).limit(1).next().x, "c.1"); - assert.eq("a", coll.find().sort({'x': 1}).next().x, "c.2"); - assert.eq("zed", coll.find().sort({'x': -1}).limit(1).next().x, "c.3"); - assert.eq("zed", coll.find().sort({'x': -1}).next().x, "c.4"); - coll.ensureIndex({x: 1}); - } +for (var pass = 0; pass < 2; pass++) { + assert.eq("a", coll.find().sort({'x': 1}).limit(1).next().x, "c.1"); + assert.eq("a", coll.find().sort({'x': 1}).next().x, "c.2"); + assert.eq("zed", coll.find().sort({'x': -1}).limit(1).next().x, "c.3"); + assert.eq("zed", coll.find().sort({'x': -1}).next().x, "c.4"); + coll.ensureIndex({x: 1}); +} - assert(coll.validate().valid); +assert(coll.validate().valid); - // Ensure that sorts with a collation and no index return the correct ordering. Here we use the - // 'numericOrdering' option which orders number-like strings by their numerical values. - if (db.getMongo().useReadCommands()) { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, str: '1000'})); - assert.writeOK(coll.insert({_id: 1, str: '5'})); - assert.writeOK(coll.insert({_id: 2, str: '200'})); +// Ensure that sorts with a collation and no index return the correct ordering. 
Here we use the +// 'numericOrdering' option which orders number-like strings by their numerical values. +if (db.getMongo().useReadCommands()) { + coll.drop(); + assert.writeOK(coll.insert({_id: 0, str: '1000'})); + assert.writeOK(coll.insert({_id: 1, str: '5'})); + assert.writeOK(coll.insert({_id: 2, str: '200'})); - var cursor = - coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true}); - assert.eq(cursor.next(), {_id: 0, str: '1000'}); - assert.eq(cursor.next(), {_id: 2, str: '200'}); - assert.eq(cursor.next(), {_id: 1, str: '5'}); - assert(!cursor.hasNext()); - } + var cursor = coll.find().sort({str: -1}).collation({locale: 'en_US', numericOrdering: true}); + assert.eq(cursor.next(), {_id: 0, str: '1000'}); + assert.eq(cursor.next(), {_id: 2, str: '200'}); + assert.eq(cursor.next(), {_id: 1, str: '5'}); + assert(!cursor.hasNext()); +} - // Ensure that sorting of arrays correctly respects a collation with numeric ordering. - if (db.getMongo().useReadCommands()) { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, strs: ['1000', '500']})); - assert.writeOK(coll.insert({_id: 1, strs: ['2000', '60']})); - cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({ - locale: 'en_US', - numericOrdering: true - }); - assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']}); - assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']}); - assert(!cursor.hasNext()); - } +// Ensure that sorting of arrays correctly respects a collation with numeric ordering. +if (db.getMongo().useReadCommands()) { + coll.drop(); + assert.writeOK(coll.insert({_id: 0, strs: ['1000', '500']})); + assert.writeOK(coll.insert({_id: 1, strs: ['2000', '60']})); + cursor = coll.find({strs: {$lt: '1000'}}).sort({strs: 1}).collation({ + locale: 'en_US', + numericOrdering: true + }); + assert.eq(cursor.next(), {_id: 1, strs: ['2000', '60']}); + assert.eq(cursor.next(), {_id: 0, strs: ['1000', '500']}); + assert(!cursor.hasNext()); +} })(); diff --git a/jstests/core/sort3.js b/jstests/core/sort3.js index 1a1df005fb3..5e5f3313f51 100644 --- a/jstests/core/sort3.js +++ b/jstests/core/sort3.js @@ -1,13 +1,13 @@ (function() { - "use strict"; +"use strict"; - const coll = db.sort3; - coll.drop(); +const coll = db.sort3; +coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 5})); - assert.writeOK(coll.insert({a: 3})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 5})); +assert.writeOK(coll.insert({a: 3})); - assert.eq([1, 3, 5], coll.find().sort({a: 1}).toArray().map(doc => doc.a)); - assert.eq([5, 3, 1], coll.find().sort({a: -1}).toArray().map(doc => doc.a)); +assert.eq([1, 3, 5], coll.find().sort({a: 1}).toArray().map(doc => doc.a)); +assert.eq([5, 3, 1], coll.find().sort({a: -1}).toArray().map(doc => doc.a)); }()); diff --git a/jstests/core/sort4.js b/jstests/core/sort4.js index ef33e779d8e..63d7f3810bd 100644 --- a/jstests/core/sort4.js +++ b/jstests/core/sort4.js @@ -1,45 +1,45 @@ (function() { - "use strict"; - - const coll = db.sort4; - coll.drop(); - - function nice(sort, correct, extra) { - const c = coll.find().sort(sort); - let s = ""; - c.forEach(function(z) { - if (s.length) { - s += ","; - } - s += z.name; - if (z.prename) { - s += z.prename; - } - }); - if (correct) { - assert.eq(correct, s, tojson(sort) + "(" + extra + ")"); +"use strict"; + +const coll = db.sort4; +coll.drop(); + +function nice(sort, correct, extra) { + const c = coll.find().sort(sort); + let s = ""; + c.forEach(function(z) { + if (s.length) { + s += ","; + } 
+ s += z.name; + if (z.prename) { + s += z.prename; } - return s; + }); + if (correct) { + assert.eq(correct, s, tojson(sort) + "(" + extra + ")"); } + return s; +} - assert.writeOK(coll.insert({name: 'A', prename: 'B'})); - assert.writeOK(coll.insert({name: 'A', prename: 'C'})); - assert.writeOK(coll.insert({name: 'B', prename: 'B'})); - assert.writeOK(coll.insert({name: 'B', prename: 'D'})); +assert.writeOK(coll.insert({name: 'A', prename: 'B'})); +assert.writeOK(coll.insert({name: 'A', prename: 'C'})); +assert.writeOK(coll.insert({name: 'B', prename: 'B'})); +assert.writeOK(coll.insert({name: 'B', prename: 'D'})); - nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3"); - nice({prename: 1, name: 1}, "AB,BB,AC,BD", "s3"); +nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3"); +nice({prename: 1, name: 1}, "AB,BB,AC,BD", "s3"); - assert.writeOK(coll.insert({name: 'A'})); - nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1"); +assert.writeOK(coll.insert({name: 'A'})); +nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1"); - assert.writeOK(coll.insert({name: 'C'})); - nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282 +assert.writeOK(coll.insert({name: 'C'})); +nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282 - assert.commandWorked(coll.ensureIndex({name: 1, prename: 1})); - nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ia"); // SERVER-282 +assert.commandWorked(coll.ensureIndex({name: 1, prename: 1})); +nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ia"); // SERVER-282 - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.ensureIndex({name: 1})); - nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ib"); // SERVER-282 +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.ensureIndex({name: 1})); +nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ib"); // SERVER-282 }()); diff --git a/jstests/core/sort_array.js b/jstests/core/sort_array.js index 48ccdea93c4..20ae0187693 100644 --- a/jstests/core/sort_array.js +++ b/jstests/core/sort_array.js @@ -4,225 +4,193 @@ * Tests for sorting documents by fields that contain arrays. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - let coll = db.jstests_array_sort; +let coll = db.jstests_array_sort; - /** - * Runs a $match-$sort-$project query as both a find and then an aggregate. Asserts that the - * result set, after being converted to an array, is equal to 'expected'. Also asserts that the - * find plan uses the SORT stage and the agg plan uses the "$sort" agg stage. - */ - function testAggAndFindSort({filter, sort, project, hint, expected}) { - let cursor = coll.find(filter, project).sort(sort); - assert.eq(cursor.toArray(), expected); - if (hint) { - // If there was a hint specified, make sure we get the same results with the hint. - cursor = coll.find(filter, project).sort(sort).hint(hint); - assert.eq(cursor.toArray(), expected); - } - let explain = coll.find(filter, project).sort(sort).explain(); - assert(planHasStage(db, explain, "SORT")); - - let pipeline = [ - {$_internalInhibitOptimization: {}}, - {$match: filter}, - {$sort: sort}, - {$project: project}, - ]; - cursor = coll.aggregate(pipeline); +/** + * Runs a $match-$sort-$project query as both a find and then an aggregate. Asserts that the + * result set, after being converted to an array, is equal to 'expected'. Also asserts that the + * find plan uses the SORT stage and the agg plan uses the "$sort" agg stage. 
+ */ +function testAggAndFindSort({filter, sort, project, hint, expected}) { + let cursor = coll.find(filter, project).sort(sort); + assert.eq(cursor.toArray(), expected); + if (hint) { + // If there was a hint specified, make sure we get the same results with the hint. + cursor = coll.find(filter, project).sort(sort).hint(hint); assert.eq(cursor.toArray(), expected); - explain = coll.explain().aggregate(pipeline); - assert(aggPlanHasStage(explain, "$sort")); } - - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]})); - - // Sanity check that a sort on "_id" is usually pushed down into the query layer, but that - // $_internalInhibitOptimization prevents this from happening. This makes sure that this test is - // actually exercising the agg blocking sort implementation. - let explain = coll.explain().aggregate([{$sort: {_id: 1}}]); - assert(!aggPlanHasStage(explain, "$sort")); - explain = coll.explain().aggregate([{$_internalInhibitOptimization: {}}, {$sort: {_id: 1}}]); - assert(aggPlanHasStage(explain, "$sort")); - - // Ascending sort, without an index. - testAggAndFindSort({ - filter: {a: {$gte: 2}}, - sort: {a: 1}, - project: {_id: 1, a: 1}, - expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}] - }); - - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]})); - - // Descending sort, without an index. - testAggAndFindSort({ - filter: {a: {$gte: 2}}, - sort: {a: -1}, - project: {_id: 1, a: 1}, - expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}] - }); - - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]})); - assert.commandWorked(coll.createIndex({a: 1})); - - // Ascending sort, in the presence of an index. The multikey index should not be used to provide - // the sort. - testAggAndFindSort({ - filter: {a: {$gte: 2}}, - sort: {a: 1}, - project: {_id: 1, a: 1}, - expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}] - }); - - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]})); - - // Descending sort, in the presence of an index. - testAggAndFindSort({ - filter: {a: {$gte: 2}}, - sort: {a: -1}, - project: {_id: 1, a: 1}, - expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}] - }); - - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]})); - assert.writeOK(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]})); - - // Compound mixed ascending/descending sorts, without an index. Sort key for doc with _id: 0 is - // {'': 0, '': 9}. Sort key for doc with _id: 1 is {'': 0, '': 8}. - testAggAndFindSort({ - filter: {}, - sort: {"x.y": 1, "x.z": -1}, - project: {_id: 1}, - expected: [{_id: 0}, {_id: 1}] - }); - - // Sort key for doc with _id: 0 is {'': 4, '': 7}. Sort key for doc with _id: 1 is {'': 1, '': - // 7}. - testAggAndFindSort({ - filter: {}, - sort: {"x.y": -1, "x.z": 1}, - project: {_id: 1}, - expected: [{_id: 0}, {_id: 1}] - }); - - assert.commandWorked(coll.createIndex({"x.y": 1, "x.z": -1})); - - // Compound mixed ascending/descending sorts, with an index. 
- testAggAndFindSort({ - filter: {}, - sort: {"x.y": 1, "x.z": -1}, - project: {_id: 1}, - expected: [{_id: 0}, {_id: 1}] - }); - testAggAndFindSort({ - filter: {}, - sort: {"x.y": -1, "x.z": 1}, - project: {_id: 1}, - expected: [{_id: 0}, {_id: 1}] - }); - - // Test that a multikey index can provide a sort over a non-multikey field. - coll.drop(); - assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); - assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}})); - explain = coll.find({a: 2}).sort({"b.c": -1}).explain(); - assert(planHasStage(db, explain, "IXSCAN")); - assert(!planHasStage(db, explain, "SORT")); - - const pipeline = [{$match: {a: 2}}, {$sort: {"b.c": -1}}]; + let explain = coll.find(filter, project).sort(sort).explain(); + assert(planHasStage(db, explain, "SORT")); + + let pipeline = [ + {$_internalInhibitOptimization: {}}, + {$match: filter}, + {$sort: sort}, + {$project: project}, + ]; + cursor = coll.aggregate(pipeline); + assert.eq(cursor.toArray(), expected); explain = coll.explain().aggregate(pipeline); - assert(isQueryPlan(explain)); - assert(planHasStage(db, explain, "IXSCAN")); - assert(!planHasStage(db, explain, "SORT")); - - // Test that we can correctly sort by an array field in agg when there are additional fields not - // involved in the sort pattern. - coll.drop(); - assert.writeOK(coll.insert( - {_id: 0, a: 1, b: {c: 1}, d: [{e: {f: 1, g: [6, 5, 4]}}, {e: {g: [3, 2, 1]}}]})); - assert.writeOK(coll.insert( - {_id: 1, a: 2, b: {c: 2}, d: [{e: {f: 2, g: [5, 4, 3]}}, {e: {g: [2, 1, 0]}}]})); - - testAggAndFindSort( - {filter: {}, sort: {"d.e.g": 1}, project: {_id: 1}, expected: [{_id: 1}, {_id: 0}]}); - - // Test a sort over the trailing field of a compound index, where the two fields of the index - // share a path prefix. This is designed as a regression test for SERVER-31858. - coll.drop(); - assert.writeOK(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]})); - assert.writeOK(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]})); - assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]})); - assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1})); - testAggAndFindSort({ - filter: {"a.b": 1}, - project: {_id: 1}, - sort: {"a.c": 1}, - expected: [{_id: 0}, {_id: 1}, {_id: 2}] - }); - - // Test that an indexed and unindexed sort return the same thing for a path "a.x" which - // traverses through an array. - coll.drop(); - assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}]})); - assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}]})); - assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}]})); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); - assert.commandWorked(coll.createIndex({"a.x": 1})); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - hint: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); - - // Now repeat the test with multiple entries along the path "a.x". 
- coll.drop(); - assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}, {x: 3}]})); - assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}, {x: 4}]})); - assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}, {x: 4}]})); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); - assert.commandWorked(coll.createIndex({"a.x": 1})); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); - testAggAndFindSort({ - filter: {}, - project: {_id: 1}, - sort: {"a.x": 1}, - hint: {"a.x": 1}, - expected: [{_id: 1}, {_id: 0}, {_id: 2}] - }); + assert(aggPlanHasStage(explain, "$sort")); +} + +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]})); + +// Sanity check that a sort on "_id" is usually pushed down into the query layer, but that +// $_internalInhibitOptimization prevents this from happening. This makes sure that this test is +// actually exercising the agg blocking sort implementation. +let explain = coll.explain().aggregate([{$sort: {_id: 1}}]); +assert(!aggPlanHasStage(explain, "$sort")); +explain = coll.explain().aggregate([{$_internalInhibitOptimization: {}}, {$sort: {_id: 1}}]); +assert(aggPlanHasStage(explain, "$sort")); + +// Ascending sort, without an index. +testAggAndFindSort({ + filter: {a: {$gte: 2}}, + sort: {a: 1}, + project: {_id: 1, a: 1}, + expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}] +}); + +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]})); + +// Descending sort, without an index. +testAggAndFindSort({ + filter: {a: {$gte: 2}}, + sort: {a: -1}, + project: {_id: 1, a: 1}, + expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}] +}); + +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [8, 4, -1]})); +assert.commandWorked(coll.createIndex({a: 1})); + +// Ascending sort, in the presence of an index. The multikey index should not be used to provide +// the sort. +testAggAndFindSort({ + filter: {a: {$gte: 2}}, + sort: {a: 1}, + project: {_id: 1, a: 1}, + expected: [{_id: 1, a: [8, 4, -1]}, {_id: 0, a: [3, 0, 1]}] +}); + +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [3, 0, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [0, 4, -1]})); + +// Descending sort, in the presence of an index. +testAggAndFindSort({ + filter: {a: {$gte: 2}}, + sort: {a: -1}, + project: {_id: 1, a: 1}, + expected: [{_id: 1, a: [0, 4, -1]}, {_id: 0, a: [3, 0, 1]}] +}); + +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, x: [{y: [4, 0, 1], z: 7}, {y: 0, z: 9}]})); +assert.writeOK(coll.insert({_id: 1, x: [{y: 1, z: 7}, {y: 0, z: [8, 6]}]})); + +// Compound mixed ascending/descending sorts, without an index. Sort key for doc with _id: 0 is +// {'': 0, '': 9}. Sort key for doc with _id: 1 is {'': 0, '': 8}. +testAggAndFindSort( + {filter: {}, sort: {"x.y": 1, "x.z": -1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]}); + +// Sort key for doc with _id: 0 is {'': 4, '': 7}. Sort key for doc with _id: 1 is {'': 1, '': +// 7}. +testAggAndFindSort( + {filter: {}, sort: {"x.y": -1, "x.z": 1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]}); + +assert.commandWorked(coll.createIndex({"x.y": 1, "x.z": -1})); + +// Compound mixed ascending/descending sorts, with an index. 
+testAggAndFindSort( + {filter: {}, sort: {"x.y": 1, "x.z": -1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]}); +testAggAndFindSort( + {filter: {}, sort: {"x.y": -1, "x.z": 1}, project: {_id: 1}, expected: [{_id: 0}, {_id: 1}]}); + +// Test that a multikey index can provide a sort over a non-multikey field. +coll.drop(); +assert.commandWorked(coll.createIndex({a: 1, "b.c": 1})); +assert.writeOK(coll.insert({a: [1, 2, 3], b: {c: 9}})); +explain = coll.find({a: 2}).sort({"b.c": -1}).explain(); +assert(planHasStage(db, explain, "IXSCAN")); +assert(!planHasStage(db, explain, "SORT")); + +const pipeline = [{$match: {a: 2}}, {$sort: {"b.c": -1}}]; +explain = coll.explain().aggregate(pipeline); +assert(isQueryPlan(explain)); +assert(planHasStage(db, explain, "IXSCAN")); +assert(!planHasStage(db, explain, "SORT")); + +// Test that we can correctly sort by an array field in agg when there are additional fields not +// involved in the sort pattern. +coll.drop(); +assert.writeOK( + coll.insert({_id: 0, a: 1, b: {c: 1}, d: [{e: {f: 1, g: [6, 5, 4]}}, {e: {g: [3, 2, 1]}}]})); +assert.writeOK( + coll.insert({_id: 1, a: 2, b: {c: 2}, d: [{e: {f: 2, g: [5, 4, 3]}}, {e: {g: [2, 1, 0]}}]})); + +testAggAndFindSort( + {filter: {}, sort: {"d.e.g": 1}, project: {_id: 1}, expected: [{_id: 1}, {_id: 0}]}); + +// Test a sort over the trailing field of a compound index, where the two fields of the index +// share a path prefix. This is designed as a regression test for SERVER-31858. +coll.drop(); +assert.writeOK(coll.insert({_id: 2, a: [{b: 1, c: 2}, {b: 2, c: 3}]})); +assert.writeOK(coll.insert({_id: 0, a: [{b: 2, c: 0}, {b: 1, c: 4}]})); +assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 5}, {b: 2, c: 1}]})); +assert.commandWorked(coll.createIndex({"a.b": 1, "a.c": 1})); +testAggAndFindSort({ + filter: {"a.b": 1}, + project: {_id: 1}, + sort: {"a.c": 1}, + expected: [{_id: 0}, {_id: 1}, {_id: 2}] +}); + +// Test that an indexed and unindexed sort return the same thing for a path "a.x" which +// traverses through an array. +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}]})); +assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}]})); +assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}]})); +testAggAndFindSort( + {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]}); +assert.commandWorked(coll.createIndex({"a.x": 1})); +testAggAndFindSort( + {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]}); +testAggAndFindSort({ + filter: {}, + project: {_id: 1}, + sort: {"a.x": 1}, + hint: {"a.x": 1}, + expected: [{_id: 1}, {_id: 0}, {_id: 2}] +}); + +// Now repeat the test with multiple entries along the path "a.x". 
+coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: [{x: 2}, {x: 3}]})); +assert.commandWorked(coll.insert({_id: 1, a: [{x: 1}, {x: 4}]})); +assert.commandWorked(coll.insert({_id: 2, a: [{x: 3}, {x: 4}]})); +testAggAndFindSort( + {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]}); +assert.commandWorked(coll.createIndex({"a.x": 1})); +testAggAndFindSort( + {filter: {}, project: {_id: 1}, sort: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}]}); +testAggAndFindSort({ + filter: {}, + project: {_id: 1}, + sort: {"a.x": 1}, + hint: {"a.x": 1}, + expected: [{_id: 1}, {_id: 0}, {_id: 2}] +}); }()); diff --git a/jstests/core/sorta.js b/jstests/core/sorta.js index 91f36ba3621..f030cc6a673 100644 --- a/jstests/core/sorta.js +++ b/jstests/core/sorta.js @@ -1,30 +1,30 @@ // SERVER-2905 sorting with missing fields (function() { - 'use strict'; +'use strict'; - var coll = db.jstests_sorta; - coll.drop(); +var coll = db.jstests_sorta; +coll.drop(); - const docs = [ - {_id: 0, a: MinKey}, - {_id: 1, a: []}, - {_id: 2, a: []}, - {_id: 3, a: null}, - {_id: 4}, - {_id: 5, a: null}, - {_id: 6, a: 1}, - {_id: 7, a: [2]}, - {_id: 8, a: MaxKey} - ]; - const bulk = coll.initializeUnorderedBulkOp(); - for (let doc of docs) { - bulk.insert(doc); - } - assert.writeOK(bulk.execute()); +const docs = [ + {_id: 0, a: MinKey}, + {_id: 1, a: []}, + {_id: 2, a: []}, + {_id: 3, a: null}, + {_id: 4}, + {_id: 5, a: null}, + {_id: 6, a: 1}, + {_id: 7, a: [2]}, + {_id: 8, a: MaxKey} +]; +const bulk = coll.initializeUnorderedBulkOp(); +for (let doc of docs) { + bulk.insert(doc); +} +assert.writeOK(bulk.execute()); - assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs); +assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs); - assert.commandWorked(coll.createIndex({a: 1, _id: 1})); - assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs); +assert.commandWorked(coll.createIndex({a: 1, _id: 1})); +assert.eq(coll.find().sort({a: 1, _id: 1}).toArray(), docs); })(); diff --git a/jstests/core/sortb.js b/jstests/core/sortb.js index 953062a05ec..34fb94951e9 100644 --- a/jstests/core/sortb.js +++ b/jstests/core/sortb.js @@ -1,41 +1,41 @@ // Test that the in memory sort capacity limit is checked for all "top N" sort candidates. // SERVER-4716 (function() { - "use strict"; - - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - - const t = db.jstests_sortb; - t.drop(); - - t.ensureIndex({b: 1}); - - let i; - for (i = 0; i < 100; ++i) { - t.save({a: i, b: i}); - } - - const numShards = FixtureHelpers.numberOfShardsForCollection(t); - const numLargeDocumentsToWrite = 120 * numShards; - - // These large documents will not be part of the initial set of "top 100" matches, and they will - // not be part of the final set of "top 100" matches returned to the client. However, they are - // an intermediate set of "top 100" matches and should trigger an in memory sort capacity - // exception. - const big = new Array(1024 * 1024).toString(); - for (; i < 100 + numLargeDocumentsToWrite; ++i) { - t.save({a: i, b: i, big: big}); - } - - for (; i < 200 + numLargeDocumentsToWrite; ++i) { - t.save({a: i, b: i}); - } - - assert.throws(function() { - t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount(); - }); - assert.throws(function() { - t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount(); - }); - t.drop(); +"use strict"; + +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. 
+ +const t = db.jstests_sortb; +t.drop(); + +t.ensureIndex({b: 1}); + +let i; +for (i = 0; i < 100; ++i) { + t.save({a: i, b: i}); +} + +const numShards = FixtureHelpers.numberOfShardsForCollection(t); +const numLargeDocumentsToWrite = 120 * numShards; + +// These large documents will not be part of the initial set of "top 100" matches, and they will +// not be part of the final set of "top 100" matches returned to the client. However, they are +// an intermediate set of "top 100" matches and should trigger an in memory sort capacity +// exception. +const big = new Array(1024 * 1024).toString(); +for (; i < 100 + numLargeDocumentsToWrite; ++i) { + t.save({a: i, b: i, big: big}); +} + +for (; i < 200 + numLargeDocumentsToWrite; ++i) { + t.save({a: i, b: i}); +} + +assert.throws(function() { + t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount(); +}); +assert.throws(function() { + t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount(); +}); +t.drop(); })(); diff --git a/jstests/core/sortc.js b/jstests/core/sortc.js index 2b1070b8b6b..3b6213a11c7 100644 --- a/jstests/core/sortc.js +++ b/jstests/core/sortc.js @@ -1,34 +1,34 @@ // Test sorting with skipping and multiple candidate query plans. (function() { - "use strict"; +"use strict"; - const coll = db.jstests_sortc; - coll.drop(); +const coll = db.jstests_sortc; +coll.drop(); - assert.writeOK(coll.insert({a: 1})); - assert.writeOK(coll.insert({a: 2})); +assert.writeOK(coll.insert({a: 1})); +assert.writeOK(coll.insert({a: 2})); - function checkA(a, sort, skip, query) { - query = query || {}; - assert.eq(a, coll.find(query).sort(sort).skip(skip)[0].a); - } +function checkA(a, sort, skip, query) { + query = query || {}; + assert.eq(a, coll.find(query).sort(sort).skip(skip)[0].a); +} - function checkSortAndSkip() { - checkA(1, {a: 1}, 0); - checkA(2, {a: 1}, 1); +function checkSortAndSkip() { + checkA(1, {a: 1}, 0); + checkA(2, {a: 1}, 1); - checkA(1, {a: 1}, 0, {a: {$gt: 0}, b: null}); - checkA(2, {a: 1}, 1, {a: {$gt: 0}, b: null}); + checkA(1, {a: 1}, 0, {a: {$gt: 0}, b: null}); + checkA(2, {a: 1}, 1, {a: {$gt: 0}, b: null}); - checkA(2, {a: -1}, 0); - checkA(1, {a: -1}, 1); + checkA(2, {a: -1}, 0); + checkA(1, {a: -1}, 1); - checkA(2, {a: -1}, 0, {a: {$gt: 0}, b: null}); - checkA(1, {a: -1}, 1, {a: {$gt: 0}, b: null}); - } + checkA(2, {a: -1}, 0, {a: {$gt: 0}, b: null}); + checkA(1, {a: -1}, 1, {a: {$gt: 0}, b: null}); +} - checkSortAndSkip(); +checkSortAndSkip(); - assert.commandWorked(coll.createIndex({a: 1})); - checkSortAndSkip(); +assert.commandWorked(coll.createIndex({a: 1})); +checkSortAndSkip(); }()); diff --git a/jstests/core/sortg.js b/jstests/core/sortg.js index 3e3c7bf517d..1f5a442fa35 100644 --- a/jstests/core/sortg.js +++ b/jstests/core/sortg.js @@ -4,64 +4,64 @@ // Test that a memory exception is triggered for in memory sorts, but not for indexed sorts. 
(function() { - "use strict"; +"use strict"; - const t = db.jstests_sortg; - t.drop(); +const t = db.jstests_sortg; +t.drop(); - const big = new Array(1000000).toString(); +const big = new Array(1000000).toString(); - let i; - for (i = 0; i < 100; ++i) { - t.save({b: 0}); - } +let i; +for (i = 0; i < 100; ++i) { + t.save({b: 0}); +} - for (i = 0; i < 110; ++i) { - t.save({a: 0, x: big}); - } +for (i = 0; i < 110; ++i) { + t.save({a: 0, x: big}); +} - function memoryException(sortSpec, querySpec) { - querySpec = querySpec || {}; - var ex = assert.throws(function() { - t.find(querySpec).sort(sortSpec).batchSize(1000).itcount(); - }); - assert(ex.toString().match(/Sort/)); - } - - function noMemoryException(sortSpec, querySpec) { - querySpec = querySpec || {}; +function memoryException(sortSpec, querySpec) { + querySpec = querySpec || {}; + var ex = assert.throws(function() { t.find(querySpec).sort(sortSpec).batchSize(1000).itcount(); - } + }); + assert(ex.toString().match(/Sort/)); +} + +function noMemoryException(sortSpec, querySpec) { + querySpec = querySpec || {}; + t.find(querySpec).sort(sortSpec).batchSize(1000).itcount(); +} - // Unindexed sorts. - memoryException({a: 1}); - memoryException({b: 1}); +// Unindexed sorts. +memoryException({a: 1}); +memoryException({b: 1}); - // Indexed sorts. - noMemoryException({_id: 1}); - noMemoryException({$natural: 1}); +// Indexed sorts. +noMemoryException({_id: 1}); +noMemoryException({$natural: 1}); - assert.eq(1, t.getIndexes().length); +assert.eq(1, t.getIndexes().length); - t.ensureIndex({a: 1}); - t.ensureIndex({b: 1}); - t.ensureIndex({c: 1}); +t.ensureIndex({a: 1}); +t.ensureIndex({b: 1}); +t.ensureIndex({c: 1}); - assert.eq(4, t.getIndexes().length); +assert.eq(4, t.getIndexes().length); - // These sorts are now indexed. - noMemoryException({a: 1}); - noMemoryException({b: 1}); +// These sorts are now indexed. +noMemoryException({a: 1}); +noMemoryException({b: 1}); - // A memory exception is triggered for an unindexed sort involving multiple plans. - memoryException({d: 1}, {b: null, c: null}); +// A memory exception is triggered for an unindexed sort involving multiple plans. +memoryException({d: 1}, {b: null, c: null}); - // With an indexed plan on _id:1 and an unindexed plan on b:1, the indexed plan should succeed - // even if the unindexed one would exhaust its memory limit. - noMemoryException({_id: 1}, {b: null}); +// With an indexed plan on _id:1 and an unindexed plan on b:1, the indexed plan should succeed +// even if the unindexed one would exhaust its memory limit. +noMemoryException({_id: 1}, {b: null}); - // With an unindexed plan on b:1 recorded for a query, the query should be retried when the - // unindexed plan exhausts its memory limit. - noMemoryException({_id: 1}, {b: null}); - t.drop(); +// With an unindexed plan on b:1 recorded for a query, the query should be retried when the +// unindexed plan exhausts its memory limit. +noMemoryException({_id: 1}, {b: null}); +t.drop(); })(); diff --git a/jstests/core/sorth.js b/jstests/core/sorth.js index 9d6519613d3..c096f265882 100644 --- a/jstests/core/sorth.js +++ b/jstests/core/sorth.js @@ -1,277 +1,244 @@ // Tests for the $in/sort/limit optimization combined with inequality bounds. SERVER-5777 (function() { - "use strict"; - - var t = db.jstests_sorth; - t.drop(); - - // These can be set to modify the query run by the helper find(). - var _sort; - var _limit; - var _hint; - - /** - * Generate a cursor using global parameters '_sort', '_hint', and '_limit'. 
- */ - function find(query) { - return t.find(query, {_id: 0}).sort(_sort).limit(_limit).hint(_hint); +"use strict"; + +var t = db.jstests_sorth; +t.drop(); + +// These can be set to modify the query run by the helper find(). +var _sort; +var _limit; +var _hint; + +/** + * Generate a cursor using global parameters '_sort', '_hint', and '_limit'. + */ +function find(query) { + return t.find(query, {_id: 0}).sort(_sort).limit(_limit).hint(_hint); +} + +/** + * Returns true if the elements of 'expectedMatches' match element by element with + * 'actualMatches', only considering the fields 'a' and 'b'. + * + * @param {Array} expectedMatches - expected results from a query. + * @param {Array} actualMatches - the actual results from that query. + */ +function resultsMatch(expectedMatches, actualMatches) { + if (expectedMatches.length !== actualMatches.length) { + return false; } - /** - * Returns true if the elements of 'expectedMatches' match element by element with - * 'actualMatches', only considering the fields 'a' and 'b'. - * - * @param {Array} expectedMatches - expected results from a query. - * @param {Array} actualMatches - the actual results from that query. - */ - function resultsMatch(expectedMatches, actualMatches) { - if (expectedMatches.length !== actualMatches.length) { + for (var i = 0; i < expectedMatches.length; ++i) { + if ((expectedMatches[i].a !== actualMatches[i].a) || + (expectedMatches[i].b !== actualMatches[i].b)) { return false; } - - for (var i = 0; i < expectedMatches.length; ++i) { - if ((expectedMatches[i].a !== actualMatches[i].a) || - (expectedMatches[i].b !== actualMatches[i].b)) { - return false; - } - } - return true; } - - /** - * Asserts that the given query returns results that are expected. - * - * @param {Object} options.query - the query to run. - * @param {Array.<Object>} options.expectedQueryResults - the expected results from the query. - * @param {Array.<Array>} [options.acceptableQueryResults=[options.expectedQueryResults]] - An - * array of acceptable outcomes of the query. This can be used if there are multiple results - * that are considered correct for the query. - */ - function assertMatches(options) { - const results = find(options.query).toArray(); - const acceptableQueryResults = - options.acceptableQueryResults || [options.expectedQueryResults]; - assert.gte(acceptableQueryResults.length, 1); - for (var i = 0; i < acceptableQueryResults.length; ++i) { - const validResultSet = acceptableQueryResults[i]; - - // All results should have the same number of results. - assert.eq(validResultSet.length, - results.length, - "Expected " + results.length + " results from query " + - tojson(options.query) + " but found " + validResultSet.length); - - if (resultsMatch(validResultSet, results)) { - return; - } + return true; +} + +/** + * Asserts that the given query returns results that are expected. + * + * @param {Object} options.query - the query to run. + * @param {Array.<Object>} options.expectedQueryResults - the expected results from the query. + * @param {Array.<Array>} [options.acceptableQueryResults=[options.expectedQueryResults]] - An + * array of acceptable outcomes of the query. This can be used if there are multiple results + * that are considered correct for the query. 
+ */ +function assertMatches(options) { + const results = find(options.query).toArray(); + const acceptableQueryResults = options.acceptableQueryResults || [options.expectedQueryResults]; + assert.gte(acceptableQueryResults.length, 1); + for (var i = 0; i < acceptableQueryResults.length; ++i) { + const validResultSet = acceptableQueryResults[i]; + + // All results should have the same number of results. + assert.eq(validResultSet.length, + results.length, + "Expected " + results.length + " results from query " + tojson(options.query) + + " but found " + validResultSet.length); + + if (resultsMatch(validResultSet, results)) { + return; } - throw new Error("Unexpected results for query " + tojson(options.query) + ": " + - tojson(results) + ", acceptable results were: " + - tojson(acceptableQueryResults)); - } - - /** - * Reset data, index, and _sort and _hint globals. - */ - function reset(sort, index) { - t.drop(); - t.save({a: 1, b: 1}); - t.save({a: 1, b: 2}); - t.save({a: 1, b: 3}); - t.save({a: 2, b: 0}); - t.save({a: 2, b: 3}); - t.save({a: 2, b: 5}); - t.ensureIndex(index); - _sort = sort; - _hint = index; - } - - function checkForwardDirection(options) { - // All callers specify a sort that is prefixed by b, ascending. - assert.eq(Object.keys(options.sort)[0], "b"); - assert.eq(options.sort.b, 1); - - // None of the callers specify a sort on "a". - assert(!options.sort.hasOwnProperty("a")); - - reset(options.sort, options.index); - - _limit = -1; - - // Lower bound checks. - assertMatches( - {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}}); - assertMatches( - {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0}}}); - assertMatches( - {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gte: 1}}}); - assertMatches( - {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gt: 1}}}); - assertMatches( - {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2}}}); - - // Since we are sorting on the field "b", and the sort specification doesn't include the - // field "a", any query that is expected to result in a document with a value of 3 for "b" - // has two acceptable results, since there are two documents with a value of 3 for "b". The - // same argument applies for all assertions below involving a result with a value of 3 for - // the field "b". - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$gt: 2}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$gte: 3}} - }); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 3}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 4}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}}); - - // Upper bound checks. - assertMatches( - {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 0}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 1}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 1}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}}); - - // Lower and upper bounds checks. 
- assertMatches({ - expectedQueryResults: [{a: 2, b: 0}], - query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 0}} - }); - assertMatches({ - expectedQueryResults: [{a: 2, b: 0}], - query: {a: {$in: [1, 2]}, b: {$gte: 0, $lt: 1}} - }); - assertMatches({ - expectedQueryResults: [{a: 2, b: 0}], - query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 1}} - }); - assertMatches({ - expectedQueryResults: [{a: 1, b: 1}], - query: {a: {$in: [1, 2]}, b: {$gt: 0, $lte: 1}} - }); - assertMatches({ - expectedQueryResults: [{a: 1, b: 2}], - query: {a: {$in: [1, 2]}, b: {$gte: 2, $lt: 3}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$gte: 2.5, $lte: 3}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$gt: 2.5, $lte: 3}} - }); - - // Limit is -2. - _limit = -2; - assertMatches({ - expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}], - query: {a: {$in: [1, 2]}, b: {$gte: 0}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 2}, {a: 2, b: 3}], [{a: 1, b: 2}, {a: 1, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$gt: 1}} - }); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}}); - - // With an additional document between the $in values. - t.save({a: 1.5, b: 3}); - assertMatches({ - expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}], - query: {a: {$in: [1, 2]}, b: {$gte: 0}} - }); - } - - // Basic test with an index suffix order. - checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1}}); - // With an additional index field. - checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: 1}}); - // With an additional reverse direction index field. - checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: -1}}); - // With an additional ordered index field. - checkForwardDirection({sort: {b: 1, c: 1}, index: {a: 1, b: 1, c: 1}}); - // With an additional reverse direction ordered index field. - checkForwardDirection({sort: {b: 1, c: -1}, index: {a: 1, b: 1, c: -1}}); - - function checkReverseDirection(options) { - // All callers specify a sort that is prefixed by "b", descending. - assert.eq(Object.keys(options.sort)[0], "b"); - assert.eq(options.sort.b, -1); - // None of the callers specify a sort on "a". - assert(!options.sort.hasOwnProperty("a")); - - reset(options.sort, options.index); - _limit = -1; - - // For matching documents, highest value of 'b' is 5. - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}}); - assertMatches( - {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5}}}); - assertMatches({ - expectedQueryResults: [{a: 2, b: 5}], - query: {a: {$in: [1, 2]}, b: {$lte: 5, $gte: 5}} - }); - - // For matching documents, highest value of 'b' is 2. - assertMatches( - {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}}); - assertMatches( - {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3, $gt: 1}}}); - - // For matching documents, highest value of 'b' is 1. - assertMatches({ - expectedQueryResults: [{a: 1, b: 1}], - query: {a: {$in: [1, 2]}, b: {$lt: 2, $gte: 1}} - }); - - // These queries expect 3 as the highest value of 'b' among matching documents, but there - // are two documents with a value of 3 for the field 'b'. 
Either document is acceptable, - // since there is no sort order on any other existing fields. - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lt: 5}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lt: 3.1}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lt: 3.5}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lte: 3}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lt: 3.5, $gte: 3}} - }); - assertMatches({ - acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], - query: {a: {$in: [1, 2]}, b: {$lte: 3, $gt: 0}} - }); } - - // With a descending order index. - checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1}}); - checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: 1}}); - checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: -1}}); - checkReverseDirection({sort: {b: -1, c: 1}, index: {a: 1, b: -1, c: 1}}); - checkReverseDirection({sort: {b: -1, c: -1}, index: {a: 1, b: -1, c: -1}}); + throw new Error("Unexpected results for query " + tojson(options.query) + ": " + + tojson(results) + + ", acceptable results were: " + tojson(acceptableQueryResults)); +} + +/** + * Reset data, index, and _sort and _hint globals. + */ +function reset(sort, index) { + t.drop(); + t.save({a: 1, b: 1}); + t.save({a: 1, b: 2}); + t.save({a: 1, b: 3}); + t.save({a: 2, b: 0}); + t.save({a: 2, b: 3}); + t.save({a: 2, b: 5}); + t.ensureIndex(index); + _sort = sort; + _hint = index; +} + +function checkForwardDirection(options) { + // All callers specify a sort that is prefixed by b, ascending. + assert.eq(Object.keys(options.sort)[0], "b"); + assert.eq(options.sort.b, 1); + + // None of the callers specify a sort on "a". + assert(!options.sort.hasOwnProperty("a")); + + reset(options.sort, options.index); + + _limit = -1; + + // Lower bound checks. + assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}}); + assertMatches({expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0}}}); + assertMatches({expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gte: 1}}}); + assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gt: 1}}}); + assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2}}}); + + // Since we are sorting on the field "b", and the sort specification doesn't include the + // field "a", any query that is expected to result in a document with a value of 3 for "b" + // has two acceptable results, since there are two documents with a value of 3 for "b". The + // same argument applies for all assertions below involving a result with a value of 3 for + // the field "b". 
+ assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$gt: 2}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$gte: 3}} + }); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 3}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 4}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}}); + + // Upper bound checks. + assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 0}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 1}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lte: 1}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}}); + + // Lower and upper bounds checks. + assertMatches( + {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 0}}}); + assertMatches( + {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lt: 1}}}); + assertMatches( + {expectedQueryResults: [{a: 2, b: 0}], query: {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 1}}}); + assertMatches( + {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$gt: 0, $lte: 1}}}); + assertMatches( + {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$gte: 2, $lt: 3}}}); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$gte: 2.5, $lte: 3}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$gt: 2.5, $lte: 3}} + }); + + // Limit is -2. + _limit = -2; + assertMatches({ + expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}], + query: {a: {$in: [1, 2]}, b: {$gte: 0}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 2}, {a: 2, b: 3}], [{a: 1, b: 2}, {a: 1, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$gt: 1}} + }); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gt: 4}}}); + + // With an additional document between the $in values. + t.save({a: 1.5, b: 3}); + assertMatches({ + expectedQueryResults: [{a: 2, b: 0}, {a: 1, b: 1}], + query: {a: {$in: [1, 2]}, b: {$gte: 0}} + }); +} + +// Basic test with an index suffix order. +checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1}}); +// With an additional index field. +checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: 1}}); +// With an additional reverse direction index field. +checkForwardDirection({sort: {b: 1}, index: {a: 1, b: 1, c: -1}}); +// With an additional ordered index field. +checkForwardDirection({sort: {b: 1, c: 1}, index: {a: 1, b: 1, c: 1}}); +// With an additional reverse direction ordered index field. +checkForwardDirection({sort: {b: 1, c: -1}, index: {a: 1, b: 1, c: -1}}); + +function checkReverseDirection(options) { + // All callers specify a sort that is prefixed by "b", descending. + assert.eq(Object.keys(options.sort)[0], "b"); + assert.eq(options.sort.b, -1); + // None of the callers specify a sort on "a". + assert(!options.sort.hasOwnProperty("a")); + + reset(options.sort, options.index); + _limit = -1; + + // For matching documents, highest value of 'b' is 5. 
+ assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 0}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$gte: 5}}}); + assertMatches({expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5}}}); + assertMatches( + {expectedQueryResults: [{a: 2, b: 5}], query: {a: {$in: [1, 2]}, b: {$lte: 5, $gte: 5}}}); + + // For matching documents, highest value of 'b' is 2. + assertMatches({expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3}}}); + assertMatches( + {expectedQueryResults: [{a: 1, b: 2}], query: {a: {$in: [1, 2]}, b: {$lt: 3, $gt: 1}}}); + + // For matching documents, highest value of 'b' is 1. + assertMatches( + {expectedQueryResults: [{a: 1, b: 1}], query: {a: {$in: [1, 2]}, b: {$lt: 2, $gte: 1}}}); + + // These queries expect 3 as the highest value of 'b' among matching documents, but there + // are two documents with a value of 3 for the field 'b'. Either document is acceptable, + // since there is no sort order on any other existing fields. + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lt: 5}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lt: 3.1}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lt: 3.5}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lte: 3}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lt: 3.5, $gte: 3}} + }); + assertMatches({ + acceptableQueryResults: [[{a: 1, b: 3}], [{a: 2, b: 3}]], + query: {a: {$in: [1, 2]}, b: {$lte: 3, $gt: 0}} + }); +} + +// With a descending order index. +checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1}}); +checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: 1}}); +checkReverseDirection({sort: {b: -1}, index: {a: 1, b: -1, c: -1}}); +checkReverseDirection({sort: {b: -1, c: 1}, index: {a: 1, b: -1, c: 1}}); +checkReverseDirection({sort: {b: -1, c: -1}, index: {a: 1, b: -1, c: -1}}); }()); diff --git a/jstests/core/sortj.js b/jstests/core/sortj.js index 625f5aa6722..27420d70646 100644 --- a/jstests/core/sortj.js +++ b/jstests/core/sortj.js @@ -1,24 +1,24 @@ // Test an in memory sort memory assertion after a plan has "taken over" in the query optimizer // cursor. (function() { - "use strict"; +"use strict"; - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. 
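// Illustrative sketch (assumption, not part of sortj.js; the collection name
// 'example_big_sort' is hypothetical): sortj.js expects a blocking sort on a field with no
// supporting index to fail once it exceeds the in-memory sort limit. Outside of tests, a
// common way to avoid that failure is to run the same sort through the aggregation framework
// with allowDiskUse, which lets the sort spill to disk. Scaled down here to keep it light.
const bigSortColl = db.example_big_sort;
bigSortColl.drop();
const padding = new Array(10000).toString();
for (let i = 0; i < 100; ++i) {
    assert.writeOK(bigSortColl.insert({a: 1, b: padding}));
}
// Sort on an unindexed field, but allow spilling instead of failing on the memory limit.
assert.eq(100, bigSortColl.aggregate([{$sort: {d: 1}}], {allowDiskUse: true}).itcount());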
- const t = db.jstests_sortj; - t.drop(); +const t = db.jstests_sortj; +t.drop(); - t.ensureIndex({a: 1}); +t.ensureIndex({a: 1}); - const numShards = FixtureHelpers.numberOfShardsForCollection(t); +const numShards = FixtureHelpers.numberOfShardsForCollection(t); - const big = new Array(100000).toString(); - for (let i = 0; i < 1200 * numShards; ++i) { - t.save({a: 1, b: big}); - } +const big = new Array(100000).toString(); +for (let i = 0; i < 1200 * numShards; ++i) { + t.save({a: 1, b: big}); +} - assert.throws(function() { - t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount(); - }); - t.drop(); +assert.throws(function() { + t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount(); +}); +t.drop(); })(); diff --git a/jstests/core/sortl.js b/jstests/core/sortl.js index 247a175a6f0..d0d94473460 100644 --- a/jstests/core/sortl.js +++ b/jstests/core/sortl.js @@ -1,36 +1,36 @@ // Tests equality query on _id with a sort, intended to be tested on both mongos and mongod. For // SERVER-20641. (function() { - 'use strict'; - var coll = db.sortl; - coll.drop(); +'use strict'; +var coll = db.sortl; +coll.drop(); - assert.writeOK(coll.insert({_id: 1, a: 2})); - var res = coll.find({_id: 1}).sort({a: 1}); - assert.eq(res.next(), {_id: 1, a: 2}); - assert.eq(res.hasNext(), false); +assert.writeOK(coll.insert({_id: 1, a: 2})); +var res = coll.find({_id: 1}).sort({a: 1}); +assert.eq(res.next(), {_id: 1, a: 2}); +assert.eq(res.hasNext(), false); - res = coll.find({_id: 1}, {b: {$meta: "sortKey"}}).sort({a: 1}); - assert.eq(res.next(), {_id: 1, a: 2, b: {"": 2}}); - assert.eq(res.hasNext(), false); +res = coll.find({_id: 1}, {b: {$meta: "sortKey"}}).sort({a: 1}); +assert.eq(res.next(), {_id: 1, a: 2, b: {"": 2}}); +assert.eq(res.hasNext(), false); - res = db.runCommand({ - findAndModify: coll.getName(), - query: {_id: 1}, - update: {$set: {b: 1}}, - sort: {a: 1}, - fields: {c: {$meta: "sortKey"}} - }); - assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey update"); +res = db.runCommand({ + findAndModify: coll.getName(), + query: {_id: 1}, + update: {$set: {b: 1}}, + sort: {a: 1}, + fields: {c: {$meta: "sortKey"}} +}); +assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey update"); - res = db.runCommand({ - findAndModify: coll.getName(), - query: {_id: 1}, - remove: true, - sort: {b: 1}, - fields: {c: {$meta: "sortKey"}} - }); - assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey delete"); +res = db.runCommand({ + findAndModify: coll.getName(), + query: {_id: 1}, + remove: true, + sort: {b: 1}, + fields: {c: {$meta: "sortKey"}} +}); +assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey delete"); - coll.drop(); +coll.drop(); })(); diff --git a/jstests/core/sparse_index_supports_ne_null.js b/jstests/core/sparse_index_supports_ne_null.js index a46616bbad9..590bacc5021 100644 --- a/jstests/core/sparse_index_supports_ne_null.js +++ b/jstests/core/sparse_index_supports_ne_null.js @@ -8,200 +8,193 @@ * @tags: [assumes_unsharded_collection] */ (function() { - "use strict"; - load("jstests/libs/analyze_plan.js"); // For getPlanStages. +"use strict"; +load("jstests/libs/analyze_plan.js"); // For getPlanStages. 
- const coll = db.sparse_index_supports_ne_null; - coll.drop(); +const coll = db.sparse_index_supports_ne_null; +coll.drop(); - function checkQuery({query, shouldUseIndex, nResultsExpected, indexKeyPattern}) { - const explain = assert.commandWorked(coll.find(query).explain()); - const ixScans = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN"); +function checkQuery({query, shouldUseIndex, nResultsExpected, indexKeyPattern}) { + const explain = assert.commandWorked(coll.find(query).explain()); + const ixScans = getPlanStages(explain.queryPlanner.winningPlan, "IXSCAN"); - if (shouldUseIndex) { - assert.gte(ixScans.length, 1, explain); - assert.eq(ixScans[0].keyPattern, indexKeyPattern); - } else { - assert.eq(ixScans.length, 0, explain); - } - - assert.eq(coll.find(query).itcount(), nResultsExpected); + if (shouldUseIndex) { + assert.gte(ixScans.length, 1, explain); + assert.eq(ixScans[0].keyPattern, indexKeyPattern); + } else { + assert.eq(ixScans.length, 0, explain); } - // Non compound case. - (function() { - const query = {a: {$ne: null}}; - const elemMatchQuery = {a: {$elemMatch: {$ne: null}}}; - const keyPattern = {a: 1}; - - assert.commandWorked(coll.insert({a: 1})); - assert.commandWorked(coll.insert({a: {x: 1}})); - assert.commandWorked(coll.insert({a: null})); - assert.commandWorked(coll.insert({a: undefined})); - - assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); - - // Be sure the index is used. - checkQuery( - {query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern}); - checkQuery({ - query: elemMatchQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - - // When the index becomes multikey, it cannot support {$ne: null} queries. - assert.commandWorked(coll.insert({a: [1, 2, 3]})); - checkQuery({ - query: query, - shouldUseIndex: false, - nResultsExpected: 3, - indexKeyPattern: keyPattern - }); - // But it can support queries with {$ne: null} within an $elemMatch. - checkQuery({ - query: elemMatchQuery, - shouldUseIndex: true, - nResultsExpected: 1, - indexKeyPattern: keyPattern - }); - })(); - - // Compound case. - (function() { - const query = {a: {$ne: null}}; - const elemMatchQuery = {a: {$elemMatch: {$ne: null}}}; - const keyPattern = {a: 1, b: 1}; - - coll.drop(); - assert.commandWorked(coll.insert({a: 1, b: 1})); - assert.commandWorked(coll.insert({a: {x: 1}, b: 1})); - assert.commandWorked(coll.insert({a: null, b: 1})); - assert.commandWorked(coll.insert({a: undefined, b: 1})); - - assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); - - // Be sure the index is used. - checkQuery( - {query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern}); - checkQuery({ - query: elemMatchQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - - // When the index becomes multikey on the second field, it should still be usable. - assert.commandWorked(coll.insert({a: 1, b: [1, 2, 3]})); - checkQuery( - {query: query, shouldUseIndex: true, nResultsExpected: 3, indexKeyPattern: keyPattern}); - checkQuery({ - query: elemMatchQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - - // When the index becomes multikey on the first field, it should no longer be usable. 
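// Illustrative sketch (assumption; the collection name 'example_multikey' is hypothetical):
// the multikey transition described in the comment above can be observed directly through the
// 'isMultiKey' flag on the IXSCAN stage in explain output, using the getPlanStages helper
// this file already loads from jstests/libs/analyze_plan.js.
const mkColl = db.example_multikey;
mkColl.drop();
assert.commandWorked(mkColl.createIndex({a: 1, b: 1}, {sparse: true}));
assert.commandWorked(mkColl.insert({a: 1, b: 1}));
let mkExplain = assert.commandWorked(mkColl.find({a: 1}).hint({a: 1, b: 1}).explain());
assert.eq(false, getPlanStages(mkExplain.queryPlanner.winningPlan, "IXSCAN")[0].isMultiKey);
// An array value in the leading field turns the index multikey.
assert.commandWorked(mkColl.insert({a: [1, 2, 3], b: 1}));
mkExplain = assert.commandWorked(mkColl.find({a: 1}).hint({a: 1, b: 1}).explain());
assert.eq(true, getPlanStages(mkExplain.queryPlanner.winningPlan, "IXSCAN")[0].isMultiKey);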
- assert.commandWorked(coll.insert({a: [1, 2, 3], b: 1})); - checkQuery({ - query: query, - shouldUseIndex: false, - nResultsExpected: 4, - indexKeyPattern: keyPattern - }); - // Queries which use a $elemMatch should still be able to use the index. - checkQuery({ - query: elemMatchQuery, - shouldUseIndex: true, - nResultsExpected: 1, - indexKeyPattern: keyPattern - }); - })(); - - // Nested field multikey with $elemMatch. - (function() { - const keyPattern = {"a.b.c.d": 1}; - coll.drop(); - assert.commandWorked(coll.insert({a: {b: [{c: {d: 1}}]}})); - assert.commandWorked(coll.insert({a: {b: [{c: {d: {e: 1}}}]}})); - assert.commandWorked(coll.insert({a: {b: [{c: {d: null}}]}})); - assert.commandWorked(coll.insert({a: {b: [{c: {d: undefined}}]}})); - - assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); - - const query = {"a.b.c.d": {$ne: null}}; - // $elemMatch object can only use the index when none of the paths below the $elemMatch is - // not multikey. - const elemMatchObjectQuery = {"a.b": {$elemMatch: {"c.d": {$ne: null}}}}; - // $elemMatch value can always use the index. - const elemMatchValueQuery = {"a.b.c.d": {$elemMatch: {$ne: null}}}; - - // 'a.b' is multikey, so the index isn't used. - checkQuery({ - query: query, - shouldUseIndex: false, - nResultsExpected: 2, - indexKeyPattern: keyPattern - }); - // Since the multikey portion is above the $elemMatch, the $elemMatch query may use the - // index. - checkQuery({ - query: elemMatchObjectQuery, - shouldUseIndex: true, - nResultsExpected: 2, - indexKeyPattern: keyPattern - }); - checkQuery({ - query: elemMatchValueQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - - // Make the index become multikey on 'a' (another field above the $elemMatch). - assert.commandWorked(coll.insert({a: [{b: [{c: {d: 1}}]}]})); - checkQuery({ - query: query, - shouldUseIndex: false, - nResultsExpected: 3, - indexKeyPattern: keyPattern - }); - // The only multikey paths are still above the $elemMatch, queries which use a $elemMatch - // should still be able to use the index. - checkQuery({ - query: elemMatchObjectQuery, - shouldUseIndex: true, - nResultsExpected: 3, - indexKeyPattern: keyPattern - }); - checkQuery({ - query: elemMatchValueQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - - // Make the index multikey for 'a.b.c'. Now the $elemMatch query may not use the index. - assert.commandWorked(coll.insert({a: {b: [{c: [{d: 1}]}]}})); - checkQuery({ - query: query, - shouldUseIndex: false, - nResultsExpected: 4, - indexKeyPattern: keyPattern - }); - checkQuery({ - query: elemMatchObjectQuery, - shouldUseIndex: false, - nResultsExpected: 4, - indexKeyPattern: keyPattern - }); - checkQuery({ - query: elemMatchValueQuery, - shouldUseIndex: true, - nResultsExpected: 0, - indexKeyPattern: keyPattern - }); - })(); + assert.eq(coll.find(query).itcount(), nResultsExpected); +} + +// Non compound case. +(function() { +const query = { + a: {$ne: null} +}; +const elemMatchQuery = { + a: {$elemMatch: {$ne: null}} +}; +const keyPattern = { + a: 1 +}; + +assert.commandWorked(coll.insert({a: 1})); +assert.commandWorked(coll.insert({a: {x: 1}})); +assert.commandWorked(coll.insert({a: null})); +assert.commandWorked(coll.insert({a: undefined})); + +assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); + +// Be sure the index is used. 
+checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern}); +checkQuery({ + query: elemMatchQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); + +// When the index becomes multikey, it cannot support {$ne: null} queries. +assert.commandWorked(coll.insert({a: [1, 2, 3]})); +checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 3, indexKeyPattern: keyPattern}); +// But it can support queries with {$ne: null} within an $elemMatch. +checkQuery({ + query: elemMatchQuery, + shouldUseIndex: true, + nResultsExpected: 1, + indexKeyPattern: keyPattern +}); +})(); + +// Compound case. +(function() { +const query = { + a: {$ne: null} +}; +const elemMatchQuery = { + a: {$elemMatch: {$ne: null}} +}; +const keyPattern = { + a: 1, + b: 1 +}; + +coll.drop(); +assert.commandWorked(coll.insert({a: 1, b: 1})); +assert.commandWorked(coll.insert({a: {x: 1}, b: 1})); +assert.commandWorked(coll.insert({a: null, b: 1})); +assert.commandWorked(coll.insert({a: undefined, b: 1})); + +assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); + +// Be sure the index is used. +checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 2, indexKeyPattern: keyPattern}); +checkQuery({ + query: elemMatchQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); + +// When the index becomes multikey on the second field, it should still be usable. +assert.commandWorked(coll.insert({a: 1, b: [1, 2, 3]})); +checkQuery({query: query, shouldUseIndex: true, nResultsExpected: 3, indexKeyPattern: keyPattern}); +checkQuery({ + query: elemMatchQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); + +// When the index becomes multikey on the first field, it should no longer be usable. +assert.commandWorked(coll.insert({a: [1, 2, 3], b: 1})); +checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 4, indexKeyPattern: keyPattern}); +// Queries which use a $elemMatch should still be able to use the index. +checkQuery({ + query: elemMatchQuery, + shouldUseIndex: true, + nResultsExpected: 1, + indexKeyPattern: keyPattern +}); +})(); + +// Nested field multikey with $elemMatch. +(function() { +const keyPattern = { + "a.b.c.d": 1 +}; +coll.drop(); +assert.commandWorked(coll.insert({a: {b: [{c: {d: 1}}]}})); +assert.commandWorked(coll.insert({a: {b: [{c: {d: {e: 1}}}]}})); +assert.commandWorked(coll.insert({a: {b: [{c: {d: null}}]}})); +assert.commandWorked(coll.insert({a: {b: [{c: {d: undefined}}]}})); + +assert.commandWorked(coll.createIndex(keyPattern, {sparse: true})); + +const query = { + "a.b.c.d": {$ne: null} +}; +// $elemMatch object can only use the index when none of the paths below the $elemMatch is +// not multikey. +const elemMatchObjectQuery = { + "a.b": {$elemMatch: {"c.d": {$ne: null}}} +}; +// $elemMatch value can always use the index. +const elemMatchValueQuery = { + "a.b.c.d": {$elemMatch: {$ne: null}} +}; + +// 'a.b' is multikey, so the index isn't used. +checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 2, indexKeyPattern: keyPattern}); +// Since the multikey portion is above the $elemMatch, the $elemMatch query may use the +// index. 
+checkQuery({ + query: elemMatchObjectQuery, + shouldUseIndex: true, + nResultsExpected: 2, + indexKeyPattern: keyPattern +}); +checkQuery({ + query: elemMatchValueQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); + +// Make the index become multikey on 'a' (another field above the $elemMatch). +assert.commandWorked(coll.insert({a: [{b: [{c: {d: 1}}]}]})); +checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 3, indexKeyPattern: keyPattern}); +// The only multikey paths are still above the $elemMatch, queries which use a $elemMatch +// should still be able to use the index. +checkQuery({ + query: elemMatchObjectQuery, + shouldUseIndex: true, + nResultsExpected: 3, + indexKeyPattern: keyPattern +}); +checkQuery({ + query: elemMatchValueQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); + +// Make the index multikey for 'a.b.c'. Now the $elemMatch query may not use the index. +assert.commandWorked(coll.insert({a: {b: [{c: [{d: 1}]}]}})); +checkQuery({query: query, shouldUseIndex: false, nResultsExpected: 4, indexKeyPattern: keyPattern}); +checkQuery({ + query: elemMatchObjectQuery, + shouldUseIndex: false, + nResultsExpected: 4, + indexKeyPattern: keyPattern +}); +checkQuery({ + query: elemMatchValueQuery, + shouldUseIndex: true, + nResultsExpected: 0, + indexKeyPattern: keyPattern +}); +})(); })(); diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js index 1c66e463f43..0da9f636a3b 100644 --- a/jstests/core/startup_log.js +++ b/jstests/core/startup_log.js @@ -13,103 +13,101 @@ load('jstests/aggregation/extras/utils.js'); (function() { - 'use strict'; +'use strict'; - // Check that smallArray is entirely contained by largeArray - // returns false if a member of smallArray is not in largeArray - function arrayIsSubset(smallArray, largeArray) { - for (var i = 0; i < smallArray.length; i++) { - if (!Array.contains(largeArray, smallArray[i])) { - print("Could not find " + smallArray[i] + " in largeArray"); - return false; - } +// Check that smallArray is entirely contained by largeArray +// returns false if a member of smallArray is not in largeArray +function arrayIsSubset(smallArray, largeArray) { + for (var i = 0; i < smallArray.length; i++) { + if (!Array.contains(largeArray, smallArray[i])) { + print("Could not find " + smallArray[i] + " in largeArray"); + return false; } - - return true; } - // Test startup_log - var stats = db.getSisterDB("local").startup_log.stats(); - assert(stats.capped); + return true; +} - var latestStartUpLog = - db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next(); - var serverStatus = db._adminCommand("serverStatus"); - var cmdLine = db._adminCommand("getCmdLineOpts").parsed; +// Test startup_log +var stats = db.getSisterDB("local").startup_log.stats(); +assert(stats.capped); - // Test that the startup log has the expected keys - var verbose = false; - var expectedKeys = - ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"]; - var keys = Object.keySet(latestStartUpLog); - assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed'); +var latestStartUpLog = + db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next(); +var serverStatus = db._adminCommand("serverStatus"); +var cmdLine = db._adminCommand("getCmdLineOpts").parsed; - // Tests _id implicitly - should be comprised of host-timestamp - // Setup expected startTime and startTimeLocal from the supplied timestamp - 
var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp - var _idUptime = _id.pop(); - var _idHost = _id.join('-'); - var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000; - var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime +// Test that the startup log has the expected keys +var verbose = false; +var expectedKeys = + ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"]; +var keys = Object.keySet(latestStartUpLog); +assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed'); - assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id"); - assert.eq(serverStatus.host.split(':')[0], - latestStartUpLog.hostname, - "Hostname doesn't match one in server status"); - assert.closeWithinMS(startTime, - latestStartUpLog.startTime, - "StartTime doesn't match one from _id", - 2000); // Expect less than 2 sec delta - assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts"); - assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus"); +// Tests _id implicitly - should be comprised of host-timestamp +// Setup expected startTime and startTimeLocal from the supplied timestamp +var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp +var _idUptime = _id.pop(); +var _idHost = _id.join('-'); +var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000; +var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime - // Test buildinfo - var buildinfo = db.runCommand("buildinfo"); - delete buildinfo.ok; // Delete extra meta info not in startup_log - delete buildinfo.operationTime; // Delete extra meta info not in startup_log - delete buildinfo.$clusterTime; // Delete extra meta info not in startup_log - var isMaster = db._adminCommand("ismaster"); +assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id"); +assert.eq(serverStatus.host.split(':')[0], + latestStartUpLog.hostname, + "Hostname doesn't match one in server status"); +assert.closeWithinMS(startTime, + latestStartUpLog.startTime, + "StartTime doesn't match one from _id", + 2000); // Expect less than 2 sec delta +assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts"); +assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus"); - // Test buildinfo has the expected keys - var expectedKeys = [ - "version", - "gitVersion", - "allocator", - "versionArray", - "javascriptEngine", - "openssl", - "buildEnvironment", - "debug", - "maxBsonObjectSize", - "bits", - "modules" - ]; +// Test buildinfo +var buildinfo = db.runCommand("buildinfo"); +delete buildinfo.ok; // Delete extra meta info not in startup_log +delete buildinfo.operationTime; // Delete extra meta info not in startup_log +delete buildinfo.$clusterTime; // Delete extra meta info not in startup_log +var isMaster = db._adminCommand("ismaster"); - var keys = Object.keySet(latestStartUpLog.buildinfo); - // Disabled to check - assert(arrayIsSubset(expectedKeys, keys), - "buildinfo keys failed! 
\n expected:\t" + expectedKeys + "\n actual:\t" + keys); - assert.eq(buildinfo, - latestStartUpLog.buildinfo, - "buildinfo doesn't match that from buildinfo command"); +// Test buildinfo has the expected keys +var expectedKeys = [ + "version", + "gitVersion", + "allocator", + "versionArray", + "javascriptEngine", + "openssl", + "buildEnvironment", + "debug", + "maxBsonObjectSize", + "bits", + "modules" +]; - // Test version and version Array - var version = latestStartUpLog.buildinfo.version.split('-')[0]; - var versionArray = latestStartUpLog.buildinfo.versionArray; - var versionArrayCleaned = versionArray.slice(0, 3); - if (versionArray[3] == -100) { - versionArrayCleaned[2] -= 1; - } +var keys = Object.keySet(latestStartUpLog.buildinfo); +// Disabled to check +assert(arrayIsSubset(expectedKeys, keys), + "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys); +assert.eq( + buildinfo, latestStartUpLog.buildinfo, "buildinfo doesn't match that from buildinfo command"); - assert.eq(serverStatus.version, - latestStartUpLog.buildinfo.version, - "Mongo version doesn't match that from ServerStatus"); - assert.eq( - version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray"); - var jsEngine = latestStartUpLog.buildinfo.javascriptEngine; - assert((jsEngine == "none") || jsEngine.startsWith("mozjs")); - assert.eq(isMaster.maxBsonObjectSize, - latestStartUpLog.buildinfo.maxBsonObjectSize, - "maxBsonObjectSize doesn't match one from ismaster"); +// Test version and version Array +var version = latestStartUpLog.buildinfo.version.split('-')[0]; +var versionArray = latestStartUpLog.buildinfo.versionArray; +var versionArrayCleaned = versionArray.slice(0, 3); +if (versionArray[3] == -100) { + versionArrayCleaned[2] -= 1; +} +assert.eq(serverStatus.version, + latestStartUpLog.buildinfo.version, + "Mongo version doesn't match that from ServerStatus"); +assert.eq( + version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray"); +var jsEngine = latestStartUpLog.buildinfo.javascriptEngine; +assert((jsEngine == "none") || jsEngine.startsWith("mozjs")); +assert.eq(isMaster.maxBsonObjectSize, + latestStartUpLog.buildinfo.maxBsonObjectSize, + "maxBsonObjectSize doesn't match one from ismaster"); })(); diff --git a/jstests/core/tailable_cursor_invalidation.js b/jstests/core/tailable_cursor_invalidation.js index 97ea96bb8d0..2424bce64f7 100644 --- a/jstests/core/tailable_cursor_invalidation.js +++ b/jstests/core/tailable_cursor_invalidation.js @@ -3,70 +3,69 @@ // Tests for the behavior of tailable cursors when a collection is dropped or the cursor is // otherwise invalidated. (function() { - "use strict"; +"use strict"; - const collName = "tailable_cursor_invalidation"; - const coll = db[collName]; - coll.drop(); +const collName = "tailable_cursor_invalidation"; +const coll = db[collName]; +coll.drop(); - // Test that you cannot open a tailable cursor on a non-existent collection. 
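// Illustrative sketch (assumption; the collection name 'example_tail_target' is
// hypothetical): outside of the invalidation scenarios tested in this file, the usual way to
// tail a capped collection is a tailable, awaitData cursor polled with getMore, bounding each
// wait with maxTimeMS.
const tailColl = db.example_tail_target;
tailColl.drop();
assert.commandWorked(db.createCollection(tailColl.getName(), {capped: true, size: 1024}));
assert.writeOK(tailColl.insert({_id: 0}));
const tailRes = assert.commandWorked(
    db.runCommand({find: tailColl.getName(), tailable: true, awaitData: true}));
assert.neq(0, tailRes.cursor.id);
// Each getMore returns any newly inserted documents, waiting up to maxTimeMS for more.
assert.writeOK(tailColl.insert({_id: 1}));
const moreRes = assert.commandWorked(db.runCommand(
    {getMore: tailRes.cursor.id, collection: tailColl.getName(), maxTimeMS: 1000}));
assert.eq(1, moreRes.cursor.nextBatch.length);
assert.commandWorked(
    db.runCommand({killCursors: tailColl.getName(), cursors: [tailRes.cursor.id]}));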
- assert.eq(0, assert.commandWorked(db.runCommand({find: collName})).cursor.id); - assert.eq(0, assert.commandWorked(db.runCommand({find: collName, tailable: true})).cursor.id); - assert.eq(0, - assert.commandWorked(db.runCommand({find: collName, tailable: true, awaitData: true})) - .cursor.id); - const emptyBatchCursorId = - assert - .commandWorked( - db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: 0})) - .cursor.id; - const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; - if (isMongos) { - // Mongos will let you establish a cursor with batch size 0 and return to you before it - // realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor id - // though. - assert.neq(emptyBatchCursorId, 0); - assert.eq( - 0, - assert.commandWorked(db.runCommand({getMore: emptyBatchCursorId, collection: collName})) - .cursor.id); - } else { - // A mongod should know immediately that the collection doesn't exist, and return a 0 cursor - // id. - assert.eq(0, emptyBatchCursorId); - } +// Test that you cannot open a tailable cursor on a non-existent collection. +assert.eq(0, assert.commandWorked(db.runCommand({find: collName})).cursor.id); +assert.eq(0, assert.commandWorked(db.runCommand({find: collName, tailable: true})).cursor.id); +assert.eq(0, + assert.commandWorked(db.runCommand({find: collName, tailable: true, awaitData: true})) + .cursor.id); +const emptyBatchCursorId = assert + .commandWorked(db.runCommand( + {find: collName, tailable: true, awaitData: true, batchSize: 0})) + .cursor.id; +const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; +if (isMongos) { + // Mongos will let you establish a cursor with batch size 0 and return to you before it + // realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor id + // though. + assert.neq(emptyBatchCursorId, 0); + assert.eq( + 0, + assert.commandWorked(db.runCommand({getMore: emptyBatchCursorId, collection: collName})) + .cursor.id); +} else { + // A mongod should know immediately that the collection doesn't exist, and return a 0 cursor + // id. + assert.eq(0, emptyBatchCursorId); +} - function dropAndRecreateColl() { - coll.drop(); - assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); - const numDocs = 4; - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < numDocs; ++i) { - bulk.insert({_id: i}); - } - assert.writeOK(bulk.execute()); +function dropAndRecreateColl() { + coll.drop(); + assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); + const numDocs = 4; + const bulk = coll.initializeUnorderedBulkOp(); + for (let i = 0; i < numDocs; ++i) { + bulk.insert({_id: i}); } - dropAndRecreateColl(); + assert.writeOK(bulk.execute()); +} +dropAndRecreateColl(); - /** - * Runs a find command to establish a cursor. Asserts that the command worked and that the - * cursor id is not 0, then returns the cursor id. - */ - function openCursor({tailable, awaitData}) { - const findRes = assert.commandWorked( - db.runCommand({find: collName, tailable: tailable, awaitData: awaitData})); - assert.neq(findRes.cursor.id, 0); - assert.eq(findRes.cursor.ns, coll.getFullName()); - return findRes.cursor.id; - } +/** + * Runs a find command to establish a cursor. Asserts that the command worked and that the + * cursor id is not 0, then returns the cursor id. 
+ */ +function openCursor({tailable, awaitData}) { + const findRes = assert.commandWorked( + db.runCommand({find: collName, tailable: tailable, awaitData: awaitData})); + assert.neq(findRes.cursor.id, 0); + assert.eq(findRes.cursor.ns, coll.getFullName()); + return findRes.cursor.id; +} - // Test that the cursor dies on getMore if the collection has been dropped. - let cursorId = openCursor({tailable: true, awaitData: false}); - dropAndRecreateColl(); - assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}), - ErrorCodes.QueryPlanKilled); - cursorId = openCursor({tailable: true, awaitData: true}); - dropAndRecreateColl(); - assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}), - ErrorCodes.QueryPlanKilled); +// Test that the cursor dies on getMore if the collection has been dropped. +let cursorId = openCursor({tailable: true, awaitData: false}); +dropAndRecreateColl(); +assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}), + ErrorCodes.QueryPlanKilled); +cursorId = openCursor({tailable: true, awaitData: true}); +dropAndRecreateColl(); +assert.commandFailedWithCode(db.runCommand({getMore: cursorId, collection: collName}), + ErrorCodes.QueryPlanKilled); }()); diff --git a/jstests/core/tailable_getmore_batch_size.js b/jstests/core/tailable_getmore_batch_size.js index 9e96f6f68a3..466fa25a686 100644 --- a/jstests/core/tailable_getmore_batch_size.js +++ b/jstests/core/tailable_getmore_batch_size.js @@ -3,94 +3,94 @@ // Tests for the behavior of combining the tailable and awaitData options to the getMore command // with the batchSize option. (function() { - "use strict"; +"use strict"; - const collName = "tailable_getmore_batch_size"; - const coll = db[collName]; - const batchSize = 2; +const collName = "tailable_getmore_batch_size"; +const coll = db[collName]; +const batchSize = 2; - function dropAndRecreateColl({numDocs}) { - coll.drop(); - assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < numDocs; ++i) { - bulk.insert({_id: i}); - } - assert.writeOK(bulk.execute()); +function dropAndRecreateColl({numDocs}) { + coll.drop(); + assert.commandWorked(db.createCollection(collName, {capped: true, size: 1024})); + const bulk = coll.initializeUnorderedBulkOp(); + for (let i = 0; i < numDocs; ++i) { + bulk.insert({_id: i}); } + assert.writeOK(bulk.execute()); +} - // Test that running a find with the 'tailable' option will return results immediately, even if - // there are fewer than the specified batch size. - dropAndRecreateColl({numDocs: batchSize - 1}); - let findRes = - assert.commandWorked(db.runCommand({find: collName, tailable: true, batchSize: batchSize})); - assert.eq(findRes.cursor.firstBatch.length, batchSize - 1); - assert.neq(findRes.cursor.id, 0); - // Test that the same is true for a find with the 'tailable' and 'awaitData' options set. - findRes = assert.commandWorked( - db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: batchSize})); - assert.eq(findRes.cursor.firstBatch.length, batchSize - 1); - assert.neq(findRes.cursor.id, 0); +// Test that running a find with the 'tailable' option will return results immediately, even if +// there are fewer than the specified batch size. 
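// Illustrative sketch (assumption; the collection name 'example_tail_options' is
// hypothetical): the raw find commands used below have a shell-level equivalent;
// DBQuery.Option.tailable and DBQuery.Option.awaitData are the cursor flags corresponding to
// the 'tailable' and 'awaitData' command options.
const optColl = db.example_tail_options;
optColl.drop();
assert.commandWorked(db.createCollection(optColl.getName(), {capped: true, size: 1024}));
assert.writeOK(optColl.insert({_id: 0}));
const flaggedCursor =
    optColl.find().addOption(DBQuery.Option.tailable).addOption(DBQuery.Option.awaitData);
// The existing document is returned immediately, just like the command form used below.
assert.eq(0, flaggedCursor.next()._id);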
+dropAndRecreateColl({numDocs: batchSize - 1}); +let findRes = + assert.commandWorked(db.runCommand({find: collName, tailable: true, batchSize: batchSize})); +assert.eq(findRes.cursor.firstBatch.length, batchSize - 1); +assert.neq(findRes.cursor.id, 0); +// Test that the same is true for a find with the 'tailable' and 'awaitData' options set. +findRes = assert.commandWorked( + db.runCommand({find: collName, tailable: true, awaitData: true, batchSize: batchSize})); +assert.eq(findRes.cursor.firstBatch.length, batchSize - 1); +assert.neq(findRes.cursor.id, 0); - /** - * Runs a find command with a batchSize of 'batchSize' to establish a cursor. Asserts that the - * command worked and that the cursor id is not 0, then returns the cursor id. - */ - function openCursor({batchSize, tailable, awaitData}) { - const findRes = assert.commandWorked(db.runCommand( - {find: collName, tailable: tailable, awaitData: awaitData, batchSize: batchSize})); - assert.eq(findRes.cursor.firstBatch.length, batchSize); - assert.neq(findRes.cursor.id, 0); - assert.eq(findRes.cursor.ns, coll.getFullName()); - return findRes.cursor.id; - } +/** + * Runs a find command with a batchSize of 'batchSize' to establish a cursor. Asserts that the + * command worked and that the cursor id is not 0, then returns the cursor id. + */ +function openCursor({batchSize, tailable, awaitData}) { + const findRes = assert.commandWorked(db.runCommand( + {find: collName, tailable: tailable, awaitData: awaitData, batchSize: batchSize})); + assert.eq(findRes.cursor.firstBatch.length, batchSize); + assert.neq(findRes.cursor.id, 0); + assert.eq(findRes.cursor.ns, coll.getFullName()); + return findRes.cursor.id; +} - // Test that specifying a batch size to a getMore on a tailable cursor produces a batch of the - // desired size when the number of results is larger than the batch size. +// Test that specifying a batch size to a getMore on a tailable cursor produces a batch of the +// desired size when the number of results is larger than the batch size. - // One batch's worth for the find and one more than one batch's worth for the getMore. - dropAndRecreateColl({numDocs: batchSize + (batchSize + 1)}); - let cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); - let getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); +// One batch's worth for the find and one more than one batch's worth for the getMore. +dropAndRecreateColl({numDocs: batchSize + (batchSize + 1)}); +let cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); +let getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); - // Test that the same is true for a tailable, *awaitData* cursor. - cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true}); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); +// Test that the same is true for a tailable, *awaitData* cursor. 
+cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true}); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); - // Test that specifying a batch size to a getMore on a tailable cursor returns all - // new results immediately, even if the batch size is larger than the number of new results. - // One batch's worth for the find and one less than one batch's worth for the getMore. - dropAndRecreateColl({numDocs: batchSize + (batchSize - 1)}); - cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1); +// Test that specifying a batch size to a getMore on a tailable cursor returns all +// new results immediately, even if the batch size is larger than the number of new results. +// One batch's worth for the find and one less than one batch's worth for the getMore. +dropAndRecreateColl({numDocs: batchSize + (batchSize - 1)}); +cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1); - // Test that the same is true for a tailable, *awaitData* cursor. - cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true}); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1); +// Test that the same is true for a tailable, *awaitData* cursor. +cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: true}); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize - 1); - // Test that using a smaller batch size than there are results will return all results without - // empty batches in between (SERVER-30799). - dropAndRecreateColl({numDocs: batchSize * 3}); - cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); - getMoreRes = assert.commandWorked( - db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); - assert.eq(getMoreRes.cursor.nextBatch.length, 0); +// Test that using a smaller batch size than there are results will return all results without +// empty batches in between (SERVER-30799). 
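// Illustrative sketch (assumption; the collection name 'example_drain' is hypothetical): for
// an ordinary, non-tailable cursor the analogous pattern is to keep issuing getMore with the
// desired batchSize until the returned cursor id is 0, accumulating each nextBatch.
const drainColl = db.example_drain;
drainColl.drop();
for (let i = 0; i < 7; ++i) {
    assert.writeOK(drainColl.insert({_id: i}));
}
const drainFindRes =
    assert.commandWorked(db.runCommand({find: drainColl.getName(), batchSize: 2}));
let drainedDocs = drainFindRes.cursor.firstBatch;
let drainCursorId = drainFindRes.cursor.id;
while (drainCursorId != 0) {
    const next = assert.commandWorked(db.runCommand(
        {getMore: drainCursorId, collection: drainColl.getName(), batchSize: 2}));
    drainedDocs = drainedDocs.concat(next.cursor.nextBatch);
    drainCursorId = next.cursor.id;
}
assert.eq(7, drainedDocs.length);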
+dropAndRecreateColl({numDocs: batchSize * 3}); +cursorId = openCursor({batchSize: batchSize, tailable: true, awaitData: false}); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, batchSize); +getMoreRes = assert.commandWorked( + db.runCommand({getMore: cursorId, collection: collName, batchSize: batchSize})); +assert.eq(getMoreRes.cursor.nextBatch.length, 0); - // Avoid leaving the cursor open. Cursors above are killed by drops, but we'll avoid dropping - // the collection at the end so other consistency checks like validate can be run against it. - assert.commandWorked(db.runCommand({killCursors: collName, cursors: [getMoreRes.cursor.id]})); +// Avoid leaving the cursor open. Cursors above are killed by drops, but we'll avoid dropping +// the collection at the end so other consistency checks like validate can be run against it. +assert.commandWorked(db.runCommand({killCursors: collName, cursors: [getMoreRes.cursor.id]})); }()); diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js index 8669e29a836..672a52aeb3d 100644 --- a/jstests/core/tailable_skip_limit.js +++ b/jstests/core/tailable_skip_limit.js @@ -2,92 +2,92 @@ // Test that tailable cursors work correctly with skip and limit. (function() { - "use strict"; +"use strict"; - // Setup the capped collection. - var collname = "jstests_tailable_skip_limit"; - var t = db[collname]; - t.drop(); - assert.commandWorked(db.createCollection(collname, {capped: true, size: 1024})); +// Setup the capped collection. 
+var collname = "jstests_tailable_skip_limit"; +var t = db[collname]; +t.drop(); +assert.commandWorked(db.createCollection(collname, {capped: true, size: 1024})); - assert.writeOK(t.insert({_id: 1})); - assert.writeOK(t.insert({_id: 2})); +assert.writeOK(t.insert({_id: 1})); +assert.writeOK(t.insert({_id: 2})); - // Non-tailable with skip - var cursor = t.find().skip(1); - assert.eq(2, cursor.next()["_id"]); - assert(!cursor.hasNext()); - assert.writeOK(t.insert({_id: 3})); - assert(!cursor.hasNext()); +// Non-tailable with skip +var cursor = t.find().skip(1); +assert.eq(2, cursor.next()["_id"]); +assert(!cursor.hasNext()); +assert.writeOK(t.insert({_id: 3})); +assert(!cursor.hasNext()); - // Non-tailable with limit - var cursor = t.find().limit(100); - for (var i = 1; i <= 3; i++) { - assert.eq(i, cursor.next()["_id"]); - } - assert(!cursor.hasNext()); - assert.writeOK(t.insert({_id: 4})); - assert(!cursor.hasNext()); +// Non-tailable with limit +var cursor = t.find().limit(100); +for (var i = 1; i <= 3; i++) { + assert.eq(i, cursor.next()["_id"]); +} +assert(!cursor.hasNext()); +assert.writeOK(t.insert({_id: 4})); +assert(!cursor.hasNext()); - // Non-tailable with negative limit - var cursor = t.find().limit(-100); - for (var i = 1; i <= 4; i++) { - assert.eq(i, cursor.next()["_id"]); - } - assert(!cursor.hasNext()); - assert.writeOK(t.insert({_id: 5})); - assert(!cursor.hasNext()); +// Non-tailable with negative limit +var cursor = t.find().limit(-100); +for (var i = 1; i <= 4; i++) { + assert.eq(i, cursor.next()["_id"]); +} +assert(!cursor.hasNext()); +assert.writeOK(t.insert({_id: 5})); +assert(!cursor.hasNext()); - // Tailable with skip - cursor = t.find().addOption(2).skip(4); - assert.eq(5, cursor.next()["_id"]); - assert(!cursor.hasNext()); - assert.writeOK(t.insert({_id: 6})); - assert(cursor.hasNext()); - assert.eq(6, cursor.next()["_id"]); +// Tailable with skip +cursor = t.find().addOption(2).skip(4); +assert.eq(5, cursor.next()["_id"]); +assert(!cursor.hasNext()); +assert.writeOK(t.insert({_id: 6})); +assert(cursor.hasNext()); +assert.eq(6, cursor.next()["_id"]); - // Tailable with limit - var cursor = t.find().addOption(2).limit(100); - for (var i = 1; i <= 6; i++) { - assert.eq(i, cursor.next()["_id"]); - } - assert(!cursor.hasNext()); - assert.writeOK(t.insert({_id: 7})); - assert(cursor.hasNext()); - assert.eq(7, cursor.next()["_id"]); +// Tailable with limit +var cursor = t.find().addOption(2).limit(100); +for (var i = 1; i <= 6; i++) { + assert.eq(i, cursor.next()["_id"]); +} +assert(!cursor.hasNext()); +assert.writeOK(t.insert({_id: 7})); +assert(cursor.hasNext()); +assert.eq(7, cursor.next()["_id"]); - // Tailable with negative limit is an error. - assert.throws(function() { - t.find().addOption(2).limit(-100).next(); - }); +// Tailable with negative limit is an error. +assert.throws(function() { + t.find().addOption(2).limit(-100).next(); +}); +assert.throws(function() { + t.find().addOption(2).limit(-1).itcount(); +}); + +// When using read commands, a limit of 1 with the tailable option is allowed. In legacy +// readMode, an ntoreturn of 1 means the same thing as ntoreturn -1 and is disallowed with +// tailable. +if (db.getMongo().useReadCommands()) { + assert.eq(1, t.find().addOption(2).limit(1).itcount()); +} else { assert.throws(function() { - t.find().addOption(2).limit(-1).itcount(); + t.find().addOption(2).limit(1).itcount(); }); +} - // When using read commands, a limit of 1 with the tailable option is allowed. 
In legacy - // readMode, an ntoreturn of 1 means the same thing as ntoreturn -1 and is disallowed with - // tailable. - if (db.getMongo().useReadCommands()) { - assert.eq(1, t.find().addOption(2).limit(1).itcount()); - } else { - assert.throws(function() { - t.find().addOption(2).limit(1).itcount(); - }); - } - - // Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended - // to be run on both mongod and mongos. For SERVER-20720. - t.drop(); - assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 1024})); +// Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended +// to be run on both mongod and mongos. For SERVER-20720. +t.drop(); +assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 1024})); - var cmdRes = db.runCommand({find: t.getName(), tailable: true}); - assert.commandWorked(cmdRes); - assert.eq(cmdRes.cursor.id, NumberLong(0)); - assert.eq(cmdRes.cursor.ns, t.getFullName()); - assert.eq(cmdRes.cursor.firstBatch.length, 0); +var cmdRes = db.runCommand({find: t.getName(), tailable: true}); +assert.commandWorked(cmdRes); +assert.eq(cmdRes.cursor.id, NumberLong(0)); +assert.eq(cmdRes.cursor.ns, t.getFullName()); +assert.eq(cmdRes.cursor.firstBatch.length, 0); - // Test that the cursor works in the shell. - assert.eq(t.find().addOption(2).itcount(), 0); - assert.writeOK(t.insert({a: 1})); - assert.eq(t.find().addOption(2).itcount(), 1); +// Test that the cursor works in the shell. +assert.eq(t.find().addOption(2).itcount(), 0); +assert.writeOK(t.insert({a: 1})); +assert.eq(t.find().addOption(2).itcount(), 1); })(); diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js index f3fe9908d1b..a81dfd84e09 100644 --- a/jstests/core/text_covered_matching.js +++ b/jstests/core/text_covered_matching.js @@ -12,176 +12,176 @@ load("jstests/libs/analyze_plan.js"); (function() { - "use strict"; - const coll = db.text_covered_matching; +"use strict"; +const coll = db.text_covered_matching; - coll.drop(); - assert.commandWorked(coll.createIndex({a: "text", b: 1})); - assert.writeOK(coll.insert({a: "hello", b: 1, c: 1})); - assert.writeOK(coll.insert({a: "world", b: 2, c: 2})); - assert.writeOK(coll.insert({a: "hello world", b: 3, c: 3})); +coll.drop(); +assert.commandWorked(coll.createIndex({a: "text", b: 1})); +assert.writeOK(coll.insert({a: "hello", b: 1, c: 1})); +assert.writeOK(coll.insert({a: "world", b: 2, c: 2})); +assert.writeOK(coll.insert({a: "hello world", b: 3, c: 3})); - // - // Test the query {$text: {$search: "hello"}, b: 1} with and without the 'textScore' in the - // output. - // +// +// Test the query {$text: {$search: "hello"}, b: 1} with and without the 'textScore' in the +// output. +// - // Expected result: - // - We examine two keys, for the two documents with "hello" in their text; - // - we examine only one document, because covered matching rejects the index entry for - // which b != 1; - // - we return exactly one document. 
- let explainResult = coll.find({$text: {$search: "hello"}, b: 1}).explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// Expected result: +// - We examine two keys, for the two documents with "hello" in their text; +// - we examine only one document, because covered matching rejects the index entry for +// which b != 1; +// - we return exactly one document. +let explainResult = coll.find({$text: {$search: "hello"}, b: 1}).explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // When we include the text score in the projection, we use a TEXT_OR instead of an OR in our - // query plan, which changes how filtering is done. We should get the same result, however. - explainResult = coll.find({$text: {$search: "hello"}, b: 1}, - {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) - .explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// When we include the text score in the projection, we use a TEXT_OR instead of an OR in our +// query plan, which changes how filtering is done. We should get the same result, however. +explainResult = coll.find({$text: {$search: "hello"}, b: 1}, + {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) + .explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // - // Test the query {$text: {$search: "hello"}, c: 1} with and without the 'textScore' in the - // output. - // +// +// Test the query {$text: {$search: "hello"}, c: 1} with and without the 'textScore' in the +// output. 
+// - // Expected result: - // - We examine two keys, for the two documents with "hello" in their text; - // - we examine more than just the matching document, because we need to fetch documents in - // order to examine the non-covered 'c' field; - // - we return exactly one document. - explainResult = coll.find({$text: {$search: "hello"}, c: 1}).explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.gt(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// Expected result: +// - We examine two keys, for the two documents with "hello" in their text; +// - we examine more than just the matching document, because we need to fetch documents in +// order to examine the non-covered 'c' field; +// - we return exactly one document. +explainResult = coll.find({$text: {$search: "hello"}, c: 1}).explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.gt(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // As before, including the text score in the projection changes how filtering occurs, but we - // still expect the same result. - explainResult = coll.find({$text: {$search: "hello"}, c: 1}, - {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) - .explain("executionStats"); - assert.commandWorked(explainResult); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.gt(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// As before, including the text score in the projection changes how filtering occurs, but we +// still expect the same result. 
+explainResult = coll.find({$text: {$search: "hello"}, c: 1}, + {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) + .explain("executionStats"); +assert.commandWorked(explainResult); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.gt(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // - // Test the first query again, but this time, use dotted fields to make sure they don't confuse - // the query planner: - // {$text: {$search: "hello"}, "b.d": 1} - // - coll.drop(); - assert.commandWorked(coll.createIndex({a: "text", "b.d": 1})); - assert.writeOK(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}})); - assert.writeOK(coll.insert({a: "world", b: {d: 2}, c: {e: 2}})); - assert.writeOK(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}})); +// +// Test the first query again, but this time, use dotted fields to make sure they don't confuse +// the query planner: +// {$text: {$search: "hello"}, "b.d": 1} +// +coll.drop(); +assert.commandWorked(coll.createIndex({a: "text", "b.d": 1})); +assert.writeOK(coll.insert({a: "hello", b: {d: 1}, c: {e: 1}})); +assert.writeOK(coll.insert({a: "world", b: {d: 2}, c: {e: 2}})); +assert.writeOK(coll.insert({a: "hello world", b: {d: 3}, c: {e: 3}})); - // Expected result: - // - We examine two keys, for the two documents with "hello" in their text; - // - we examine only one document, because covered matching rejects the index entry for - // which b != 1; - // - we return exactly one document. - explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}).explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// Expected result: +// - We examine two keys, for the two documents with "hello" in their text; +// - we examine only one document, because covered matching rejects the index entry for +// which b != 1; +// - we return exactly one document. +explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}).explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // When we include the text score in the projection, we use a TEXT_OR instead of an OR in our - // query plan, which changes how filtering is done. We should get the same result, however. 
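// Illustrative sketch (assumption; the collection name 'example_text_score' is hypothetical):
// the {$meta: "textScore"} projection exercised here is the same mechanism used to surface
// text relevance and to sort results by it.
const scoreColl = db.example_text_score;
scoreColl.drop();
assert.commandWorked(scoreColl.createIndex({a: "text"}));
assert.writeOK(scoreColl.insert({_id: 1, a: "hello"}));
assert.writeOK(scoreColl.insert({_id: 2, a: "hello world"}));
const ranked = scoreColl.find({$text: {$search: "hello"}}, {score: {$meta: "textScore"}})
                   .sort({score: {$meta: "textScore"}})
                   .toArray();
// Results come back ordered by descending relevance score.
assert.eq(2, ranked.length);
assert.gte(ranked[0].score, ranked[1].score);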
- explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}, - {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) - .explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// When we include the text score in the projection, we use a TEXT_OR instead of an OR in our +// query plan, which changes how filtering is done. We should get the same result, however. +explainResult = coll.find({$text: {$search: "hello"}, "b.d": 1}, + {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) + .explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "TEXT_OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // - // Test the second query again, this time with dotted fields: - // {$text: {$search: "hello"}, "c.e": 1} - // +// +// Test the second query again, this time with dotted fields: +// {$text: {$search: "hello"}, "c.e": 1} +// - // Expected result: - // - We examine two keys, for the two documents with "hello" in their text; - // - we examine more than just the matching document, because we need to fetch documents in - // order to examine the non-covered 'c' field; - // - we return exactly one document. - explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}).explain("executionStats"); - assert.commandWorked(explainResult); - assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.gt(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// Expected result: +// - We examine two keys, for the two documents with "hello" in their text; +// - we examine more than just the matching document, because we need to fetch documents in +// order to examine the non-covered 'c' field; +// - we return exactly one document. 
+explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}).explain("executionStats"); +assert.commandWorked(explainResult); +assert(planHasStage(db, explainResult.queryPlanner.winningPlan, "OR")); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.gt(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); - // As before, including the text score in the projection changes how filtering occurs, but we - // still expect the same result. - explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}, - {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) - .explain("executionStats"); - assert.commandWorked(explainResult); - assert.eq(explainResult.executionStats.totalKeysExamined, - 2, - "Unexpected number of keys examined: " + tojson(explainResult)); - assert.gt(explainResult.executionStats.totalDocsExamined, - 1, - "Unexpected number of documents examined: " + tojson(explainResult)); - assert.eq(explainResult.executionStats.nReturned, - 1, - "Unexpected number of results returned: " + tojson(explainResult)); +// As before, including the text score in the projection changes how filtering occurs, but we +// still expect the same result. +explainResult = coll.find({$text: {$search: "hello"}, "c.e": 1}, + {a: 1, b: 1, c: 1, textScore: {$meta: "textScore"}}) + .explain("executionStats"); +assert.commandWorked(explainResult); +assert.eq(explainResult.executionStats.totalKeysExamined, + 2, + "Unexpected number of keys examined: " + tojson(explainResult)); +assert.gt(explainResult.executionStats.totalDocsExamined, + 1, + "Unexpected number of documents examined: " + tojson(explainResult)); +assert.eq(explainResult.executionStats.nReturned, + 1, + "Unexpected number of results returned: " + tojson(explainResult)); })(); diff --git a/jstests/core/text_index_limits.js b/jstests/core/text_index_limits.js index 73df159b4b5..69a9be2f751 100644 --- a/jstests/core/text_index_limits.js +++ b/jstests/core/text_index_limits.js @@ -6,43 +6,42 @@ * @tags: [does_not_support_stepdowns] */ (function() { - "use strict"; +"use strict"; - var t = db.text_index_limits; - t.drop(); +var t = db.text_index_limits; +t.drop(); - assert.commandWorked(t.createIndex({comments: "text"})); +assert.commandWorked(t.createIndex({comments: "text"})); - // 1. Test number of unique terms exceeds 400,000 - let commentsWithALotOfUniqueWords = ""; - // 26^4 = 456,976 > 400,000 - for (let ch1 = 97; ch1 < 123; ch1++) { - for (let ch2 = 97; ch2 < 123; ch2++) { - for (let ch3 = 97; ch3 < 123; ch3++) { - for (let ch4 = 97; ch4 < 123; ch4++) { - let word = String.fromCharCode(ch1, ch2, ch3, ch4); - commentsWithALotOfUniqueWords += word + " "; - } +// 1. 
Test number of unique terms exceeds 400,000 +let commentsWithALotOfUniqueWords = ""; +// 26^4 = 456,976 > 400,000 +for (let ch1 = 97; ch1 < 123; ch1++) { + for (let ch2 = 97; ch2 < 123; ch2++) { + for (let ch3 = 97; ch3 < 123; ch3++) { + for (let ch4 = 97; ch4 < 123; ch4++) { + let word = String.fromCharCode(ch1, ch2, ch3, ch4); + commentsWithALotOfUniqueWords += word + " "; } } } - assert.commandWorked(db.runCommand( - {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]})); +} +assert.commandWorked(db.runCommand( + {insert: t.getName(), documents: [{_id: 1, comments: commentsWithALotOfUniqueWords}]})); - // 2. Test total size of index keys for unique terms exceeds 4MB +// 2. Test total size of index keys for unique terms exceeds 4MB - // 26^3 = 17576 < 400,000 - let prefix = "a".repeat(400); - let commentsWithWordsOfLargeSize = ""; - for (let ch1 = 97; ch1 < 123; ch1++) { - for (let ch2 = 97; ch2 < 123; ch2++) { - for (let ch3 = 97; ch3 < 123; ch3++) { - let word = String.fromCharCode(ch1, ch2, ch3); - commentsWithWordsOfLargeSize += prefix + word + " "; - } +// 26^3 = 17576 < 400,000 +let prefix = "a".repeat(400); +let commentsWithWordsOfLargeSize = ""; +for (let ch1 = 97; ch1 < 123; ch1++) { + for (let ch2 = 97; ch2 < 123; ch2++) { + for (let ch3 = 97; ch3 < 123; ch3++) { + let word = String.fromCharCode(ch1, ch2, ch3); + commentsWithWordsOfLargeSize += prefix + word + " "; } } - assert.commandWorked(db.runCommand( - {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]})); - +} +assert.commandWorked(db.runCommand( + {insert: t.getName(), documents: [{_id: 2, comments: commentsWithWordsOfLargeSize}]})); }()); diff --git a/jstests/core/throw_big.js b/jstests/core/throw_big.js index 422ee93a6ae..ef9554966e0 100644 --- a/jstests/core/throw_big.js +++ b/jstests/core/throw_big.js @@ -2,15 +2,14 @@ * Test that verifies the javascript integration can handle large string exception messages. */ (function() { - 'use strict'; +'use strict'; - var len = 65 * 1024 * 1024; - var str = new Array(len + 1).join('b'); - - // We expect to successfully throw and catch this large exception message. - // We do not want the mongo shell to terminate. - assert.throws(function() { - throw str; - }); +var len = 65 * 1024 * 1024; +var str = new Array(len + 1).join('b'); +// We expect to successfully throw and catch this large exception message. +// We do not want the mongo shell to terminate. +assert.throws(function() { + throw str; +}); })(); diff --git a/jstests/core/top.js b/jstests/core/top.js index eca4570472f..2d8fd8297c3 100644 --- a/jstests/core/top.js +++ b/jstests/core/top.js @@ -14,110 +14,110 @@ */ (function() { - load("jstests/libs/stats.js"); - - var name = "toptest"; - - var testDB = db.getSiblingDB(name); - var testColl = testDB[name + "coll"]; - testColl.drop(); - - // Perform an operation on the collection so that it is present in the "top" command's output. 
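As background for the counters asserted below, a rough sketch of inspecting 'top' output directly; getTop() and assertTopDiffEq() from jstests/libs/stats.js are assumed to be thin wrappers around this command, and the namespace string matches the collection this test creates.

// Illustrative example: read per-collection usage counters from the 'top' command.
var topRes = db.adminCommand({top: 1});
// Each namespace entry carries {time, count} buckets such as insert, update,
// queries, getmore, remove, commands, readLock and writeLock.
printjson(topRes.totals["toptest.toptestcoll"]);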
- assert.eq(testColl.find({}).itcount(), 0); - - // This variable is used to get differential output - var lastTop = getTop(testColl); - - var numRecords = 100; - - // Insert - for (var i = 0; i < numRecords; i++) { - assert.writeOK(testColl.insert({_id: i})); - } - assertTopDiffEq(testColl, lastTop, "insert", numRecords); - lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", numRecords); - - // Update - for (i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i})); - } - lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords); - - // Queries - var query = {}; - for (i = 0; i < numRecords; i++) { - query[i] = testColl.find({x: {$gte: i}}).batchSize(2); - assert.eq(query[i].next()._id, i); - } - lastTop = assertTopDiffEq(testColl, lastTop, "queries", numRecords); - - // Getmore - for (i = 0; i < numRecords / 2; i++) { - assert.eq(query[i].next()._id, i + 1); - assert.eq(query[i].next()._id, i + 2); - assert.eq(query[i].next()._id, i + 3); - assert.eq(query[i].next()._id, i + 4); - } - lastTop = assertTopDiffEq(testColl, lastTop, "getmore", numRecords); - - // Remove - for (i = 0; i < numRecords; i++) { - assert.writeOK(testColl.remove({_id: 1})); - } - lastTop = assertTopDiffEq(testColl, lastTop, "remove", numRecords); - - // Upsert, note that these are counted as updates, not inserts - for (i = 0; i < numRecords; i++) { - assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1})); - } - lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords); - - // Commands - var res; - - // "count" command - lastTop = getTop(testColl); // ignore any commands before this - for (i = 0; i < numRecords; i++) { - res = assert.commandWorked(testDB.runCommand({count: testColl.getName()})); - assert.eq(res.n, numRecords, tojson(res)); - } - lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); - - // "findAndModify" command - lastTop = getTop(testColl); - for (i = 0; i < numRecords; i++) { - res = assert.commandWorked(testDB.runCommand({ - findAndModify: testColl.getName(), - query: {_id: i}, - update: {$inc: {x: 1}}, - })); - assert.eq(res.value.x, i, tojson(res)); - } - lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); - - lastTop = getTop(testColl); - for (i = 0; i < numRecords; i++) { - res = assert.commandWorked(testDB.runCommand({ - findAndModify: testColl.getName(), - query: {_id: i}, - remove: true, - })); - assert.eq(res.value.x, i + 1, tojson(res)); - } - lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); - - // getIndexes - assert.eq(1, testColl.getIndexes().length); - assertTopDiffEq(testColl, lastTop, "commands", 1); - lastTop = assertTopDiffEq(testColl, lastTop, "readLock", 1); - - // createIndex - res = assert.commandWorked(testColl.createIndex({x: 1})); - assertTopDiffEq(testColl, lastTop, "writeLock", 1); - lastTop = assertTopDiffEq(testColl, lastTop, "commands", 1); - - // dropIndex - res = assert.commandWorked(testColl.dropIndex({x: 1})); - assertTopDiffEq(testColl, lastTop, "commands", 1); - lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", 1); +load("jstests/libs/stats.js"); + +var name = "toptest"; + +var testDB = db.getSiblingDB(name); +var testColl = testDB[name + "coll"]; +testColl.drop(); + +// Perform an operation on the collection so that it is present in the "top" command's output. 
+assert.eq(testColl.find({}).itcount(), 0); + +// This variable is used to get differential output +var lastTop = getTop(testColl); + +var numRecords = 100; + +// Insert +for (var i = 0; i < numRecords; i++) { + assert.writeOK(testColl.insert({_id: i})); +} +assertTopDiffEq(testColl, lastTop, "insert", numRecords); +lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", numRecords); + +// Update +for (i = 0; i < numRecords; i++) { + assert.writeOK(testColl.update({_id: i}, {x: i})); +} +lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords); + +// Queries +var query = {}; +for (i = 0; i < numRecords; i++) { + query[i] = testColl.find({x: {$gte: i}}).batchSize(2); + assert.eq(query[i].next()._id, i); +} +lastTop = assertTopDiffEq(testColl, lastTop, "queries", numRecords); + +// Getmore +for (i = 0; i < numRecords / 2; i++) { + assert.eq(query[i].next()._id, i + 1); + assert.eq(query[i].next()._id, i + 2); + assert.eq(query[i].next()._id, i + 3); + assert.eq(query[i].next()._id, i + 4); +} +lastTop = assertTopDiffEq(testColl, lastTop, "getmore", numRecords); + +// Remove +for (i = 0; i < numRecords; i++) { + assert.writeOK(testColl.remove({_id: 1})); +} +lastTop = assertTopDiffEq(testColl, lastTop, "remove", numRecords); + +// Upsert, note that these are counted as updates, not inserts +for (i = 0; i < numRecords; i++) { + assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1})); +} +lastTop = assertTopDiffEq(testColl, lastTop, "update", numRecords); + +// Commands +var res; + +// "count" command +lastTop = getTop(testColl); // ignore any commands before this +for (i = 0; i < numRecords; i++) { + res = assert.commandWorked(testDB.runCommand({count: testColl.getName()})); + assert.eq(res.n, numRecords, tojson(res)); +} +lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); + +// "findAndModify" command +lastTop = getTop(testColl); +for (i = 0; i < numRecords; i++) { + res = assert.commandWorked(testDB.runCommand({ + findAndModify: testColl.getName(), + query: {_id: i}, + update: {$inc: {x: 1}}, + })); + assert.eq(res.value.x, i, tojson(res)); +} +lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); + +lastTop = getTop(testColl); +for (i = 0; i < numRecords; i++) { + res = assert.commandWorked(testDB.runCommand({ + findAndModify: testColl.getName(), + query: {_id: i}, + remove: true, + })); + assert.eq(res.value.x, i + 1, tojson(res)); +} +lastTop = assertTopDiffEq(testColl, lastTop, "commands", numRecords); + +// getIndexes +assert.eq(1, testColl.getIndexes().length); +assertTopDiffEq(testColl, lastTop, "commands", 1); +lastTop = assertTopDiffEq(testColl, lastTop, "readLock", 1); + +// createIndex +res = assert.commandWorked(testColl.createIndex({x: 1})); +assertTopDiffEq(testColl, lastTop, "writeLock", 1); +lastTop = assertTopDiffEq(testColl, lastTop, "commands", 1); + +// dropIndex +res = assert.commandWorked(testColl.dropIndex({x: 1})); +assertTopDiffEq(testColl, lastTop, "commands", 1); +lastTop = assertTopDiffEq(testColl, lastTop, "writeLock", 1); }()); diff --git a/jstests/core/ts1.js b/jstests/core/ts1.js index 79a2db95dca..a52995dd4c8 100644 --- a/jstests/core/ts1.js +++ b/jstests/core/ts1.js @@ -3,43 +3,42 @@ // if the inserts are into a sharded collection. 
// @tags: [assumes_unsharded_collection] (function() { - "use strict"; - const t = db.ts1; - t.drop(); - - const N = 20; - - for (let i = 0; i < N; i++) { - assert.writeOK(t.insert({_id: i, x: new Timestamp()})); - sleep(100); - } - - function get(i) { - return t.findOne({_id: i}).x; - } - - function cmp(a, b) { - if (a.t < b.t) - return -1; - if (a.t > b.t) - return 1; - - return a.i - b.i; - } - - for (let i = 0; i < N - 1; i++) { - const a = get(i); - const b = get(i + 1); - assert.gt(0, - cmp(a, b), - `Expected ${tojson(a)} to be smaller than ${tojson(b)} (at iteration ${i})`); - } - - assert.eq(N, t.find({x: {$type: 17}}).itcount()); - assert.eq(0, t.find({x: {$type: 3}}).itcount()); - - assert.writeOK(t.insert({_id: 100, x: new Timestamp(123456, 50)})); - const x = t.findOne({_id: 100}).x; - assert.eq(123456, x.t); - assert.eq(50, x.i); +"use strict"; +const t = db.ts1; +t.drop(); + +const N = 20; + +for (let i = 0; i < N; i++) { + assert.writeOK(t.insert({_id: i, x: new Timestamp()})); + sleep(100); +} + +function get(i) { + return t.findOne({_id: i}).x; +} + +function cmp(a, b) { + if (a.t < b.t) + return -1; + if (a.t > b.t) + return 1; + + return a.i - b.i; +} + +for (let i = 0; i < N - 1; i++) { + const a = get(i); + const b = get(i + 1); + assert.gt( + 0, cmp(a, b), `Expected ${tojson(a)} to be smaller than ${tojson(b)} (at iteration ${i})`); +} + +assert.eq(N, t.find({x: {$type: 17}}).itcount()); +assert.eq(0, t.find({x: {$type: 3}}).itcount()); + +assert.writeOK(t.insert({_id: 100, x: new Timestamp(123456, 50)})); +const x = t.findOne({_id: 100}).x; +assert.eq(123456, x.t); +assert.eq(50, x.i); }()); diff --git a/jstests/core/ttl_index_options.js b/jstests/core/ttl_index_options.js index f4d3c4b3e42..47ae2709073 100644 --- a/jstests/core/ttl_index_options.js +++ b/jstests/core/ttl_index_options.js @@ -4,42 +4,40 @@ * @tags: [requires_ttl_index] */ (function() { - 'use strict'; - - let coll = db.core_ttl_index_options; - coll.drop(); - - // Ensure that any overflows are caught when converting from seconds to milliseconds. - assert.commandFailedWithCode( - coll.createIndexes([{x: 1}], {expireAfterSeconds: 9223372036854775808}), - ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode( - coll.createIndexes([{x: 1}], {expireAfterSeconds: 9999999999999999}), - ErrorCodes.CannotCreateIndex); - - // Ensure that we cannot provide a time that is larger than the current epoch time. - let secondsSinceEpoch = Date.now() / 1000; - assert.commandFailedWithCode( - coll.createIndexes([{x: 1}], {expireAfterSeconds: secondsSinceEpoch + 1000}), - ErrorCodes.CannotCreateIndex); - - // 'expireAfterSeconds' cannot be less than 0. - assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: -1}), - ErrorCodes.CannotCreateIndex); - assert.commandWorked(coll.createIndexes([{z: 1}], {expireAfterSeconds: 0})); - - // Compound indexes are not support with TTL indexes. - assert.commandFailedWithCode(coll.createIndexes([{x: 1, y: 1}], {expireAfterSeconds: 100}), - ErrorCodes.CannotCreateIndex); - - // 'expireAfterSeconds' should be a number. - assert.commandFailedWithCode( - coll.createIndexes([{x: 1}], {expireAfterSeconds: "invalidOption"}), - ErrorCodes.CannotCreateIndex); - - // Using 'expireAfterSeconds' as an index key is valid, but doesn't create a TTL index. - assert.commandWorked(coll.createIndexes([{x: 1, expireAfterSeconds: 3600}])); - - // Create a valid TTL index. 
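For quick reference, a minimal sketch of the kind of TTL index this file expects to succeed; the collection and field names are illustrative only.

// Illustrative example: a TTL index is a single-field index whose
// expireAfterSeconds value is a non-negative number that is not larger than the
// current epoch time in seconds.
db.demo.createIndex({createdAt: 1}, {expireAfterSeconds: 3600});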
- assert.commandWorked(coll.createIndexes([{x: 1}, {y: 1}], {expireAfterSeconds: 3600})); +'use strict'; + +let coll = db.core_ttl_index_options; +coll.drop(); + +// Ensure that any overflows are caught when converting from seconds to milliseconds. +assert.commandFailedWithCode( + coll.createIndexes([{x: 1}], {expireAfterSeconds: 9223372036854775808}), + ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: 9999999999999999}), + ErrorCodes.CannotCreateIndex); + +// Ensure that we cannot provide a time that is larger than the current epoch time. +let secondsSinceEpoch = Date.now() / 1000; +assert.commandFailedWithCode( + coll.createIndexes([{x: 1}], {expireAfterSeconds: secondsSinceEpoch + 1000}), + ErrorCodes.CannotCreateIndex); + +// 'expireAfterSeconds' cannot be less than 0. +assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: -1}), + ErrorCodes.CannotCreateIndex); +assert.commandWorked(coll.createIndexes([{z: 1}], {expireAfterSeconds: 0})); + +// Compound indexes are not support with TTL indexes. +assert.commandFailedWithCode(coll.createIndexes([{x: 1, y: 1}], {expireAfterSeconds: 100}), + ErrorCodes.CannotCreateIndex); + +// 'expireAfterSeconds' should be a number. +assert.commandFailedWithCode(coll.createIndexes([{x: 1}], {expireAfterSeconds: "invalidOption"}), + ErrorCodes.CannotCreateIndex); + +// Using 'expireAfterSeconds' as an index key is valid, but doesn't create a TTL index. +assert.commandWorked(coll.createIndexes([{x: 1, expireAfterSeconds: 3600}])); + +// Create a valid TTL index. +assert.commandWorked(coll.createIndexes([{x: 1}, {y: 1}], {expireAfterSeconds: 3600})); }()); diff --git a/jstests/core/txns/abort_expired_transaction.js b/jstests/core/txns/abort_expired_transaction.js index 3022080d1b4..c64ed7407e5 100644 --- a/jstests/core/txns/abort_expired_transaction.js +++ b/jstests/core/txns/abort_expired_transaction.js @@ -5,85 +5,83 @@ // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const testDBName = "testDB"; - const testCollName = "abort_expired_transaction"; - const ns = testDBName + "." + testCollName; - const testDB = db.getSiblingDB(testDBName); - const testColl = testDB[testCollName]; - testColl.drop({writeConcern: {w: "majority"}}); +const testDBName = "testDB"; +const testCollName = "abort_expired_transaction"; +const ns = testDBName + "." + testCollName; +const testDB = db.getSiblingDB(testDBName); +const testColl = testDB[testCollName]; +testColl.drop({writeConcern: {w: "majority"}}); - // Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the - // end of the test. - const res = assert.commandWorked( - db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); - const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; +// Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the +// end of the test. 
+const res = + assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); +const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; - try { - jsTest.log("Decrease transactionLifetimeLimitSeconds from " + - originalTransactionLifetimeLimitSeconds + " to 1 second."); - assert.commandWorked( - db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1})); +try { + jsTest.log("Decrease transactionLifetimeLimitSeconds from " + + originalTransactionLifetimeLimitSeconds + " to 1 second."); + assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 1})); - jsTest.log("Create a collection '" + ns + "' outside of the transaction."); - assert.writeOK(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}})); + jsTest.log("Create a collection '" + ns + "' outside of the transaction."); + assert.writeOK(testColl.insert({foo: "bar"}, {writeConcern: {w: "majority"}})); - jsTest.log("Set up the session."); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(testDBName); + jsTest.log("Set up the session."); + const sessionOptions = {causalConsistency: false}; + const session = db.getMongo().startSession(sessionOptions); + const sessionDb = session.getDatabase(testDBName); - let txnNumber = 0; + let txnNumber = 0; - jsTest.log("Insert a document starting a transaction."); - assert.commandWorked(sessionDb.runCommand({ - insert: testCollName, - documents: [{_id: "insert-1"}], - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false, - })); + jsTest.log("Insert a document starting a transaction."); + assert.commandWorked(sessionDb.runCommand({ + insert: testCollName, + documents: [{_id: "insert-1"}], + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false, + })); - // We can deterministically wait for the transaction to be aborted by waiting for currentOp - // to cease reporting the inactive transaction: the transaction should disappear from the - // currentOp results once aborted. - jsTest.log("Wait for the transaction to expire and be aborted."); - assert.soon( - function() { - const sessionFilter = { - active: false, - opid: {$exists: false}, - desc: "inactive transaction", - "transaction.parameters.txnNumber": NumberLong(txnNumber), - "lsid.id": session.getSessionId().id - }; - const res = db.getSiblingDB("admin").aggregate( - [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]); - return (res.itcount() == 0); + // We can deterministically wait for the transaction to be aborted by waiting for currentOp + // to cease reporting the inactive transaction: the transaction should disappear from the + // currentOp results once aborted. 
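For reference, a pared-down version of the currentOp lookup this wait loop relies on; the stage options and filter fields are the ones used just below, shown here in isolation.

// Illustrative example: list idle (inactive) transactions across all sessions.
db.getSiblingDB("admin").aggregate([
    {$currentOp: {allUsers: true, idleSessions: true}},
    {$match: {active: false, desc: "inactive transaction"}}
]);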
+ jsTest.log("Wait for the transaction to expire and be aborted."); + assert.soon( + function() { + const sessionFilter = { + active: false, + opid: {$exists: false}, + desc: "inactive transaction", + "transaction.parameters.txnNumber": NumberLong(txnNumber), + "lsid.id": session.getSessionId().id + }; + const res = db.getSiblingDB("admin").aggregate( + [{$currentOp: {allUsers: true, idleSessions: true}}, {$match: sessionFilter}]); + return (res.itcount() == 0); + }, + "currentOp reports that the idle transaction still exists, it has not been " + + "aborted as expected."); - }, - "currentOp reports that the idle transaction still exists, it has not been " + - "aborted as expected."); + jsTest.log( + "Attempt to do a write in the transaction, which should fail because the transaction " + + "was aborted"); + assert.commandFailedWithCode(sessionDb.runCommand({ + insert: testCollName, + documents: [{_id: "insert-2"}], + txnNumber: NumberLong(txnNumber), + autocommit: false, + }), + ErrorCodes.NoSuchTransaction); - jsTest.log( - "Attempt to do a write in the transaction, which should fail because the transaction " + - "was aborted"); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: testCollName, - documents: [{_id: "insert-2"}], - txnNumber: NumberLong(txnNumber), - autocommit: false, - }), - ErrorCodes.NoSuchTransaction); - - session.endSession(); - } finally { - // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact - // other tests in the suite. - assert.commandWorked(db.adminCommand({ - setParameter: 1, - transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds - })); - } + session.endSession(); +} finally { + // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact + // other tests in the suite. + assert.commandWorked(db.adminCommand({ + setParameter: 1, + transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds + })); +} }()); diff --git a/jstests/core/txns/abort_prepared_transaction.js b/jstests/core/txns/abort_prepared_transaction.js index 365a4d852bf..3f2a21f98c1 100644 --- a/jstests/core/txns/abort_prepared_transaction.js +++ b/jstests/core/txns/abort_prepared_transaction.js @@ -4,79 +4,85 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "abort_prepared_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "abort_prepared_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc1 = {_id: 1, x: 1}; +const doc1 = { + _id: 1, + x: 1 +}; - // ---- Test 1. 
Insert a single document and run prepare. ---- +// ---- Test 1. Insert a single document and run prepare. ---- - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); - // Insert should not be visible outside the session. - assert.eq(null, testColl.findOne(doc1)); +// Insert should not be visible outside the session. +assert.eq(null, testColl.findOne(doc1)); - // Insert should be visible in this session. - assert.eq(doc1, sessionColl.findOne(doc1)); +// Insert should be visible in this session. +assert.eq(doc1, sessionColl.findOne(doc1)); - PrepareHelpers.prepareTransaction(session); - assert.commandWorked(session.abortTransaction_forTesting()); +PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); - // After abort the insert is rolled back. - assert.eq(null, testColl.findOne(doc1)); +// After abort the insert is rolled back. +assert.eq(null, testColl.findOne(doc1)); - // ---- Test 2. Update a document and run prepare. ---- +// ---- Test 2. Update a document and run prepare. ---- - // Insert a document to update. - assert.commandWorked(sessionColl.insert(doc1, {writeConcern: {w: "majority"}})); +// Insert a document to update. +assert.commandWorked(sessionColl.insert(doc1, {writeConcern: {w: "majority"}})); - session.startTransaction(); - assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}})); +session.startTransaction(); +assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}})); - const doc2 = {_id: 1, x: 2}; +const doc2 = { + _id: 1, + x: 2 +}; - // Update should not be visible outside the session. - assert.eq(null, testColl.findOne(doc2)); +// Update should not be visible outside the session. +assert.eq(null, testColl.findOne(doc2)); - // Update should be visible in this session. - assert.eq(doc2, sessionColl.findOne(doc2)); +// Update should be visible in this session. +assert.eq(doc2, sessionColl.findOne(doc2)); - PrepareHelpers.prepareTransaction(session); - assert.commandWorked(session.abortTransaction_forTesting()); +PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); - // After abort the update is rolled back. - assert.eq(doc1, testColl.findOne({_id: 1})); +// After abort the update is rolled back. +assert.eq(doc1, testColl.findOne({_id: 1})); - // ---- Test 3. Delete a document and run prepare. ---- +// ---- Test 3. Delete a document and run prepare. ---- - // Update the document. - assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}, {writeConcern: {w: "majority"}})); +// Update the document. +assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}}, {writeConcern: {w: "majority"}})); - session.startTransaction(); - assert.commandWorked(sessionColl.remove(doc2, {justOne: true})); +session.startTransaction(); +assert.commandWorked(sessionColl.remove(doc2, {justOne: true})); - // Delete should not be visible outside the session, so the document should be. - assert.eq(doc2, testColl.findOne(doc2)); +// Delete should not be visible outside the session, so the document should be. +assert.eq(doc2, testColl.findOne(doc2)); - // Document should not be visible in this session, since the delete should be visible. - assert.eq(null, sessionColl.findOne(doc2)); +// Document should not be visible in this session, since the delete should be visible. 
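A rough sketch of what PrepareHelpers.prepareTransaction(session), called several times in this test, is assumed to do based on how it is used here; the real helper lives in jstests/core/txns/libs/prepare_helpers.js and may differ in detail.

// Assumed shape of the helper: run prepareTransaction on the session's admin
// database and return the prepareTimestamp from the reply.
const prepareRes = assert.commandWorked(
    session.getDatabase("admin").adminCommand({prepareTransaction: 1}));
const prepareTimestamp = prepareRes.prepareTimestamp;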
+assert.eq(null, sessionColl.findOne(doc2)); - PrepareHelpers.prepareTransaction(session); - assert.commandWorked(session.abortTransaction_forTesting()); +PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); - // After abort the delete is rolled back. - assert.eq(doc2, testColl.findOne(doc2)); +// After abort the delete is rolled back. +assert.eq(doc2, testColl.findOne(doc2)); }()); diff --git a/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js b/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js index 297f0bbe902..5b899e73689 100644 --- a/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js +++ b/jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js @@ -6,104 +6,104 @@ // // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "abort_transaction_thread_does_not_block_on_locks"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - const sessionOptions = {causalConsistency: false}; +const dbName = "test"; +const collName = "abort_transaction_thread_does_not_block_on_locks"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; +const sessionOptions = { + causalConsistency: false +}; - let dropRes = testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - if (!dropRes.ok) { - assert.commandFailedWithCode(dropRes, ErrorCodes.NamespaceNotFound); - } +let dropRes = testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +if (!dropRes.ok) { + assert.commandFailedWithCode(dropRes, ErrorCodes.NamespaceNotFound); +} - const bulk = testColl.initializeUnorderedBulkOp(); - for (let i = 0; i < 4; ++i) { - bulk.insert({_id: i}); - } - assert.commandWorked(bulk.execute({w: "majority"})); +const bulk = testColl.initializeUnorderedBulkOp(); +for (let i = 0; i < 4; ++i) { + bulk.insert({_id: i}); +} +assert.commandWorked(bulk.execute({w: "majority"})); - const res = assert.commandWorked( - db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); - const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; +const res = + assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); +const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; - try { - let transactionLifeTime = 10; - jsTest.log("Decrease transactionLifetimeLimitSeconds to " + transactionLifeTime + - " seconds."); - assert.commandWorked(db.adminCommand( - {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime})); +try { + let transactionLifeTime = 10; + jsTest.log("Decrease transactionLifetimeLimitSeconds to " + transactionLifeTime + " seconds."); + assert.commandWorked( + db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime})); - // Set up two transactions with IX locks and cursors. + // Set up two transactions with IX locks and cursors. 
- let session1 = db.getMongo().startSession(sessionOptions); - let sessionDb1 = session1.getDatabase(dbName); - let sessionColl1 = sessionDb1[collName]; + let session1 = db.getMongo().startSession(sessionOptions); + let sessionDb1 = session1.getDatabase(dbName); + let sessionColl1 = sessionDb1[collName]; - let session2 = db.getMongo().startSession(sessionOptions); - let sessionDb2 = session2.getDatabase(dbName); - let sessionColl2 = sessionDb2[collName]; + let session2 = db.getMongo().startSession(sessionOptions); + let sessionDb2 = session2.getDatabase(dbName); + let sessionColl2 = sessionDb2[collName]; - let firstTxnNumber = 1; - let secondTxnNumber = 2; + let firstTxnNumber = 1; + let secondTxnNumber = 2; - jsTest.log("Setting up first transaction with an open cursor and IX lock"); - let cursorRes1 = assert.commandWorked(sessionDb1.runCommand({ - find: collName, - batchSize: 2, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(firstTxnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1)); - assert.neq(0, cursorRes1.cursor.id, tojson(cursorRes1)); + jsTest.log("Setting up first transaction with an open cursor and IX lock"); + let cursorRes1 = assert.commandWorked(sessionDb1.runCommand({ + find: collName, + batchSize: 2, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(firstTxnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false + })); + assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1)); + assert.neq(0, cursorRes1.cursor.id, tojson(cursorRes1)); - jsTest.log("Setting up second transaction with an open cursor and IX lock"); - let cursorRes2 = assert.commandWorked(sessionDb2.runCommand({ - find: collName, - batchSize: 2, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(secondTxnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2)); - assert.neq(0, cursorRes2.cursor.id, tojson(cursorRes2)); + jsTest.log("Setting up second transaction with an open cursor and IX lock"); + let cursorRes2 = assert.commandWorked(sessionDb2.runCommand({ + find: collName, + batchSize: 2, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(secondTxnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false + })); + assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2)); + assert.neq(0, cursorRes2.cursor.id, tojson(cursorRes2)); - jsTest.log("Perform a drop. This will block until both transactions finish. The " + - "transactions should expire in " + transactionLifeTime * 1.5 + - " seconds or less."); - assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); + jsTest.log("Perform a drop. This will block until both transactions finish. The " + + "transactions should expire in " + transactionLifeTime * 1.5 + " seconds or less."); + assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); - // Verify and cleanup. + // Verify and cleanup. - jsTest.log("Drop finished. 
Verifying that the transactions were aborted as expected"); - assert.commandFailedWithCode(sessionDb1.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(firstTxnNumber), - stmtId: NumberInt(2), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - assert.commandFailedWithCode(sessionDb2.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(secondTxnNumber), - stmtId: NumberInt(2), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); + jsTest.log("Drop finished. Verifying that the transactions were aborted as expected"); + assert.commandFailedWithCode(sessionDb1.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(firstTxnNumber), + stmtId: NumberInt(2), + autocommit: false + }), + ErrorCodes.NoSuchTransaction); + assert.commandFailedWithCode(sessionDb2.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(secondTxnNumber), + stmtId: NumberInt(2), + autocommit: false + }), + ErrorCodes.NoSuchTransaction); - session1.endSession(); - session2.endSession(); - } finally { - assert.commandWorked(db.adminCommand({ - setParameter: 1, - transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds - })); - } + session1.endSession(); + session2.endSession(); +} finally { + assert.commandWorked(db.adminCommand({ + setParameter: 1, + transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds + })); +} }()); diff --git a/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js b/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js index cfedbb158b1..aadda6561a9 100644 --- a/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js +++ b/jstests/core/txns/abort_unprepared_transactions_on_FCV_downgrade.js @@ -1,45 +1,47 @@ // Test that open unprepared transactions are aborted on FCV downgrade. // @tags: [uses_transactions] (function() { - "use strict"; - load("jstests/libs/feature_compatibility_version.js"); - - const dbName = "test"; - const collName = "abort_unprepared_transactions_on_FCV_downgrade"; - const testDB = db.getSiblingDB(dbName); - const adminDB = db.getSiblingDB("admin"); - testDB[collName].drop({writeConcern: {w: "majority"}}); - - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDB = session.getDatabase(dbName); - - try { - jsTestLog("Start a transaction."); - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"})); - - jsTestLog("Attempt to drop the collection. This should fail due to the open transaction."); - assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}), - ErrorCodes.MaxTimeMSExpired); - - jsTestLog("Downgrade the featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - checkFCV(adminDB, lastStableFCV); - - jsTestLog("Drop the collection. 
This should succeed, since the transaction was aborted."); - assert.commandWorked(testDB.runCommand({drop: collName})); - - jsTestLog("Test that committing the transaction fails, since it was aborted."); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } finally { - jsTestLog("Restore the original featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - } - - session.endSession(); +"use strict"; +load("jstests/libs/feature_compatibility_version.js"); + +const dbName = "test"; +const collName = "abort_unprepared_transactions_on_FCV_downgrade"; +const testDB = db.getSiblingDB(dbName); +const adminDB = db.getSiblingDB("admin"); +testDB[collName].drop({writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDB = session.getDatabase(dbName); + +try { + jsTestLog("Start a transaction."); + session.startTransaction({readConcern: {level: "snapshot"}}); + assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"})); + + jsTestLog("Attempt to drop the collection. This should fail due to the open transaction."); + assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}), + ErrorCodes.MaxTimeMSExpired); + + jsTestLog("Downgrade the featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); + checkFCV(adminDB, lastStableFCV); + + jsTestLog("Drop the collection. This should succeed, since the transaction was aborted."); + assert.commandWorked(testDB.runCommand({drop: collName})); + + jsTestLog("Test that committing the transaction fails, since it was aborted."); + assert.commandFailedWithCode(session.commitTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} finally { + jsTestLog("Restore the original featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); +} + +session.endSession(); }()); diff --git a/jstests/core/txns/aggregation_in_transaction.js b/jstests/core/txns/aggregation_in_transaction.js index c14c4276fa8..76c5f4d6a0a 100644 --- a/jstests/core/txns/aggregation_in_transaction.js +++ b/jstests/core/txns/aggregation_in_transaction.js @@ -1,56 +1,62 @@ // Tests that aggregation is supported in transactions. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; - - load("jstests/libs/fixture_helpers.js"); // For isSharded. - - const session = db.getMongo().startSession({causalConsistency: false}); - const testDB = session.getDatabase("test"); - const coll = testDB.getCollection("aggregation_in_transaction"); - const foreignColl = testDB.getCollection("aggregation_in_transaction_lookup"); - - [coll, foreignColl].forEach(col => { - const reply = col.runCommand("drop", {writeConcern: {w: "majority"}}); - if (reply.ok !== 1) { - assert.commandFailedWithCode(reply, ErrorCodes.NamespaceNotFound); - } - }); - - // Populate the collections. 
- const testDoc = {_id: 0, foreignKey: "orange"}; - assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}})); - const foreignDoc = {_id: "orange", val: 9}; - assert.commandWorked(foreignColl.insert(foreignDoc, {writeConcern: {w: "majority"}})); - - const isForeignSharded = FixtureHelpers.isSharded(foreignColl); - - // Run a dummy find to start the transaction. - jsTestLog("Starting transaction."); - session.startTransaction({readConcern: {level: "snapshot"}}); - let cursor = coll.find(); - cursor.next(); - - // Insert a document outside of the transaction. Subsequent aggregations should not see this - // document. - jsTestLog("Inserting document outside of transaction."); - assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({ - _id: "not_visible_in_transaction", - foreignKey: "orange", - })); - - // Perform an aggregation that is fed by a cursor on the underlying collection. Only the - // majority-committed document present at the start of the transaction should be found. - jsTestLog("Starting aggregations inside of the transaction."); - cursor = coll.aggregate({$match: {}}); - assert.docEq(testDoc, cursor.next()); - assert(!cursor.hasNext()); +"use strict"; + +load("jstests/libs/fixture_helpers.js"); // For isSharded. - // Perform aggregations that look at other collections. - // TODO: SERVER-39162 Sharded $lookup is not supported in transactions. - if (!isForeignSharded) { - const lookupDoc = Object.extend(testDoc, {lookup: [foreignDoc]}); - cursor = coll.aggregate({ +const session = db.getMongo().startSession({causalConsistency: false}); +const testDB = session.getDatabase("test"); +const coll = testDB.getCollection("aggregation_in_transaction"); +const foreignColl = testDB.getCollection("aggregation_in_transaction_lookup"); + +[coll, foreignColl].forEach(col => { + const reply = col.runCommand("drop", {writeConcern: {w: "majority"}}); + if (reply.ok !== 1) { + assert.commandFailedWithCode(reply, ErrorCodes.NamespaceNotFound); + } +}); + +// Populate the collections. +const testDoc = { + _id: 0, + foreignKey: "orange" +}; +assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}})); +const foreignDoc = { + _id: "orange", + val: 9 +}; +assert.commandWorked(foreignColl.insert(foreignDoc, {writeConcern: {w: "majority"}})); + +const isForeignSharded = FixtureHelpers.isSharded(foreignColl); + +// Run a dummy find to start the transaction. +jsTestLog("Starting transaction."); +session.startTransaction({readConcern: {level: "snapshot"}}); +let cursor = coll.find(); +cursor.next(); + +// Insert a document outside of the transaction. Subsequent aggregations should not see this +// document. +jsTestLog("Inserting document outside of transaction."); +assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({ + _id: "not_visible_in_transaction", + foreignKey: "orange", +})); + +// Perform an aggregation that is fed by a cursor on the underlying collection. Only the +// majority-committed document present at the start of the transaction should be found. +jsTestLog("Starting aggregations inside of the transaction."); +cursor = coll.aggregate({$match: {}}); +assert.docEq(testDoc, cursor.next()); +assert(!cursor.hasNext()); + +// Perform aggregations that look at other collections. +// TODO: SERVER-39162 Sharded $lookup is not supported in transactions. 
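FixtureHelpers.isSharded(), used above to compute isForeignSharded and to gate the $lookup cases in the branch that follows, is assumed to consult the sharding catalog along these lines; its actual implementation in jstests/libs/fixture_helpers.js may differ.

// Assumed sketch: treat a collection as sharded if its namespace appears in the
// config server's collection metadata.
function isShardedSketch(coll) {
    return coll.getDB().getSiblingDB("config").collections.find({_id: coll.getFullName()}).count() > 0;
}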
+if (!isForeignSharded) { + const lookupDoc = Object.extend(testDoc, {lookup: [foreignDoc]}); + cursor = coll.aggregate({ $lookup: { from: foreignColl.getName(), localField: "foreignKey", @@ -58,10 +64,10 @@ as: "lookup", } }); - assert.docEq(cursor.next(), lookupDoc); - assert(!cursor.hasNext()); + assert.docEq(cursor.next(), lookupDoc); + assert(!cursor.hasNext()); - cursor = coll.aggregate({ + cursor = coll.aggregate({ $graphLookup: { from: foreignColl.getName(), startWith: "$foreignKey", @@ -70,50 +76,47 @@ as: "lookup" } }); - assert.docEq(cursor.next(), lookupDoc); - assert(!cursor.hasNext()); - } else { - // TODO SERVER-39048: Test that $lookup on sharded collection is banned - // within a transaction. - } - - jsTestLog("Testing $count within a transaction."); - - let countRes = coll.aggregate([{$count: "count"}]).toArray(); - assert.eq(countRes.length, 1, tojson(countRes)); - assert.eq(countRes[0].count, 1, tojson(countRes)); - - assert.commandWorked(coll.insert({a: 2})); - countRes = coll.aggregate([{$count: "count"}]).toArray(); - assert.eq(countRes.length, 1, tojson(countRes)); - assert.eq(countRes[0].count, 2, tojson(countRes)); - - assert.commandWorked( - db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({a: 3})); - countRes = coll.aggregate([{$count: "count"}]).toArray(); - assert.eq(countRes.length, 1, tojson(countRes)); - assert.eq(countRes[0].count, 2, tojson(countRes)); - - assert.commandWorked(session.commitTransaction_forTesting()); - jsTestLog("Transaction committed."); - - // Perform aggregations with non-cursor initial sources and assert that they are not supported - // in transactions. - jsTestLog("Running aggregations in transactions that are expected to throw and fail."); - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.throws(() => coll.aggregate({$currentOp: {allUsers: true, localOps: true}}).next()); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.throws( - () => coll.aggregate({$collStats: {latencyStats: {histograms: true}, storageStats: {}}}) - .next()); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.throws(() => coll.aggregate({$indexStats: {}}).next()); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); + assert.docEq(cursor.next(), lookupDoc); + assert(!cursor.hasNext()); +} else { + // TODO SERVER-39048: Test that $lookup on sharded collection is banned + // within a transaction. 
+} + +jsTestLog("Testing $count within a transaction."); + +let countRes = coll.aggregate([{$count: "count"}]).toArray(); +assert.eq(countRes.length, 1, tojson(countRes)); +assert.eq(countRes[0].count, 1, tojson(countRes)); + +assert.commandWorked(coll.insert({a: 2})); +countRes = coll.aggregate([{$count: "count"}]).toArray(); +assert.eq(countRes.length, 1, tojson(countRes)); +assert.eq(countRes[0].count, 2, tojson(countRes)); + +assert.commandWorked( + db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({a: 3})); +countRes = coll.aggregate([{$count: "count"}]).toArray(); +assert.eq(countRes.length, 1, tojson(countRes)); +assert.eq(countRes[0].count, 2, tojson(countRes)); + +assert.commandWorked(session.commitTransaction_forTesting()); +jsTestLog("Transaction committed."); + +// Perform aggregations with non-cursor initial sources and assert that they are not supported +// in transactions. +jsTestLog("Running aggregations in transactions that are expected to throw and fail."); +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.throws(() => coll.aggregate({$currentOp: {allUsers: true, localOps: true}}).next()); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.throws( + () => + coll.aggregate({$collStats: {latencyStats: {histograms: true}, storageStats: {}}}).next()); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.throws(() => coll.aggregate({$indexStats: {}}).next()); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); }()); diff --git a/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js b/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js index a6623101196..32e5822519c 100644 --- a/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js +++ b/jstests/core/txns/await_prepared_transactions_on_FCV_downgrade.js @@ -3,68 +3,68 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/libs/feature_compatibility_version.js"); - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/libs/feature_compatibility_version.js"); +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "await_prepared_transactions_on_FCV_downgrade"; - const testDB = db.getSiblingDB(dbName); - const adminDB = db.getSiblingDB("admin"); +const dbName = "test"; +const collName = "await_prepared_transactions_on_FCV_downgrade"; +const testDB = db.getSiblingDB(dbName); +const adminDB = db.getSiblingDB("admin"); - testDB[collName].drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testDB[collName].drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = testDB.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); +const session = testDB.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); - try { - jsTestLog("Start a transaction."); - session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({"a": 1})); +try { + jsTestLog("Start a transaction."); + session.startTransaction(); + 
assert.commandWorked(sessionDB[collName].insert({"a": 1})); - jsTestLog("Put that transaction into a prepared state."); - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); + jsTestLog("Put that transaction into a prepared state."); + let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - // The setFCV command will need to acquire a global S lock to complete. The global - // lock is currently held by prepare, so that will block. We use a failpoint to make that - // command fail immediately when it tries to get the lock. - assert.commandWorked(testDB.adminCommand( - {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"})); + // The setFCV command will need to acquire a global S lock to complete. The global + // lock is currently held by prepare, so that will block. We use a failpoint to make that + // command fail immediately when it tries to get the lock. + assert.commandWorked(testDB.adminCommand( + {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"})); - jsTestLog("Attempt to downgrade the featureCompatibilityVersion."); - assert.commandFailedWithCode( - testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}), - ErrorCodes.LockTimeout); + jsTestLog("Attempt to downgrade the featureCompatibilityVersion."); + assert.commandFailedWithCode( + testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}), + ErrorCodes.LockTimeout); - assert.commandWorked(testDB.adminCommand( - {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"})); + assert.commandWorked( + testDB.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"})); - jsTestLog("Verify that the setFCV command set the target version to 'lastStable'."); - checkFCV(adminDB, lastStableFCV, lastStableFCV); + jsTestLog("Verify that the setFCV command set the target version to 'lastStable'."); + checkFCV(adminDB, lastStableFCV, lastStableFCV); - jsTestLog("Commit the prepared transaction."); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); + jsTestLog("Commit the prepared transaction."); + assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - jsTestLog("Rerun the setFCV command and let it complete successfully."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - checkFCV(adminDB, lastStableFCV); + jsTestLog("Rerun the setFCV command and let it complete successfully."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); + checkFCV(adminDB, lastStableFCV); - jsTestLog("Verify that we are not allowed to prepare a transaction after downgrading."); - session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({"b": 2})); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), - ErrorCodes.CommandNotSupported); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } finally { - assert.commandWorked(testDB.adminCommand( - {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"})); + jsTestLog("Verify that we are not allowed to prepare a transaction after downgrading."); + session.startTransaction(); + assert.commandWorked(sessionDB[collName].insert({"b": 2})); + assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), + ErrorCodes.CommandNotSupported); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + 
ErrorCodes.NoSuchTransaction); +} finally { + assert.commandWorked( + testDB.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"})); - jsTestLog("Restore the original featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - } + jsTestLog("Restore the original featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); +} - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/banned_txn_dbs.js b/jstests/core/txns/banned_txn_dbs.js index 4422d19ea0a..78bcef608a5 100644 --- a/jstests/core/txns/banned_txn_dbs.js +++ b/jstests/core/txns/banned_txn_dbs.js @@ -2,36 +2,36 @@ // transactions. // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const session = db.getMongo().startSession({causalConsistency: false}); - const collName = "banned_txn_dbs"; +const session = db.getMongo().startSession({causalConsistency: false}); +const collName = "banned_txn_dbs"; - function runTest(sessionDB) { - jsTest.log("Testing database " + sessionDB.getName()); +function runTest(sessionDB) { + jsTest.log("Testing database " + sessionDB.getName()); - let sessionColl = sessionDB[collName]; - sessionColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(sessionDB.createCollection(collName, {writeConcern: {w: "majority"}})); + let sessionColl = sessionDB[collName]; + sessionColl.drop({writeConcern: {w: "majority"}}); + assert.commandWorked(sessionDB.createCollection(collName, {writeConcern: {w: "majority"}})); - jsTest.log("Testing read commands are forbidden."); - session.startTransaction(); - let error = assert.throws(() => sessionColl.find().itcount()); - assert.commandFailedWithCode(error, ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); + jsTest.log("Testing read commands are forbidden."); + session.startTransaction(); + let error = assert.throws(() => sessionColl.find().itcount()); + assert.commandFailedWithCode(error, ErrorCodes.OperationNotSupportedInTransaction); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); - jsTest.log("Testing write commands are forbidden."); - session.startTransaction(); - assert.commandFailedWithCode(sessionColl.insert({}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } + jsTest.log("Testing write commands are forbidden."); + session.startTransaction(); + assert.commandFailedWithCode(sessionColl.insert({}), + ErrorCodes.OperationNotSupportedInTransaction); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} - runTest(session.getDatabase("config")); - runTest(session.getDatabase("admin")); - runTest(session.getDatabase("local")); +runTest(session.getDatabase("config")); +runTest(session.getDatabase("admin")); +runTest(session.getDatabase("local")); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/basic_causal_consistency.js b/jstests/core/txns/basic_causal_consistency.js index 84f1520a105..5a78ddc0900 100644 --- a/jstests/core/txns/basic_causal_consistency.js +++ b/jstests/core/txns/basic_causal_consistency.js @@ -1,33 +1,35 @@ // Test that the shell helper 
supports causal consistency. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "basic_causal_consistency"; - const testDB = db.getSiblingDB(dbName); +const dbName = "test"; +const collName = "basic_causal_consistency"; +const testDB = db.getSiblingDB(dbName); - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: true}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); +const sessionOptions = { + causalConsistency: true +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); - session.startTransaction({readConcern: {level: "snapshot"}}); +session.startTransaction({readConcern: {level: "snapshot"}}); - // Performing a read first should work when snapshot readConcern is specified. - assert.docEq(null, sessionColl.findOne({_id: "insert-1"})); +// Performing a read first should work when snapshot readConcern is specified. +assert.docEq(null, sessionColl.findOne({_id: "insert-1"})); - assert.commandWorked(sessionColl.insert({_id: "insert-1"})); +assert.commandWorked(sessionColl.insert({_id: "insert-1"})); - assert.docEq(null, sessionColl.findOne({_id: "insert-2"})); +assert.docEq(null, sessionColl.findOne({_id: "insert-2"})); - assert.docEq({_id: "insert-1"}, sessionColl.findOne({_id: "insert-1"})); +assert.docEq({_id: "insert-1"}, sessionColl.findOne({_id: "insert-1"})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js index 08a8551fb09..7f611ce2869 100644 --- a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js +++ b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js @@ -3,59 +3,59 @@ // requires_document_locking, // ] (function() { - "use strict"; +"use strict"; - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase("admin"); +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase("admin"); - const nonRetryableWriteCommands = [ - // Commands that are allowed in transactions. - {aggregate: 1}, - {commitTransaction: 1}, - {distinct: "c"}, - {find: "c"}, - {getMore: NumberLong(1), collection: "c"}, - {killCursors: 1}, - // A selection of commands that are not allowed in transactions. - {count: 1}, - {explain: {find: "c"}}, - {filemd5: 1}, - {isMaster: 1}, - {buildInfo: 1}, - {ping: 1}, - {listCommands: 1}, - {create: "c"}, - {drop: 1}, - {createIndexes: 1}, - {mapReduce: "c"} - ]; +const nonRetryableWriteCommands = [ + // Commands that are allowed in transactions. 
+ {aggregate: 1}, + {commitTransaction: 1}, + {distinct: "c"}, + {find: "c"}, + {getMore: NumberLong(1), collection: "c"}, + {killCursors: 1}, + // A selection of commands that are not allowed in transactions. + {count: 1}, + {explain: {find: "c"}}, + {filemd5: 1}, + {isMaster: 1}, + {buildInfo: 1}, + {ping: 1}, + {listCommands: 1}, + {create: "c"}, + {drop: 1}, + {createIndexes: 1}, + {mapReduce: "c"} +]; - const nonRetryableWriteCommandsMongodOnly = [ - // Commands that are allowed in transactions. - {coordinateCommitTransaction: 1, participants: []}, - {geoSearch: 1}, - {prepareTransaction: 1}, - // A selection of commands that are not allowed in transactions. - {applyOps: 1} - ]; +const nonRetryableWriteCommandsMongodOnly = [ + // Commands that are allowed in transactions. + {coordinateCommitTransaction: 1, participants: []}, + {geoSearch: 1}, + {prepareTransaction: 1}, + // A selection of commands that are not allowed in transactions. + {applyOps: 1} +]; - nonRetryableWriteCommands.forEach(function(command) { +nonRetryableWriteCommands.forEach(function(command) { + jsTest.log("Testing command: " + tojson(command)); + assert.commandFailedWithCode( + sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})), + [50768, 50889]); +}); + +if (!isMongos) { + nonRetryableWriteCommandsMongodOnly.forEach(function(command) { jsTest.log("Testing command: " + tojson(command)); assert.commandFailedWithCode( sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})), [50768, 50889]); }); +} - if (!isMongos) { - nonRetryableWriteCommandsMongodOnly.forEach(function(command) { - jsTest.log("Testing command: " + tojson(command)); - assert.commandFailedWithCode( - sessionDb.runCommand(Object.assign({}, command, {txnNumber: NumberLong(0)})), - [50768, 50889]); - }); - } - - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/commands_not_allowed_in_txn.js b/jstests/core/txns/commands_not_allowed_in_txn.js index 8583f6db9de..f0f96bed437 100644 --- a/jstests/core/txns/commands_not_allowed_in_txn.js +++ b/jstests/core/txns/commands_not_allowed_in_txn.js @@ -4,160 +4,157 @@ // uses_transactions, // ] (function() { - "use strict"; - - const dbName = "test"; - const collName = "commands_not_allowed_in_txn"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - let txnNumber = 0; - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - +"use strict"; + +const dbName = "test"; +const collName = "commands_not_allowed_in_txn"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +let txnNumber = 0; + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); + +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; + +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.runCommand({ + createIndexes: collName, + indexes: [ + {name: "geo_2d", key: {geo: "2d"}}, + {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1} + ], + 
writeConcern: {w: "majority"} +})); + +function setup() { + testColl.dropIndex({a: 1}); + testDB.runCommand({drop: "create_collection", writeConcern: {w: "majority"}}); + testDB.runCommand({drop: "drop_collection", writeConcern: {w: "majority"}}); assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - assert.commandWorked(testDB.runCommand({ - createIndexes: collName, - indexes: [ - {name: "geo_2d", key: {geo: "2d"}}, - {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1} - ], - writeConcern: {w: "majority"} - })); + testDB.createCollection("drop_collection", {writeConcern: {w: "majority"}})); +} - function setup() { - testColl.dropIndex({a: 1}); - testDB.runCommand({drop: "create_collection", writeConcern: {w: "majority"}}); - testDB.runCommand({drop: "drop_collection", writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection("drop_collection", {writeConcern: {w: "majority"}})); - } +function testCommand(command) { + jsTest.log("Testing command: " + tojson(command)); + const errmsgRegExp = new RegExp( + 'Cannot run .* in a multi-document transaction.\|This command is not supported in transactions'); - function testCommand(command) { - jsTest.log("Testing command: " + tojson(command)); - const errmsgRegExp = new RegExp( - 'Cannot run .* in a multi-document transaction.\|This command is not supported in transactions'); - - // Check that the command runs successfully outside transactions. - setup(); - assert.commandWorked(sessionDb.runCommand(command)); - - // Check that the command cannot be used to start a transaction. - setup(); - let res = assert.commandFailedWithCode(sessionDb.runCommand(Object.assign({}, command, { - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(++txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })), - ErrorCodes.OperationNotSupportedInTransaction); - // Check that the command fails with expected error message. - assert(res.errmsg.match(errmsgRegExp), res); - - // Mongos has special handling for commitTransaction to support commit recovery. - if (!isMongos) { - assert.commandFailedWithCode(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(1), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - } - - // Check that the command fails inside a transaction, but does not abort the transaction. - setup(); - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(++txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - res = assert.commandFailedWithCode( - sessionDb.runCommand(Object.assign( - {}, - command, - {txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1), autocommit: false})), - ErrorCodes.OperationNotSupportedInTransaction); - // Check that the command fails with expected error message. - assert(res.errmsg.match(errmsgRegExp), res); - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(2), - autocommit: false - })); - } + // Check that the command runs successfully outside transactions. + setup(); + assert.commandWorked(sessionDb.runCommand(command)); - // - // Test a selection of commands that are not allowed in transactions. 
- // - - const commands = [ - {count: collName}, - {count: collName, query: {a: 1}}, - {explain: {find: collName}}, - {filemd5: 1, root: "fs"}, - {isMaster: 1}, - {buildInfo: 1}, - {ping: 1}, - {listCommands: 1}, - {create: "create_collection", writeConcern: {w: "majority"}}, - {drop: "drop_collection", writeConcern: {w: "majority"}}, - { - createIndexes: collName, - indexes: [{name: "a_1", key: {a: 1}}], - writeConcern: {w: "majority"} - }, - // Output inline so the implicitly shard accessed collections override won't drop the - // output collection during the active transaction test case, which would hang indefinitely - // waiting for a database exclusive lock. - {mapReduce: collName, map: function() {}, reduce: function(key, vals) {}, out: {inline: 1}}, - ]; - - // There is no applyOps command on mongos. - if (!isMongos) { - commands.push( - {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}]}); - } - - commands.forEach(testCommand); - - // - // Test that a find command with the read-once cursor option is not allowed in a transaction. - // - assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - readOnce: true, + // Check that the command cannot be used to start a transaction. + setup(); + let res = assert.commandFailedWithCode(sessionDb.runCommand(Object.assign({}, command, { readConcern: {level: "snapshot"}, txnNumber: NumberLong(++txnNumber), stmtId: NumberInt(0), startTransaction: true, autocommit: false - }), - ErrorCodes.OperationNotSupportedInTransaction); + })), + ErrorCodes.OperationNotSupportedInTransaction); + // Check that the command fails with expected error message. + assert(res.errmsg.match(errmsgRegExp), res); // Mongos has special handling for commitTransaction to support commit recovery. if (!isMongos) { - // The failed find should abort the transaction so a commit should fail. assert.commandFailedWithCode(sessionDb.adminCommand({ commitTransaction: 1, - autocommit: false, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1), + autocommit: false }), ErrorCodes.NoSuchTransaction); } - session.endSession(); + // Check that the command fails inside a transaction, but does not abort the transaction. + setup(); + assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(++txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false + })); + res = assert.commandFailedWithCode( + sessionDb.runCommand(Object.assign( + {}, + command, + {txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1), autocommit: false})), + ErrorCodes.OperationNotSupportedInTransaction); + // Check that the command fails with expected error message. + assert(res.errmsg.match(errmsgRegExp), res); + assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(2), + autocommit: false + })); +} + +// +// Test a selection of commands that are not allowed in transactions. 
+// + +const commands = [ + {count: collName}, + {count: collName, query: {a: 1}}, + {explain: {find: collName}}, + {filemd5: 1, root: "fs"}, + {isMaster: 1}, + {buildInfo: 1}, + {ping: 1}, + {listCommands: 1}, + {create: "create_collection", writeConcern: {w: "majority"}}, + {drop: "drop_collection", writeConcern: {w: "majority"}}, + {createIndexes: collName, indexes: [{name: "a_1", key: {a: 1}}], writeConcern: {w: "majority"}}, + // Output inline so the implicitly shard accessed collections override won't drop the + // output collection during the active transaction test case, which would hang indefinitely + // waiting for a database exclusive lock. + {mapReduce: collName, map: function() {}, reduce: function(key, vals) {}, out: {inline: 1}}, +]; + +// There is no applyOps command on mongos. +if (!isMongos) { + commands.push( + {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 5}}}]}); +} + +commands.forEach(testCommand); + +// +// Test that a find command with the read-once cursor option is not allowed in a transaction. +// +assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + readOnce: true, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(++txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +}), + ErrorCodes.OperationNotSupportedInTransaction); + +// Mongos has special handling for commitTransaction to support commit recovery. +if (!isMongos) { + // The failed find should abort the transaction so a commit should fail. + assert.commandFailedWithCode(sessionDb.adminCommand({ + commitTransaction: 1, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(1), + }), + ErrorCodes.NoSuchTransaction); +} + +session.endSession(); }()); diff --git a/jstests/core/txns/commit_and_abort_large_prepared_transactions.js b/jstests/core/txns/commit_and_abort_large_prepared_transactions.js index 448c2bc79b5..d7505dfb043 100644 --- a/jstests/core/txns/commit_and_abort_large_prepared_transactions.js +++ b/jstests/core/txns/commit_and_abort_large_prepared_transactions.js @@ -5,48 +5,47 @@ */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "large_prepared_transactions"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - // As we are not able to send a single request larger than 16MB, we insert two documents - // of 10MB each to create a "large" transaction. - const kSize10MB = 10 * 1024 * 1024; - function createLargeDocument(id) { - return {_id: id, longString: new Array(kSize10MB).join("a")}; - } - - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - // Test preparing and committing a large transaction with two 10MB inserts. 
- let doc1 = createLargeDocument(1); - let doc2 = createLargeDocument(2); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - assert.commandWorked(sessionColl.insert(doc2)); - - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); - - // Test preparing and aborting a large transaction with two 10MB inserts. - let doc3 = createLargeDocument(3); - let doc4 = createLargeDocument(4); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc3)); - assert.commandWorked(sessionColl.insert(doc4)); - - PrepareHelpers.prepareTransaction(session); - assert.commandWorked(session.abortTransaction_forTesting()); - assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "large_prepared_transactions"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +// As we are not able to send a single request larger than 16MB, we insert two documents +// of 10MB each to create a "large" transaction. +const kSize10MB = 10 * 1024 * 1024; +function createLargeDocument(id) { + return {_id: id, longString: new Array(kSize10MB).join("a")}; +} + +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +// Test preparing and committing a large transaction with two 10MB inserts. +let doc1 = createLargeDocument(1); +let doc2 = createLargeDocument(2); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); +assert.commandWorked(sessionColl.insert(doc2)); + +let prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); + +// Test preparing and aborting a large transaction with two 10MB inserts. +let doc3 = createLargeDocument(3); +let doc4 = createLargeDocument(4); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc3)); +assert.commandWorked(sessionColl.insert(doc4)); + +PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); +assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []); }()); diff --git a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js index 2ddda9ed3f3..feb09ef4656 100644 --- a/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js +++ b/jstests/core/txns/commit_and_abort_large_unprepared_transactions.js @@ -5,45 +5,44 @@ */ (function() { - "use strict"; - - const dbName = "test"; - const collName = "large_unprepared_transactions"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - // As we are not able to send a single request larger than 16MB, we insert two documents - // of 10MB each to create a "large" transaction. 
- const kSize10MB = 10 * 1024 * 1024; - function createLargeDocument(id) { - return {_id: id, longString: new Array(kSize10MB).join("a")}; - } - - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - // Test committing an unprepared large transaction with two 10MB inserts. - let doc1 = createLargeDocument(1); - let doc2 = createLargeDocument(2); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - assert.commandWorked(sessionColl.insert(doc2)); - - assert.commandWorked(session.commitTransaction_forTesting()); - assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); - - // Test aborting an unprepared large transaction with two 10MB inserts. - let doc3 = createLargeDocument(3); - let doc4 = createLargeDocument(4); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc3)); - assert.commandWorked(sessionColl.insert(doc4)); - - assert.commandWorked(session.abortTransaction_forTesting()); - assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []); +"use strict"; + +const dbName = "test"; +const collName = "large_unprepared_transactions"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +// As we are not able to send a single request larger than 16MB, we insert two documents +// of 10MB each to create a "large" transaction. +const kSize10MB = 10 * 1024 * 1024; +function createLargeDocument(id) { + return {_id: id, longString: new Array(kSize10MB).join("a")}; +} + +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +// Test committing an unprepared large transaction with two 10MB inserts. +let doc1 = createLargeDocument(1); +let doc2 = createLargeDocument(2); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); +assert.commandWorked(sessionColl.insert(doc2)); + +assert.commandWorked(session.commitTransaction_forTesting()); +assert.sameMembers(sessionColl.find().toArray(), [doc1, doc2]); + +// Test aborting an unprepared large transaction with two 10MB inserts. 
+let doc3 = createLargeDocument(3); +let doc4 = createLargeDocument(4); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc3)); +assert.commandWorked(sessionColl.insert(doc4)); + +assert.commandWorked(session.abortTransaction_forTesting()); +assert.sameMembers(sessionColl.find({_id: {$gt: 2}}).toArray(), []); }()); diff --git a/jstests/core/txns/commit_prepared_transaction.js b/jstests/core/txns/commit_prepared_transaction.js index 39e9fa6c3b7..d8bd4908943 100644 --- a/jstests/core/txns/commit_prepared_transaction.js +++ b/jstests/core/txns/commit_prepared_transaction.js @@ -7,85 +7,91 @@ load("jstests/core/txns/libs/prepare_helpers.js"); (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "commit_prepared_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "commit_prepared_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc1 = {_id: 1, x: 1}; +const doc1 = { + _id: 1, + x: 1 +}; - // ---- Test 1. Insert a single document and run prepare. ---- +// ---- Test 1. Insert a single document and run prepare. ---- - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); - // Insert should not be visible outside the session. - assert.eq(null, testColl.findOne(doc1)); +// Insert should not be visible outside the session. +assert.eq(null, testColl.findOne(doc1)); - // Insert should be visible in this session. - assert.eq(doc1, sessionColl.findOne(doc1)); +// Insert should be visible in this session. +assert.eq(doc1, sessionColl.findOne(doc1)); - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); +let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - // Users should not be allowed to modify config.transaction entries for prepared transactions. - // This portion of the test needs to run on a connection without implicit sessions, because - // writes to `config.transactions` are disallowed under sessions. - { - var conn = new Mongo(db.getMongo().host); - conn._setDummyDefaultSession(); - var configDB = conn.getDB('config'); - assert.commandFailed(configDB.transactions.remove({"_id.id": session.getSessionId().id})); - assert.commandFailed(configDB.transactions.update({"_id.id": session.getSessionId().id}, - {$set: {extraField: 1}})); - } +// Users should not be allowed to modify config.transaction entries for prepared transactions. +// This portion of the test needs to run on a connection without implicit sessions, because +// writes to `config.transactions` are disallowed under sessions. 
+{ + var conn = new Mongo(db.getMongo().host); + conn._setDummyDefaultSession(); + var configDB = conn.getDB('config'); + assert.commandFailed(configDB.transactions.remove({"_id.id": session.getSessionId().id})); + assert.commandFailed(configDB.transactions.update({"_id.id": session.getSessionId().id}, + {$set: {extraField: 1}})); +} - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - // After commit the insert persists. - assert.eq(doc1, testColl.findOne(doc1)); +// After commit the insert persists. +assert.eq(doc1, testColl.findOne(doc1)); - // ---- Test 2. Update a document and run prepare. ---- +// ---- Test 2. Update a document and run prepare. ---- - session.startTransaction(); - assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}})); +session.startTransaction(); +assert.commandWorked(sessionColl.update(doc1, {$inc: {x: 1}})); - const doc2 = {_id: 1, x: 2}; +const doc2 = { + _id: 1, + x: 2 +}; - // Update should not be visible outside the session. - assert.eq(null, testColl.findOne(doc2)); +// Update should not be visible outside the session. +assert.eq(null, testColl.findOne(doc2)); - // Update should be visible in this session. - assert.eq(doc2, sessionColl.findOne(doc2)); +// Update should be visible in this session. +assert.eq(doc2, sessionColl.findOne(doc2)); - prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - // After commit the update persists. - assert.eq(doc2, testColl.findOne({_id: 1})); +// After commit the update persists. +assert.eq(doc2, testColl.findOne({_id: 1})); - // ---- Test 3. Delete a document and run prepare. ---- +// ---- Test 3. Delete a document and run prepare. ---- - session.startTransaction(); - assert.commandWorked(sessionColl.remove(doc2, {justOne: true})); +session.startTransaction(); +assert.commandWorked(sessionColl.remove(doc2, {justOne: true})); - // Delete should not be visible outside the session, so the document should be. - assert.eq(doc2, testColl.findOne(doc2)); +// Delete should not be visible outside the session, so the document should be. +assert.eq(doc2, testColl.findOne(doc2)); - // Document should not be visible in this session, since the delete should be visible. - assert.eq(null, sessionColl.findOne(doc2)); +// Document should not be visible in this session, since the delete should be visible. +assert.eq(null, sessionColl.findOne(doc2)); - prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - // After commit the delete persists. - assert.eq(null, testColl.findOne(doc2)); +// After commit the delete persists. 
+assert.eq(null, testColl.findOne(doc2)); }()); diff --git a/jstests/core/txns/commit_prepared_transaction_errors.js b/jstests/core/txns/commit_prepared_transaction_errors.js index 97ecd5bf8e9..64b27f3c16c 100644 --- a/jstests/core/txns/commit_prepared_transaction_errors.js +++ b/jstests/core/txns/commit_prepared_transaction_errors.js @@ -4,70 +4,69 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "commit_prepared_transaction_errors"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "commit_prepared_transaction_errors"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc = {_id: 1}; +const doc = { + _id: 1 +}; - jsTestLog("Test committing a prepared transaction with no 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - PrepareHelpers.prepareTransaction(session); - assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}), - ErrorCodes.InvalidOptions); - // Make sure the transaction is still running by observing write conflicts. - const anotherSession = db.getMongo().startSession({causalConsistency: false}); - anotherSession.startTransaction(); - assert.commandFailedWithCode( - anotherSession.getDatabase(dbName).getCollection(collName).insert(doc), - ErrorCodes.WriteConflict); - assert.commandFailedWithCode(anotherSession.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - // Abort the original transaction. - assert.commandWorked(session.abortTransaction_forTesting()); +jsTestLog("Test committing a prepared transaction with no 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +PrepareHelpers.prepareTransaction(session); +assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}), + ErrorCodes.InvalidOptions); +// Make sure the transaction is still running by observing write conflicts. +const anotherSession = db.getMongo().startSession({causalConsistency: false}); +anotherSession.startTransaction(); +assert.commandFailedWithCode(anotherSession.getDatabase(dbName).getCollection(collName).insert(doc), + ErrorCodes.WriteConflict); +assert.commandFailedWithCode(anotherSession.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +// Abort the original transaction. 
+assert.commandWorked(session.abortTransaction_forTesting()); - jsTestLog("Test committing a prepared transaction with an invalid 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - PrepareHelpers.prepareTransaction(session); - assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), - ErrorCodes.TypeMismatch); +jsTestLog("Test committing a prepared transaction with an invalid 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +PrepareHelpers.prepareTransaction(session); +assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), ErrorCodes.TypeMismatch); - jsTestLog("Test committing a prepared transaction with a null 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - PrepareHelpers.prepareTransaction(session); - assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)), - ErrorCodes.InvalidOptions); +jsTestLog("Test committing a prepared transaction with a null 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +PrepareHelpers.prepareTransaction(session); +assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)), + ErrorCodes.InvalidOptions); - jsTestLog("Test committing an unprepared transaction with a 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - let res = assert.commandFailedWithCode( - PrepareHelpers.commitTransaction(session, Timestamp(3, 3)), ErrorCodes.InvalidOptions); - assert(res.errmsg.includes("cannot provide commitTimestamp to unprepared transaction"), res); +jsTestLog("Test committing an unprepared transaction with a 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +let res = assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(3, 3)), + ErrorCodes.InvalidOptions); +assert(res.errmsg.includes("cannot provide commitTimestamp to unprepared transaction"), res); - jsTestLog("Test committing an unprepared transaction with a null 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)), - ErrorCodes.InvalidOptions); +jsTestLog("Test committing an unprepared transaction with a null 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, Timestamp(0, 0)), + ErrorCodes.InvalidOptions); - jsTestLog("Test committing an unprepared transaction with an invalid 'commitTimestamp'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), - ErrorCodes.TypeMismatch); +jsTestLog("Test committing an unprepared transaction with an invalid 'commitTimestamp'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session, 5), ErrorCodes.TypeMismatch); }()); diff --git a/jstests/core/txns/concurrent_drops_and_creates.js b/jstests/core/txns/concurrent_drops_and_creates.js index b025f5a33c3..101deb76a9b 100644 --- a/jstests/core/txns/concurrent_drops_and_creates.js +++ b/jstests/core/txns/concurrent_drops_and_creates.js @@ -2,77 +2,75 @@ // 
transaction started. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - const dbName1 = "test1"; - const dbName2 = "test2"; - const collNameA = "coll_A"; - const collNameB = "coll_B"; +const dbName1 = "test1"; +const dbName2 = "test2"; +const collNameA = "coll_A"; +const collNameB = "coll_B"; - const sessionOutsideTxn = db.getMongo().startSession({causalConsistency: true}); - const testDB1 = sessionOutsideTxn.getDatabase(dbName1); - const testDB2 = sessionOutsideTxn.getDatabase(dbName2); - testDB1.runCommand({drop: collNameA, writeConcern: {w: "majority"}}); - testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}); +const sessionOutsideTxn = db.getMongo().startSession({causalConsistency: true}); +const testDB1 = sessionOutsideTxn.getDatabase(dbName1); +const testDB2 = sessionOutsideTxn.getDatabase(dbName2); +testDB1.runCommand({drop: collNameA, writeConcern: {w: "majority"}}); +testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB1 = session.getDatabase(dbName1); - const sessionDB2 = session.getDatabase(dbName2); - const sessionCollA = sessionDB1[collNameA]; - const sessionCollB = sessionDB2[collNameB]; +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB1 = session.getDatabase(dbName1); +const sessionDB2 = session.getDatabase(dbName2); +const sessionCollA = sessionDB1[collNameA]; +const sessionCollB = sessionDB2[collNameB]; - // - // A transaction with snapshot read concern cannot write to a collection that has been dropped - // since the transaction started. - // +// +// A transaction with snapshot read concern cannot write to a collection that has been dropped +// since the transaction started. +// - // Ensure collection A and collection B exist. - assert.commandWorked(sessionCollA.insert({})); - assert.commandWorked(sessionCollB.insert({})); +// Ensure collection A and collection B exist. +assert.commandWorked(sessionCollA.insert({})); +assert.commandWorked(sessionCollB.insert({})); - // Start the transaction with a write to collection A. - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionCollA.insert({})); +// Start the transaction with a write to collection A. +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandWorked(sessionCollA.insert({})); - // Drop collection B outside of the transaction. Advance the cluster time of the session - // performing the drop to ensure it happens at a later cluster time than the transaction began. - sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); - assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}})); +// Drop collection B outside of the transaction. Advance the cluster time of the session +// performing the drop to ensure it happens at a later cluster time than the transaction began. +sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); +assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}})); - // We cannot write to collection B in the transaction, since it is illegal to implicitly create - // collections in transactions. The collection drop is visible to the transaction in this way, - // since our implementation of the in-memory collection catalog always has the most recent - // collection metadata. 
- assert.commandFailedWithCode(sessionCollB.insert({}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +// We cannot write to collection B in the transaction, since it is illegal to implicitly create +// collections in transactions. The collection drop is visible to the transaction in this way, +// since our implementation of the in-memory collection catalog always has the most recent +// collection metadata. +assert.commandFailedWithCode(sessionCollB.insert({}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - // - // A transaction with snapshot read concern cannot write to a collection that has been created - // since the transaction started. - // +// +// A transaction with snapshot read concern cannot write to a collection that has been created +// since the transaction started. +// - // Ensure collection A exists and collection B does not exist. - assert.commandWorked(sessionCollA.insert({})); - testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}); +// Ensure collection A exists and collection B does not exist. +assert.commandWorked(sessionCollA.insert({})); +testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}}); - // Start the transaction with a write to collection A. - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionCollA.insert({})); +// Start the transaction with a write to collection A. +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandWorked(sessionCollA.insert({})); - // Create collection B outside of the transaction. Advance the cluster time of the session - // performing the drop to ensure it happens at a later cluster time than the transaction began. - sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); - assert.commandWorked(testDB2.runCommand({create: collNameB})); +// Create collection B outside of the transaction. Advance the cluster time of the session +// performing the drop to ensure it happens at a later cluster time than the transaction began. +sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); +assert.commandWorked(testDB2.runCommand({create: collNameB})); - // We cannot write to collection B in the transaction, since it experienced catalog changes - // since the transaction's read timestamp. Since our implementation of the in-memory collection - // catalog always has the most recent collection metadata, we do not allow you to read from a - // collection at a time prior to its most recent catalog changes. - assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +// We cannot write to collection B in the transaction, since it experienced catalog changes +// since the transaction's read timestamp. Since our implementation of the in-memory collection +// catalog always has the most recent collection metadata, we do not allow you to read from a +// collection at a time prior to its most recent catalog changes. 
+assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/create_collection_not_blocked_by_txn.js b/jstests/core/txns/create_collection_not_blocked_by_txn.js index 679004631da..ba043977bd4 100644 --- a/jstests/core/txns/create_collection_not_blocked_by_txn.js +++ b/jstests/core/txns/create_collection_not_blocked_by_txn.js @@ -5,29 +5,29 @@ */ (function() { - "use strict"; +"use strict"; - let rst = new ReplSetTest({nodes: 1}); - rst.startSet(); - rst.initiate(); +let rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); - let db = rst.getPrimary().getDB("test"); +let db = rst.getPrimary().getDB("test"); - assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase("test"); +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase("test"); - session.startTransaction(); - // This holds a database IX lock and a collection IX lock on "a". - sessionDb.a.insert({y: 1}); +session.startTransaction(); +// This holds a database IX lock and a collection IX lock on "a". +sessionDb.a.insert({y: 1}); - // This only requires database IX lock. - assert.commandWorked(db.createCollection("b")); - // Implicit creation. - assert.commandWorked(db.runCommand({insert: "c", documents: [{x: 2}]})); +// This only requires database IX lock. +assert.commandWorked(db.createCollection("b")); +// Implicit creation. +assert.commandWorked(db.runCommand({insert: "c", documents: [{x: 2}]})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - rst.stopSet(); +rst.stopSet(); })(); diff --git a/jstests/core/txns/currentop_blocked_operations.js b/jstests/core/txns/currentop_blocked_operations.js index 8e51334bdff..01a5026a668 100644 --- a/jstests/core/txns/currentop_blocked_operations.js +++ b/jstests/core/txns/currentop_blocked_operations.js @@ -4,80 +4,79 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "currentop_blocked_operations"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "currentop_blocked_operations"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - // Returns when the operation matching the 'matchExpr' is blocked, as evaluated by the - // 'isBlockedFunc'. 
- let waitForBlockedOp = function(matchExpr, isBlockedFunc) { - assert.soon(function() { - let cursor = - db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: matchExpr}]); - if (cursor.hasNext()) { - let op = cursor.next(); - printjson(op); - return isBlockedFunc(op); - } - return false; - }); - }; +// Returns when the operation matching the 'matchExpr' is blocked, as evaluated by the +// 'isBlockedFunc'. +let waitForBlockedOp = function(matchExpr, isBlockedFunc) { + assert.soon(function() { + let cursor = db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: matchExpr}]); + if (cursor.hasNext()) { + let op = cursor.next(); + printjson(op); + return isBlockedFunc(op); + } + return false; + }); +}; - // This transaction will block conflicting non-transactional operations. - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 2222})); +// This transaction will block conflicting non-transactional operations. +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 2222})); - // This insert operation will encounter a WriteConflictException due to the unique key - // violation. It will block in an infinite write conflict loop until the transaction completes. - TestData.dbName = dbName; - TestData.collName = collName; - let awaitInsert = startParallelShell(function() { - let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName); - assert.commandWorked(coll.insert({_id: 2222, x: 0})); - }); +// This insert operation will encounter a WriteConflictException due to the unique key +// violation. It will block in an infinite write conflict loop until the transaction completes. +TestData.dbName = dbName; +TestData.collName = collName; +let awaitInsert = startParallelShell(function() { + let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName); + assert.commandWorked(coll.insert({_id: 2222, x: 0})); +}); - // Wait for the counter to reach a high enough number to confirm the operation is retrying - // constantly. - waitForBlockedOp({"command.insert": collName}, function(op) { - return op.writeConflicts > 20; - }); +// Wait for the counter to reach a high enough number to confirm the operation is retrying +// constantly. +waitForBlockedOp({"command.insert": collName}, function(op) { + return op.writeConflicts > 20; +}); - assert.commandWorked(session.abortTransaction_forTesting()); - awaitInsert(); - assert.eq(1, testColl.find({_id: 2222, x: 0}).itcount()); +assert.commandWorked(session.abortTransaction_forTesting()); +awaitInsert(); +assert.eq(1, testColl.find({_id: 2222, x: 0}).itcount()); - // This prepared transaction will block conflicting non-transactional operations. - session.startTransaction(); - assert.commandWorked(sessionColl.update({_id: 2222}, {$set: {x: 1}})); - PrepareHelpers.prepareTransaction(session); +// This prepared transaction will block conflicting non-transactional operations. +session.startTransaction(); +assert.commandWorked(sessionColl.update({_id: 2222}, {$set: {x: 1}})); +PrepareHelpers.prepareTransaction(session); - // This update operation will encounter a prepare conflict due to the prepared transaction's - // modification to the same document. It will block without retrying until the prepared - // transaction completes. 
- TestData.dbName = dbName; - TestData.collName = collName; - let awaitUpdate = startParallelShell(function() { - let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName); - assert.commandWorked(coll.update({_id: 2222}, {$set: {x: 999}})); - }); +// This update operation will encounter a prepare conflict due to the prepared transaction's +// modification to the same document. It will block without retrying until the prepared +// transaction completes. +TestData.dbName = dbName; +TestData.collName = collName; +let awaitUpdate = startParallelShell(function() { + let coll = db.getSiblingDB(TestData.dbName).getCollection(TestData.collName); + assert.commandWorked(coll.update({_id: 2222}, {$set: {x: 999}})); +}); - // Expect at least one prepare conflict. - waitForBlockedOp({ns: testColl.getFullName(), op: "update"}, function(op) { - return op.prepareReadConflicts > 0; - }); +// Expect at least one prepare conflict. +waitForBlockedOp({ns: testColl.getFullName(), op: "update"}, function(op) { + return op.prepareReadConflicts > 0; +}); - assert.commandWorked(session.abortTransaction_forTesting()); - awaitUpdate(); - assert.eq(1, testColl.find({_id: 2222, x: 999}).itcount()); +assert.commandWorked(session.abortTransaction_forTesting()); +awaitUpdate(); +assert.eq(1, testColl.find({_id: 2222, x: 999}).itcount()); })(); diff --git a/jstests/core/txns/dbstats_not_blocked_by_txn.js b/jstests/core/txns/dbstats_not_blocked_by_txn.js index 4da7b2ccbe0..6adf567b191 100644 --- a/jstests/core/txns/dbstats_not_blocked_by_txn.js +++ b/jstests/core/txns/dbstats_not_blocked_by_txn.js @@ -4,31 +4,31 @@ * @tags: [uses_transactions] */ (function() { - "use strict"; - var dbName = 'dbstats_not_blocked_by_txn'; - var mydb = db.getSiblingDB(dbName); +"use strict"; +var dbName = 'dbstats_not_blocked_by_txn'; +var mydb = db.getSiblingDB(dbName); - mydb.foo.drop({writeConcern: {w: "majority"}}); - mydb.createCollection("foo", {writeConcern: {w: "majority"}}); +mydb.foo.drop({writeConcern: {w: "majority"}}); +mydb.createCollection("foo", {writeConcern: {w: "majority"}}); - var session = db.getMongo().startSession(); - var sessionDb = session.getDatabase(dbName); +var session = db.getMongo().startSession(); +var sessionDb = session.getDatabase(dbName); - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - if (isMongos) { - // Before starting the transaction below, access the collection so it can be implicitly - // sharded and force all shards to refresh their database versions because the refresh - // requires an exclusive lock and would block behind the transaction. - assert.eq(sessionDb.foo.find().itcount(), 0); - assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true})); - } +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +if (isMongos) { + // Before starting the transaction below, access the collection so it can be implicitly + // sharded and force all shards to refresh their database versions because the refresh + // requires an exclusive lock and would block behind the transaction. 
+ assert.eq(sessionDb.foo.find().itcount(), 0); + assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true})); +} - session.startTransaction(); - assert.commandWorked(sessionDb.foo.insert({x: 1})); +session.startTransaction(); +assert.commandWorked(sessionDb.foo.insert({x: 1})); - let res = mydb.runCommand({dbstats: 1, maxTimeMS: 10 * 1000}); - assert.commandWorked(res, "dbstats should have succeeded and not timed out"); +let res = mydb.runCommand({dbstats: 1, maxTimeMS: 10 * 1000}); +assert.commandWorked(res, "dbstats should have succeeded and not timed out"); - assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +assert.commandWorked(session.commitTransaction_forTesting()); +session.endSession(); }()); diff --git a/jstests/core/txns/default_read_concern.js b/jstests/core/txns/default_read_concern.js index fbdfb3fb6f1..d593ec73332 100644 --- a/jstests/core/txns/default_read_concern.js +++ b/jstests/core/txns/default_read_concern.js @@ -4,45 +4,45 @@ * @tags: [uses_transactions] */ (function() { - "use strict"; - - const dbName = "test"; - const collName = "default_read_concern"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - // Prepare the collection - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); - - jsTestLog("Start a transaction with default readConcern"); - session.startTransaction(); - - // Inserts outside transaction aren't visible, even after they are - // majority-committed. (It is not a requirement that transactions with local - // readConcern do not see writes from another session. At some point, it - // would be desirable to have a transaction with readConcern local or - // majority see writes from other sessions. However, our current - // implementation of ensuring any data we read does not get rolled back - // relies on the fact that we read from a single WT snapshot, since we - // choose the timestamp to wait on in the first command of the - // transaction.) - let assertSameMembers = (members) => { - assert.sameMembers(members, sessionColl.find().toArray()); - }; - - assertSameMembers([{_id: 0}]); - assert.commandWorked(testColl.insert({_id: 1})); - assertSameMembers([{_id: 0}]); - assert.commandWorked(testColl.insert({_id: 2}, {writeConcern: {w: "majority"}})); - assertSameMembers([{_id: 0}]); - - assert.commandWorked(session.commitTransaction_forTesting()); - - assertSameMembers([{_id: 0}, {_id: 1}, {_id: 2}]); - session.endSession(); +"use strict"; + +const dbName = "test"; +const collName = "default_read_concern"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +// Prepare the collection +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); + +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); + +jsTestLog("Start a transaction with default readConcern"); +session.startTransaction(); + +// Inserts outside transaction aren't visible, even after they are +// majority-committed. (It is not a requirement that transactions with local +// readConcern do not see writes from another session. 
At some point, it +// would be desirable to have a transaction with readConcern local or +// majority see writes from other sessions. However, our current +// implementation of ensuring any data we read does not get rolled back +// relies on the fact that we read from a single WT snapshot, since we +// choose the timestamp to wait on in the first command of the +// transaction.) +let assertSameMembers = (members) => { + assert.sameMembers(members, sessionColl.find().toArray()); +}; + +assertSameMembers([{_id: 0}]); +assert.commandWorked(testColl.insert({_id: 1})); +assertSameMembers([{_id: 0}]); +assert.commandWorked(testColl.insert({_id: 2}, {writeConcern: {w: "majority"}})); +assertSameMembers([{_id: 0}]); + +assert.commandWorked(session.commitTransaction_forTesting()); + +assertSameMembers([{_id: 0}, {_id: 1}, {_id: 2}]); +session.endSession(); }()); diff --git a/jstests/core/txns/disallow_operations_on_prepared_transaction.js b/jstests/core/txns/disallow_operations_on_prepared_transaction.js index 975116b6cfd..0e6a8453aa9 100644 --- a/jstests/core/txns/disallow_operations_on_prepared_transaction.js +++ b/jstests/core/txns/disallow_operations_on_prepared_transaction.js @@ -7,115 +7,115 @@ */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "disallow_operations_on_prepared_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - jsTestLog("Test that you can call prepareTransaction on a prepared transaction."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 1})); - let firstTimestamp = PrepareHelpers.prepareTransaction(session); - let secondTimestamp = PrepareHelpers.prepareTransaction(session); - assert.eq(firstTimestamp, secondTimestamp); - assert.commandWorked(session.abortTransaction_forTesting()); - - jsTestLog("Test that you can call commitTransaction on a prepared transaction."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 2})); - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - - jsTestLog("Test that you can call abortTransaction on a prepared transaction."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 3})); - PrepareHelpers.prepareTransaction(session); - assert.commandWorked(session.abortTransaction_forTesting()); - - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 4})); - PrepareHelpers.prepareTransaction(session); - - jsTestLog("Test that you can't run an aggregation on a prepared transaction."); - assert.commandFailedWithCode(assert.throws(function() { - sessionColl.aggregate({$match: {}}); - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run delete on a prepared transaction."); - var res = assert.commandFailedWithCode(sessionColl.remove({_id: 4}), - ErrorCodes.PreparedTransactionInProgress); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - - jsTestLog("Test that you can't run distinct on a prepared transaction."); - 
assert.commandFailedWithCode(assert.throws(function() { - sessionColl.distinct("_id"); - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run find on a prepared transaction."); - assert.commandFailedWithCode(assert.throws(function() { - sessionColl.find({}).toArray(); - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run findandmodify on a prepared transaction."); - assert.commandFailedWithCode(sessionDB.runCommand({ - findandmodify: collName, - remove: true, - txnNumber: NumberLong(session.getTxnNumber_forTesting()), - stmtId: NumberInt(1), - autocommit: false - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run findAndModify on a prepared transaction."); - assert.commandFailedWithCode(assert.throws(function() { - sessionColl.findAndModify({query: {_id: 4}, remove: true}); - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run geoSearch on a prepared transaction."); - assert.commandFailedWithCode( - sessionDB.runCommand({geoSearch: collName, near: [0, 0], search: {a: 1}}), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't insert on a prepared transaction."); - res = assert.commandFailedWithCode(sessionColl.insert({_id: 5}), +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "disallow_operations_on_prepared_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +jsTestLog("Test that you can call prepareTransaction on a prepared transaction."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 1})); +let firstTimestamp = PrepareHelpers.prepareTransaction(session); +let secondTimestamp = PrepareHelpers.prepareTransaction(session); +assert.eq(firstTimestamp, secondTimestamp); +assert.commandWorked(session.abortTransaction_forTesting()); + +jsTestLog("Test that you can call commitTransaction on a prepared transaction."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 2})); +let prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); + +jsTestLog("Test that you can call abortTransaction on a prepared transaction."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 3})); +PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); + +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 4})); +PrepareHelpers.prepareTransaction(session); + +jsTestLog("Test that you can't run an aggregation on a prepared transaction."); +assert.commandFailedWithCode(assert.throws(function() { + sessionColl.aggregate({$match: {}}); + }), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run delete on a prepared transaction."); +var res = assert.commandFailedWithCode(sessionColl.remove({_id: 4}), ErrorCodes.PreparedTransactionInProgress); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - - jsTestLog("Test that you can't run 
update on a prepared transaction."); - res = assert.commandFailedWithCode(sessionColl.update({_id: 4}, {a: 1}), - ErrorCodes.PreparedTransactionInProgress); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - assert.commandWorked(session.abortTransaction_forTesting()); - - jsTestLog("Test that you can't run getMore on a prepared transaction."); - session.startTransaction(); - res = assert.commandWorked(sessionDB.runCommand({find: collName, batchSize: 1})); - assert(res.hasOwnProperty("cursor"), tojson(res)); - assert(res.cursor.hasOwnProperty("id"), tojson(res)); - PrepareHelpers.prepareTransaction(session); - assert.commandFailedWithCode( - sessionDB.runCommand({getMore: res.cursor.id, collection: collName}), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog("Test that you can't run killCursors on a prepared transaction."); - assert.commandFailedWithCode( - sessionDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}), - ErrorCodes.PreparedTransactionInProgress); - assert.commandWorked(session.abortTransaction_forTesting()); - - session.endSession(); +assert.eq(res.errorLabels, ["TransientTransactionError"]); + +jsTestLog("Test that you can't run distinct on a prepared transaction."); +assert.commandFailedWithCode(assert.throws(function() { + sessionColl.distinct("_id"); + }), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run find on a prepared transaction."); +assert.commandFailedWithCode(assert.throws(function() { + sessionColl.find({}).toArray(); + }), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run findandmodify on a prepared transaction."); +assert.commandFailedWithCode(sessionDB.runCommand({ + findandmodify: collName, + remove: true, + txnNumber: NumberLong(session.getTxnNumber_forTesting()), + stmtId: NumberInt(1), + autocommit: false +}), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run findAndModify on a prepared transaction."); +assert.commandFailedWithCode( + assert.throws(function() { + sessionColl.findAndModify({query: {_id: 4}, remove: true}); + }), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run geoSearch on a prepared transaction."); +assert.commandFailedWithCode( + sessionDB.runCommand({geoSearch: collName, near: [0, 0], search: {a: 1}}), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't insert on a prepared transaction."); +res = assert.commandFailedWithCode(sessionColl.insert({_id: 5}), + ErrorCodes.PreparedTransactionInProgress); +assert.eq(res.errorLabels, ["TransientTransactionError"]); + +jsTestLog("Test that you can't run update on a prepared transaction."); +res = assert.commandFailedWithCode(sessionColl.update({_id: 4}, {a: 1}), + ErrorCodes.PreparedTransactionInProgress); +assert.eq(res.errorLabels, ["TransientTransactionError"]); +assert.commandWorked(session.abortTransaction_forTesting()); + +jsTestLog("Test that you can't run getMore on a prepared transaction."); +session.startTransaction(); +res = assert.commandWorked(sessionDB.runCommand({find: collName, batchSize: 1})); +assert(res.hasOwnProperty("cursor"), tojson(res)); +assert(res.cursor.hasOwnProperty("id"), tojson(res)); +PrepareHelpers.prepareTransaction(session); +assert.commandFailedWithCode(sessionDB.runCommand({getMore: res.cursor.id, collection: collName}), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog("Test that you can't run killCursors on a prepared transaction."); 
+assert.commandFailedWithCode( + sessionDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}), + ErrorCodes.PreparedTransactionInProgress); +assert.commandWorked(session.abortTransaction_forTesting()); + +session.endSession(); }()); diff --git a/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js b/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js index a53457fc6e5..3bbd380770e 100644 --- a/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js +++ b/jstests/core/txns/downgrade_fcv_while_large_partial_txn_in_progress.js @@ -7,69 +7,66 @@ */ (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "downgrade_fcv_while_large_partial_txn_in_progress"; - const testDB = db.getSiblingDB(dbName); +const dbName = "test"; +const collName = "downgrade_fcv_while_large_partial_txn_in_progress"; +const testDB = db.getSiblingDB(dbName); - assert.commandWorked(db.adminCommand({ - configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", - mode: "alwaysOn" - })); +assert.commandWorked(db.adminCommand( + {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "alwaysOn"})); - // As we are not able to send a single request larger than 16MB, we insert two documents - // of 10MB each to create a "large" transaction. - const kSize10MB = 10 * 1024 * 1024; - function createLargeDocument(id) { - return {_id: id, longString: new Array(kSize10MB).join("a")}; - } +// As we are not able to send a single request larger than 16MB, we insert two documents +// of 10MB each to create a "large" transaction. +const kSize10MB = 10 * 1024 * 1024; +function createLargeDocument(id) { + return {_id: id, longString: new Array(kSize10MB).join("a")}; +} - testDB[collName].drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); +testDB[collName].drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - let doc1 = createLargeDocument(1); - let doc2 = createLargeDocument(2); +let doc1 = createLargeDocument(1); +let doc2 = createLargeDocument(2); - jsTestLog("Start a transaction and insert documents with sizes that add up to more than 16MB."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - assert.commandWorked(sessionColl.insert(doc2)); +jsTestLog("Start a transaction and insert documents with sizes that add up to more than 16MB."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); +assert.commandWorked(sessionColl.insert(doc2)); - let downgradeFCV = startParallelShell(function() { - load("jstests/libs/feature_compatibility_version.js"); +let downgradeFCV = startParallelShell(function() { + load("jstests/libs/feature_compatibility_version.js"); - const testDB = db.getSiblingDB("test"); - const adminDB = db.getSiblingDB("admin"); - try { - jsTestLog("Downgrade to FCV4.0."); - assert.commandWorked( - testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - checkFCV(adminDB, lastStableFCV); - } finally { - jsTestLog("Restore 
back to FCV4.2."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - } - }); + const testDB = db.getSiblingDB("test"); + const adminDB = db.getSiblingDB("admin"); + try { + jsTestLog("Downgrade to FCV4.0."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); + checkFCV(adminDB, lastStableFCV); + } finally { + jsTestLog("Restore back to FCV4.2."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); + } +}); - // Wait until the in-memory FCV state has been changed to 4.0. - assert.soon(function() { - const adminDB = db.getSiblingDB("admin"); - let res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1}); - assert.commandWorked(res); - return "4.0" === res.featureCompatibilityVersion.version; - }, "Failed to detect the FCV change to 4.0 from server status."); +// Wait until the in-memory FCV state has been changed to 4.0. +assert.soon(function() { + const adminDB = db.getSiblingDB("admin"); + let res = adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1}); + assert.commandWorked(res); + return "4.0" === res.featureCompatibilityVersion.version; +}, "Failed to detect the FCV change to 4.0 from server status."); - jsTestLog("Attempt to commit the large transaction using the FCV4.0 oplog format."); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.TransactionTooLarge); +jsTestLog("Attempt to commit the large transaction using the FCV4.0 oplog format."); +assert.commandFailedWithCode(session.commitTransaction_forTesting(), + ErrorCodes.TransactionTooLarge); - assert.commandWorked(db.adminCommand( - {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "off"})); - downgradeFCV(); +assert.commandWorked(db.adminCommand( + {configureFailPoint: "hangBeforeAbortingRunningTransactionsOnFCVDowngrade", mode: "off"})); +downgradeFCV(); }()); diff --git a/jstests/core/txns/drop_collection_not_blocked_by_txn.js b/jstests/core/txns/drop_collection_not_blocked_by_txn.js index c32f7372506..85dcda1b8e1 100644 --- a/jstests/core/txns/drop_collection_not_blocked_by_txn.js +++ b/jstests/core/txns/drop_collection_not_blocked_by_txn.js @@ -5,28 +5,28 @@ */ (function() { - "use strict"; +"use strict"; - let rst = new ReplSetTest({nodes: 1}); - rst.startSet(); - rst.initiate(); +let rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); - let db = rst.getPrimary().getDB("test"); +let db = rst.getPrimary().getDB("test"); - assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); - assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]})); - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase("test"); +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase("test"); - session.startTransaction(); - // This holds a database IX lock and a collection IX lock on "a". - sessionDb.a.insert({y: 1}); +session.startTransaction(); +// This holds a database IX lock and a collection IX lock on "a". +sessionDb.a.insert({y: 1}); - // This only requires database IX lock. - assert.commandWorked(db.runCommand({drop: "b"})); +// This only requires database IX lock. 
+assert.commandWorked(db.runCommand({drop: "b"})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - rst.stopSet(); +rst.stopSet(); })(); diff --git a/jstests/core/txns/empty_commit_abort.js b/jstests/core/txns/empty_commit_abort.js index d496cf41623..4882b477df2 100644 --- a/jstests/core/txns/empty_commit_abort.js +++ b/jstests/core/txns/empty_commit_abort.js @@ -4,61 +4,62 @@ * @tags: [uses_transactions] */ (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "empty_commit_abort"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "empty_commit_abort"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const doc = {_id: 1, a: 1, b: 1}; - assert.commandWorked(testColl.insert(doc)); +const doc = { + _id: 1, + a: 1, + b: 1 +}; +assert.commandWorked(testColl.insert(doc)); - const session = db.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - // ---- Test 1. No operations before commit ---- - session.startTransaction(); - assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +// ---- Test 1. No operations before commit ---- +session.startTransaction(); +assert.commandFailedWithCode(sessionDB.adminCommand({commitTransaction: 1}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - // ---- Test 2. No operations before abort ---- - session.startTransaction(); - assert.commandFailedWithCode(sessionDB.adminCommand({abortTransaction: 1}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +// ---- Test 2. No operations before abort ---- +session.startTransaction(); +assert.commandFailedWithCode(sessionDB.adminCommand({abortTransaction: 1}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - // ---- Test 3. Only reads before commit ---- - session.startTransaction(); - assert.eq(doc, sessionColl.findOne({a: 1})); - assert.commandWorked(session.commitTransaction_forTesting()); +// ---- Test 3. Only reads before commit ---- +session.startTransaction(); +assert.eq(doc, sessionColl.findOne({a: 1})); +assert.commandWorked(session.commitTransaction_forTesting()); - // ---- Test 4. Only reads before abort ---- - session.startTransaction(); - assert.eq(doc, sessionColl.findOne({a: 1})); - assert.commandWorked(session.abortTransaction_forTesting()); +// ---- Test 4. 
Only reads before abort ---- +session.startTransaction(); +assert.eq(doc, sessionColl.findOne({a: 1})); +assert.commandWorked(session.abortTransaction_forTesting()); - // ---- Test 5. Noop writes before commit ---- - session.startTransaction(); - let res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}})); - assert.eq(res.nMatched, 1, tojson(res)); - assert.eq(res.nModified, 0, tojson(res)); - assert.eq(res.nUpserted, 0, tojson(res)); - assert.commandWorked(session.commitTransaction_forTesting()); - - // ---- Test 6. Noop writes before abort ---- - session.startTransaction(); - res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}})); - assert.eq(res.nMatched, 1, tojson(res)); - assert.eq(res.nModified, 0, tojson(res)); - assert.eq(res.nUpserted, 0, tojson(res)); - assert.commandWorked(session.abortTransaction_forTesting()); +// ---- Test 5. Noop writes before commit ---- +session.startTransaction(); +let res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}})); +assert.eq(res.nMatched, 1, tojson(res)); +assert.eq(res.nModified, 0, tojson(res)); +assert.eq(res.nUpserted, 0, tojson(res)); +assert.commandWorked(session.commitTransaction_forTesting()); +// ---- Test 6. Noop writes before abort ---- +session.startTransaction(); +res = assert.commandWorked(sessionColl.update({_id: 1}, {$set: {b: 1}})); +assert.eq(res.nMatched, 1, tojson(res)); +assert.eq(res.nModified, 0, tojson(res)); +assert.eq(res.nUpserted, 0, tojson(res)); +assert.commandWorked(session.abortTransaction_forTesting()); }()); diff --git a/jstests/core/txns/empty_prepare.js b/jstests/core/txns/empty_prepare.js index cb1b616c9fc..59c0bce1f54 100644 --- a/jstests/core/txns/empty_prepare.js +++ b/jstests/core/txns/empty_prepare.js @@ -4,50 +4,52 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - - const dbName = "test"; - const collName = "empty_prepare"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const doc = {_id: 1, a: 1, b: 1}; - assert.commandWorked(testColl.insert(doc)); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - // ---- Test 1. No operations before prepare ---- - - session.startTransaction(); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - // ---- Test 2. Only reads before prepare ---- - - session.startTransaction(); - assert.eq(doc, sessionColl.findOne({a: 1})); - let res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1})); - // Makes sure prepareTransaction returns prepareTimestamp in its response. - assert(res.hasOwnProperty("prepareTimestamp"), tojson(res)); - assert.commandWorked(session.abortTransaction_forTesting()); - - // ---- Test 3. 
Noop writes before prepare ---- - - session.startTransaction(); - res = assert.commandWorked(sessionColl.update({a: 1}, {$set: {b: 1}})); - assert.eq(res.nMatched, 1, tojson(res)); - assert.eq(res.nModified, 0, tojson(res)); - assert.eq(res.nUpserted, 0, tojson(res)); - res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1})); - // Makes sure prepareTransaction returns prepareTimestamp in its response. - assert(res.hasOwnProperty("prepareTimestamp"), tojson(res)); - assert.commandWorked(session.abortTransaction_forTesting()); - +"use strict"; + +const dbName = "test"; +const collName = "empty_prepare"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const doc = { + _id: 1, + a: 1, + b: 1 +}; +assert.commandWorked(testColl.insert(doc)); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +// ---- Test 1. No operations before prepare ---- + +session.startTransaction(); +assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +// ---- Test 2. Only reads before prepare ---- + +session.startTransaction(); +assert.eq(doc, sessionColl.findOne({a: 1})); +let res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1})); +// Makes sure prepareTransaction returns prepareTimestamp in its response. +assert(res.hasOwnProperty("prepareTimestamp"), tojson(res)); +assert.commandWorked(session.abortTransaction_forTesting()); + +// ---- Test 3. Noop writes before prepare ---- + +session.startTransaction(); +res = assert.commandWorked(sessionColl.update({a: 1}, {$set: {b: 1}})); +assert.eq(res.nMatched, 1, tojson(res)); +assert.eq(res.nModified, 0, tojson(res)); +assert.eq(res.nUpserted, 0, tojson(res)); +res = assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1})); +// Makes sure prepareTransaction returns prepareTimestamp in its response. 
+assert(res.hasOwnProperty("prepareTimestamp"), tojson(res)); +assert.commandWorked(session.abortTransaction_forTesting()); }()); diff --git a/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js b/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js index a6ef6ab7c77..2847d139c89 100644 --- a/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js +++ b/jstests/core/txns/ensure_active_txn_for_prepare_transaction.js @@ -5,58 +5,48 @@ */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "ensure_active_txn_for_prepare_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - jsTestLog("Test that we can't call prepareTransaction if there was never a transaction on " + - "the session"); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(0), - stmtId: NumberInt(1), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - - jsTestLog( - "Test that we can't call prepareTransaction if the most recent transaction was aborted"); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 1})); - assert.commandWorked(session.abortTransaction_forTesting()); - - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(0), - stmtId: NumberInt(1), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - - jsTestLog( - "Test that we can't call prepareTransaction if the most recent transaction was committed"); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 1})); - assert.commandWorked(session.commitTransaction_forTesting()); - - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(1), - stmtId: NumberInt(1), - autocommit: false - }), - ErrorCodes.TransactionCommitted); - - session.endSession(); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "ensure_active_txn_for_prepare_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +jsTestLog("Test that we can't call prepareTransaction if there was never a transaction on " + + "the session"); +assert.commandFailedWithCode( + sessionDB.adminCommand( + {prepareTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(1), autocommit: false}), + ErrorCodes.NoSuchTransaction); + +jsTestLog("Test that we can't call prepareTransaction if the most recent transaction was aborted"); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 1})); +assert.commandWorked(session.abortTransaction_forTesting()); + +assert.commandFailedWithCode( + sessionDB.adminCommand( + {prepareTransaction: 1, txnNumber: 
NumberLong(0), stmtId: NumberInt(1), autocommit: false}), + ErrorCodes.NoSuchTransaction); + +jsTestLog( + "Test that we can't call prepareTransaction if the most recent transaction was committed"); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 1})); +assert.commandWorked(session.commitTransaction_forTesting()); + +assert.commandFailedWithCode( + sessionDB.adminCommand( + {prepareTransaction: 1, txnNumber: NumberLong(1), stmtId: NumberInt(1), autocommit: false}), + ErrorCodes.TransactionCommitted); + +session.endSession(); }()); diff --git a/jstests/core/txns/errors_on_committed_transaction.js b/jstests/core/txns/errors_on_committed_transaction.js index 6425fd1239e..2734f7fa11a 100644 --- a/jstests/core/txns/errors_on_committed_transaction.js +++ b/jstests/core/txns/errors_on_committed_transaction.js @@ -4,72 +4,71 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "prepare_committed_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "prepare_committed_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc = {x: 1}; +const doc = { + x: 1 +}; - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandWorked(session.commitTransaction_forTesting()); - const txnNumber = NumberLong(session.getTxnNumber_forTesting()); +const txnNumber = NumberLong(session.getTxnNumber_forTesting()); - // Call prepare on committed transaction. - jsTestLog("Test that calling prepare on a committed transaction fails."); - assert.commandFailedWithCode( - sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}), - ErrorCodes.TransactionCommitted); +// Call prepare on committed transaction. 
+jsTestLog("Test that calling prepare on a committed transaction fails."); +assert.commandFailedWithCode( + sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}), + ErrorCodes.TransactionCommitted); - jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " + - "providing txnNumber to prepareTransaction."); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}), - ErrorCodes.InvalidOptions); +jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " + + "providing txnNumber to prepareTransaction."); +assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}), + ErrorCodes.InvalidOptions); - jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " + - "providing autocommit to prepareTransaction."); - assert.commandFailedWithCode( - sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber}), 50768); +jsTestLog("Test the error precedence when calling prepare on a committed transaction but not " + + "providing autocommit to prepareTransaction."); +assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, txnNumber: txnNumber}), + 50768); - jsTestLog("Test the error precedence when calling prepare on a committed transaction and " + - "providing startTransaction to prepareTransaction."); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: txnNumber, - autocommit: false, - startTransaction: true - }), - ErrorCodes.OperationNotSupportedInTransaction); +jsTestLog("Test the error precedence when calling prepare on a committed transaction and " + + "providing startTransaction to prepareTransaction."); +assert.commandFailedWithCode( + sessionDB.adminCommand( + {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false, startTransaction: true}), + ErrorCodes.OperationNotSupportedInTransaction); - // Call commit on committed transaction without shell helper. - jsTestLog("Test that calling commit with invalid fields on a committed transaction fails."); - assert.commandFailedWithCode( - sessionDB.adminCommand( - {commitTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}), - 40415 /* IDL unknown field error */); +// Call commit on committed transaction without shell helper. +jsTestLog("Test that calling commit with invalid fields on a committed transaction fails."); +assert.commandFailedWithCode( + sessionDB.adminCommand( + {commitTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}), + 40415 /* IDL unknown field error */); - // Call abort on committed transaction without shell helper. - jsTestLog("Test that calling abort on a committed transaction fails."); - assert.commandFailedWithCode( - sessionDB.adminCommand({abortTransaction: 1, txnNumber: txnNumber, autocommit: false}), - ErrorCodes.TransactionCommitted); +// Call abort on committed transaction without shell helper. 
+jsTestLog("Test that calling abort on a committed transaction fails."); +assert.commandFailedWithCode( + sessionDB.adminCommand({abortTransaction: 1, txnNumber: txnNumber, autocommit: false}), + ErrorCodes.TransactionCommitted); - jsTestLog("Test that calling abort with invalid fields on a committed transaction fails."); - assert.commandFailedWithCode( - sessionDB.adminCommand( - {abortTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}), - ErrorCodes.TransactionCommitted); +jsTestLog("Test that calling abort with invalid fields on a committed transaction fails."); +assert.commandFailedWithCode( + sessionDB.adminCommand( + {abortTransaction: 1, invalidField: 1, txnNumber: txnNumber, autocommit: false}), + ErrorCodes.TransactionCommitted); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/find_and_modify_in_transaction.js b/jstests/core/txns/find_and_modify_in_transaction.js index 8724b13c85d..02c5a1639e1 100644 --- a/jstests/core/txns/find_and_modify_in_transaction.js +++ b/jstests/core/txns/find_and_modify_in_transaction.js @@ -1,151 +1,151 @@ // Test transactions including find-and-modify // @tags: [assumes_unsharded_collection, uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "find_and_modify_in_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "find_and_modify_in_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; - jsTest.log("Prepopulate the collection."); - assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}], - {writeConcern: {w: "majority"}})); +jsTest.log("Prepopulate the collection."); +assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}], + {writeConcern: {w: "majority"}})); - /*********************************************************************************************** - * Do a non-matching find-and-modify with remove. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a non-matching find-and-modify with remove. + **********************************************************************************************/ - jsTest.log("Do a non-matching find-and-modify with remove."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a non-matching find-and-modify with remove."); +session.startTransaction({writeConcern: {w: "majority"}}); - // Do a findAndModify that affects no documents. 
- let res = sessionColl.findAndModify({query: {a: 99}, remove: true}); - assert.eq(null, res); - let docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]); +// Do a findAndModify that affects no documents. +let res = sessionColl.findAndModify({query: {a: 99}, remove: true}); +assert.eq(null, res); +let docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a non-matching find-and-modify with update. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a non-matching find-and-modify with update. + **********************************************************************************************/ - jsTest.log("Do a non-matching find-and-modify with update."); +jsTest.log("Do a non-matching find-and-modify with update."); - session.startTransaction({writeConcern: {w: "majority"}}); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify({query: {a: 99}, update: {$inc: {a: 100}}}); - assert.eq(null, res); - docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]); +res = sessionColl.findAndModify({query: {a: 99}, update: {$inc: {a: 100}}}); +assert.eq(null, res); +docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 0, a: 0}, {_id: 1, a: 1}, {_id: 2, a: 2}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a matching find-and-modify with remove. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a matching find-and-modify with remove. + **********************************************************************************************/ - jsTest.log("Do a matching find-and-modify with remove."); +jsTest.log("Do a matching find-and-modify with remove."); - session.startTransaction({writeConcern: {w: "majority"}}); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify({query: {a: 0}, remove: true}); - assert.eq({_id: 0, a: 0}, res); - docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 1, a: 1}, {_id: 2, a: 2}]); +res = sessionColl.findAndModify({query: {a: 0}, remove: true}); +assert.eq({_id: 0, a: 0}, res); +docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 1, a: 1}, {_id: 2, a: 2}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a matching find-and-modify with update, requesting the old doc. 
- **********************************************************************************************/ +/*********************************************************************************************** + * Do a matching find-and-modify with update, requesting the old doc. + **********************************************************************************************/ - jsTest.log("Do a matching find-and-modify with update, requesting the old doc."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a matching find-and-modify with update, requesting the old doc."); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify({query: {a: 1}, update: {$inc: {a: 100}}}); - assert.eq({_id: 1, a: 1}, res); - docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 2}]); +res = sessionColl.findAndModify({query: {a: 1}, update: {$inc: {a: 100}}}); +assert.eq({_id: 1, a: 1}, res); +docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 2}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a matching find-and-modify with update, requesting the new doc. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a matching find-and-modify with update, requesting the new doc. + **********************************************************************************************/ - jsTest.log("Do a matching find-and-modify with update, requesting the new doc."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a matching find-and-modify with update, requesting the new doc."); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify({query: {a: 2}, update: {$inc: {a: 100}}, new: true}); - assert.eq({_id: 2, a: 102}, res); - docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 102}]); +res = sessionColl.findAndModify({query: {a: 2}, update: {$inc: {a: 100}}, new: true}); +assert.eq({_id: 2, a: 102}, res); +docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 102}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a matching find-and-modify with upsert, requesting the new doc. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a matching find-and-modify with upsert, requesting the new doc. 
+ **********************************************************************************************/ - jsTest.log("Do a matching find-and-modify with upsert, requesting the new doc."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a matching find-and-modify with upsert, requesting the new doc."); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify( - {query: {_id: 2}, update: {$inc: {a: 100}}, upsert: true, new: true}); - assert.eq({_id: 2, a: 202}, res); - docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 202}]); +res = + sessionColl.findAndModify({query: {_id: 2}, update: {$inc: {a: 100}}, upsert: true, new: true}); +assert.eq({_id: 2, a: 202}, res); +docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [{_id: 1, a: 101}, {_id: 2, a: 202}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a non-matching find-and-modify with upsert, requesting the old doc. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a non-matching find-and-modify with upsert, requesting the old doc. + **********************************************************************************************/ - jsTest.log("Do a non-matching find-and-modify with upsert, requesting the old doc."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a non-matching find-and-modify with upsert, requesting the old doc."); +session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify({query: {a: 3}, update: {$inc: {a: 100}}, upsert: true}); - assert.eq(null, res); - docs = sessionColl.find({a: 103}, {_id: 0}).toArray(); - assert.sameMembers(docs, [{a: 103}]); +res = sessionColl.findAndModify({query: {a: 3}, update: {$inc: {a: 100}}, upsert: true}); +assert.eq(null, res); +docs = sessionColl.find({a: 103}, {_id: 0}).toArray(); +assert.sameMembers(docs, [{a: 103}]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - /*********************************************************************************************** - * Do a non-matching find-and-modify with upsert, requesting the new doc. - **********************************************************************************************/ +/*********************************************************************************************** + * Do a non-matching find-and-modify with upsert, requesting the new doc. 
+ **********************************************************************************************/ - jsTest.log("Do a non-matching find-and-modify with upsert, requesting the new doc."); - session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify( - {query: {a: 4}, update: {$inc: {a: 200}}, upsert: true, new: true}); +jsTest.log("Do a non-matching find-and-modify with upsert, requesting the new doc."); +session.startTransaction({writeConcern: {w: "majority"}}); +res = sessionColl.findAndModify({query: {a: 4}, update: {$inc: {a: 200}}, upsert: true, new: true}); - const newdoc = res; - assert.eq(204, newdoc.a); - docs = sessionColl.find({a: 204}).toArray(); - assert.sameMembers(docs, [newdoc]); +const newdoc = res; +assert.eq(204, newdoc.a); +docs = sessionColl.find({a: 204}).toArray(); +assert.sameMembers(docs, [newdoc]); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); +session.endSession(); }()); diff --git a/jstests/core/txns/finished_transaction_error_handling.js b/jstests/core/txns/finished_transaction_error_handling.js index f0907998578..7cabb693fe5 100644 --- a/jstests/core/txns/finished_transaction_error_handling.js +++ b/jstests/core/txns/finished_transaction_error_handling.js @@ -1,140 +1,145 @@ // Test committed and aborted transactions cannot be changed but commitTransaction is retryable. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "finished_transaction_error_handling"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "finished_transaction_error_handling"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - const writeConcern = {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}; - testDB.runCommand({drop: collName, writeConcern: writeConcern}); - assert.commandWorked(testDB.createCollection(collName, {writeConcern: writeConcern})); +const writeConcern = { + w: "majority", + wtimeout: ReplSetTest.kDefaultTimeoutMS +}; +testDB.runCommand({drop: collName, writeConcern: writeConcern}); +assert.commandWorked(testDB.createCollection(collName, {writeConcern: writeConcern})); - let txnNumber = 0; - let stmtId = 0; +let txnNumber = 0; +let stmtId = 0; - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); - jsTestLog("Test aborted transaction number cannot be reused."); - txnNumber++; - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "abort-txn-1"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - stmtId: NumberInt(stmtId++), - autocommit: false - })); - assert.commandWorked(sessionDb.adminCommand({ - abortTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - })); +jsTestLog("Test aborted transaction number cannot be reused."); +txnNumber++; +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "abort-txn-1"}], + 
readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + stmtId: NumberInt(stmtId++), + autocommit: false +})); +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +})); - jsTestLog("Attempt to commit an aborted transaction"); - assert.commandFailedWithCode(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); +jsTestLog("Attempt to commit an aborted transaction"); +assert.commandFailedWithCode(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); - jsTestLog("Attempt to abort an aborted transaction"); - assert.commandFailedWithCode(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); +jsTestLog("Attempt to abort an aborted transaction"); +assert.commandFailedWithCode(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); - jsTestLog("Attempt to continue an aborted transaction"); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "abort-txn-2"}], - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); +jsTestLog("Attempt to continue an aborted transaction"); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "abort-txn-2"}], + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); - jsTestLog("Attempt to restart an aborted transaction"); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "abort-txn-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.ConflictingOperationInProgress); +jsTestLog("Attempt to restart an aborted transaction"); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "abort-txn-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.ConflictingOperationInProgress); - jsTest.log("Test commitTransaction command is retryable"); - txnNumber++; - stmtId = 0; - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "commit-txn-1"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - stmtId: NumberInt(stmtId++), - autocommit: false - })); - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - })); +jsTest.log("Test commitTransaction command is retryable"); +txnNumber++; +stmtId = 0; +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "commit-txn-1"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + stmtId: NumberInt(stmtId++), + 
autocommit: false +})); +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +})); - jsTestLog("Retry commitTransaction command on a committed transaction"); - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId), - autocommit: false - })); +jsTestLog("Retry commitTransaction command on a committed transaction"); +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId), + autocommit: false +})); - jsTestLog("Attempt to abort a committed transaction"); - assert.commandFailedWithCode(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.TransactionCommitted); +jsTestLog("Attempt to abort a committed transaction"); +assert.commandFailedWithCode(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.TransactionCommitted); - jsTestLog("Attempt to continue a committed transaction"); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "commit-txn-2"}], - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.TransactionCommitted); +jsTestLog("Attempt to continue a committed transaction"); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "commit-txn-2"}], + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.TransactionCommitted); - jsTestLog("Attempt to restart a committed transaction"); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "commit-txn-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - stmtId: NumberInt(stmtId++), - autocommit: false - }), - ErrorCodes.ConflictingOperationInProgress); +jsTestLog("Attempt to restart a committed transaction"); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "commit-txn-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + stmtId: NumberInt(stmtId++), + autocommit: false +}), + ErrorCodes.ConflictingOperationInProgress); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/indexing_not_blocked_by_txn.js b/jstests/core/txns/indexing_not_blocked_by_txn.js index c08a6a7e495..020c16b28b8 100644 --- a/jstests/core/txns/indexing_not_blocked_by_txn.js +++ b/jstests/core/txns/indexing_not_blocked_by_txn.js @@ -7,35 +7,37 @@ * @tags: [uses_transactions, assumes_unsharded_collection] */ (function() { - "use strict"; - var dbName = 'indexing_not_blocked_by_txn'; - var mydb = db.getSiblingDB(dbName); - const wcMajority = {writeConcern: {w: "majority"}}; +"use strict"; +var dbName = 'indexing_not_blocked_by_txn'; +var mydb = db.getSiblingDB(dbName); +const wcMajority = { + writeConcern: {w: "majority"} +}; - mydb.foo.drop(wcMajority); - mydb.bar.drop(wcMajority); - assert.commandWorked(mydb.createCollection("foo", wcMajority)); - assert.commandWorked(mydb.foo.createIndex({x: 1})); - assert.commandWorked(mydb.createCollection("bar", wcMajority)); +mydb.foo.drop(wcMajority); +mydb.bar.drop(wcMajority); 
+assert.commandWorked(mydb.createCollection("foo", wcMajority)); +assert.commandWorked(mydb.foo.createIndex({x: 1})); +assert.commandWorked(mydb.createCollection("bar", wcMajority)); - var session = db.getMongo().startSession(); - var sessionDb = session.getDatabase(dbName); +var session = db.getMongo().startSession(); +var sessionDb = session.getDatabase(dbName); - session.startTransaction(); - assert.commandWorked(sessionDb.foo.insert({x: 1})); +session.startTransaction(); +assert.commandWorked(sessionDb.foo.insert({x: 1})); - // Creating already existing index is a no-op that shouldn't take strong locks. - assert.commandWorked(mydb.foo.createIndex({x: 1})); +// Creating already existing index is a no-op that shouldn't take strong locks. +assert.commandWorked(mydb.foo.createIndex({x: 1})); - // Creating an index on a different collection should not conflict. - assert.commandWorked(mydb.bar.createIndex({x: 1})); +// Creating an index on a different collection should not conflict. +assert.commandWorked(mydb.bar.createIndex({x: 1})); - // Dropping shouldn't either. - assert.commandWorked(mydb.bar.dropIndex({x: 1})); +// Dropping shouldn't either. +assert.commandWorked(mydb.bar.dropIndex({x: 1})); - // Creating an index on a non-existent collection in an existing database should not conflict. - assert.commandWorked(mydb.baz.createIndex({x: 1})); +// Creating an index on a non-existent collection in an existing database should not conflict. +assert.commandWorked(mydb.baz.createIndex({x: 1})); - assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +assert.commandWorked(session.commitTransaction_forTesting()); +session.endSession(); }()); diff --git a/jstests/core/txns/kill_cursors_in_transaction.js b/jstests/core/txns/kill_cursors_in_transaction.js index 84a58bfdc33..f0dbe8330f1 100644 --- a/jstests/core/txns/kill_cursors_in_transaction.js +++ b/jstests/core/txns/kill_cursors_in_transaction.js @@ -1,78 +1,76 @@ // Tests that the killCursors command is allowed in transactions. 
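A minimal standalone sketch of the pattern this test exercises, assuming only a plain mongo shell `db`, an illustrative "test.example" collection seeded outside the transaction, and the `commitTransaction_forTesting` shell helper used throughout these tests:

const exampleDB = db.getSiblingDB("test");
exampleDB.example.drop();
assert.commandWorked(exampleDB.example.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}]));

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");

session.startTransaction();
// The transaction must begin with an ordinary read or write; here a find opens a cursor.
const res = assert.commandWorked(sessionDb.runCommand({find: "example", batchSize: 2}));
// killCursors is then allowed as a later statement inside the same transaction.
assert.commandWorked(sessionDb.runCommand({killCursors: "example", cursors: [res.cursor.id]}));
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();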
// @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "kill_cursors_in_transaction"; - const testDB = db.getSiblingDB(dbName); - const adminDB = db.getSiblingDB("admin"); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; +const dbName = "test"; +const collName = "kill_cursors_in_transaction"; +const testDB = db.getSiblingDB(dbName); +const adminDB = db.getSiblingDB("admin"); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; - sessionColl.drop({writeConcern: {w: "majority"}}); - for (let i = 0; i < 4; ++i) { - assert.commandWorked(sessionColl.insert({_id: i})); - } +sessionColl.drop({writeConcern: {w: "majority"}}); +for (let i = 0; i < 4; ++i) { + assert.commandWorked(sessionColl.insert({_id: i})); +} - jsTest.log("Test that the killCursors command is allowed in transactions."); +jsTest.log("Test that the killCursors command is allowed in transactions."); - session.startTransaction(); - let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - assert(res.hasOwnProperty("cursor"), tojson(res)); - assert(res.cursor.hasOwnProperty("id"), tojson(res)); - assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]})); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction(); +let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); +assert(res.hasOwnProperty("cursor"), tojson(res)); +assert(res.cursor.hasOwnProperty("id"), tojson(res)); +assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]})); +assert.commandWorked(session.commitTransaction_forTesting()); - jsTest.log("Test that the killCursors cannot be the first operation in a transaction."); - res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - assert(res.hasOwnProperty("cursor"), tojson(res)); - assert(res.cursor.hasOwnProperty("id"), tojson(res)); - session.startTransaction(); - assert.commandFailedWithCode( - sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +jsTest.log("Test that the killCursors cannot be the first operation in a transaction."); +res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); +assert(res.hasOwnProperty("cursor"), tojson(res)); +assert(res.cursor.hasOwnProperty("id"), tojson(res)); +session.startTransaction(); +assert.commandFailedWithCode( + sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - jsTest.log("killCursors must not block on locks held by the transaction in which it is run."); +jsTest.log("killCursors must not block on locks held by the transaction in which it is run."); - session.startTransaction(); +session.startTransaction(); - // Open a cursor on the collection. 
- res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - assert(res.hasOwnProperty("cursor"), tojson(res)); - assert(res.cursor.hasOwnProperty("id"), tojson(res)); +// Open a cursor on the collection. +res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); +assert(res.hasOwnProperty("cursor"), tojson(res)); +assert(res.cursor.hasOwnProperty("id"), tojson(res)); - // Start a drop, which will hang. - let awaitDrop = startParallelShell(function() { - db.getSiblingDB("test")["kill_cursors_in_transaction"].drop( - {writeConcern: {w: "majority"}}); - }); +// Start a drop, which will hang. +let awaitDrop = startParallelShell(function() { + db.getSiblingDB("test")["kill_cursors_in_transaction"].drop({writeConcern: {w: "majority"}}); +}); - // Wait for the drop to have a pending MODE_X lock on the database. - assert.soon( - function() { - return adminDB - .aggregate([ - {$currentOp: {}}, - {$match: {"command.drop": collName, waitingForLock: true}} - ]) - .itcount() === 1; - }, - function() { - return "Failed to find drop in currentOp output: " + - tojson(adminDB.aggregate([{$currentOp: {}}]).toArray()); - }); +// Wait for the drop to have a pending MODE_X lock on the database. +assert.soon( + function() { + return adminDB + .aggregate([ + {$currentOp: {}}, + {$match: {"command.drop": collName, waitingForLock: true}} + ]) + .itcount() === 1; + }, + function() { + return "Failed to find drop in currentOp output: " + + tojson(adminDB.aggregate([{$currentOp: {}}]).toArray()); + }); - // killCursors does not block behind the pending MODE_X lock. - assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]})); +// killCursors does not block behind the pending MODE_X lock. +assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - // Once the transaction has committed, the drop can proceed. - awaitDrop(); +// Once the transaction has committed, the drop can proceed. +awaitDrop(); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/kill_op_on_txn_expiry.js b/jstests/core/txns/kill_op_on_txn_expiry.js index 298b5d0926e..dde4930bfae 100644 --- a/jstests/core/txns/kill_op_on_txn_expiry.js +++ b/jstests/core/txns/kill_op_on_txn_expiry.js @@ -1,95 +1,95 @@ // Test that ongoing operations in a transaction are interrupted when the transaction expires. 
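A hedged sketch of the expiry behaviour the next test relies on, assuming a direct connection to a mongod and a pre-existing "test.example" collection; the 5-second limit and the 60-second wait are illustrative, since the exact abort time depends on the server's periodic abort task.

const res = assert.commandWorked(
    db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
const originalLimit = res.transactionLifetimeLimitSeconds;
assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 5}));

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
session.startTransaction();
assert.commandWorked(sessionDb.example.insert({_id: "expires"}));

// Wait past the lifetime limit; the server aborts the transaction in the background, so a
// later commit attempt reports NoSuchTransaction.
sleep(60 * 1000);
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
                             ErrorCodes.NoSuchTransaction);

// Restore the original limit so other operations are unaffected.
assert.commandWorked(
    db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: originalLimit}));
session.endSession();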
// @tags: [uses_transactions] (function() { - "use strict"; - - load('jstests/libs/parallelTester.js'); - load("jstests/libs/check_log.js"); - - const dbName = "test"; - const collName = "kill_op_on_txn_expiry"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +"use strict"; + +load('jstests/libs/parallelTester.js'); +load("jstests/libs/check_log.js"); + +const dbName = "test"; +const collName = "kill_op_on_txn_expiry"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +// Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the +// end of the test. +const res = + assert.commandWorked(db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); +const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; + +// Decrease transactionLifetimeLimitSeconds so it expires faster +jsTest.log("Decrease transactionLifetimeLimitSeconds from " + + originalTransactionLifetimeLimitSeconds + " to 30 seconds."); +assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 30})); + +try { + jsTestLog("Starting transaction"); + + let txnNumber = 0; + assert.commandWorked(testColl.runCommand({ + insert: collName, + documents: [{_id: 0}], + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false, + lsid: session.getSessionId(), + })); + + jsTestLog("Enabling fail point to block batch inserts"); assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - // Need the original 'transactionLifetimeLimitSeconds' value so that we can reset it back at the - // end of the test. - const res = assert.commandWorked( - db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1})); - const originalTransactionLifetimeLimitSeconds = res.transactionLifetimeLimitSeconds; - - // Decrease transactionLifetimeLimitSeconds so it expires faster - jsTest.log("Decrease transactionLifetimeLimitSeconds from " + - originalTransactionLifetimeLimitSeconds + " to 30 seconds."); - assert.commandWorked(db.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: 30})); - - try { - jsTestLog("Starting transaction"); - - let txnNumber = 0; - assert.commandWorked(testColl.runCommand({ + testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "alwaysOn"})); + // Clear ramlog so checkLog can't find log messages from previous times this fail point was + // enabled. + assert.commandWorked(testDB.adminCommand({clearLog: 'global'})); + + jsTestLog("Starting insert operation in parallel thread"); + let workerThread = new ScopedThread((sessionId, txnNumber, dbName, collName) => { + // Deserialize the session ID from its string representation. 
+ sessionId = eval("(" + sessionId + ")"); + + let coll = db.getSiblingDB(dbName).getCollection(collName); + assert.commandFailedWithCode(coll.runCommand({ insert: collName, - documents: [{_id: 0}], + documents: [{_id: 1}], txnNumber: NumberLong(txnNumber), - startTransaction: true, autocommit: false, - lsid: session.getSessionId(), - })); - - jsTestLog("Enabling fail point to block batch inserts"); - assert.commandWorked( - testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "alwaysOn"})); - // Clear ramlog so checkLog can't find log messages from previous times this fail point was - // enabled. - assert.commandWorked(testDB.adminCommand({clearLog: 'global'})); - - jsTestLog("Starting insert operation in parallel thread"); - let workerThread = new ScopedThread((sessionId, txnNumber, dbName, collName) => { - // Deserialize the session ID from its string representation. - sessionId = eval("(" + sessionId + ")"); - - let coll = db.getSiblingDB(dbName).getCollection(collName); - assert.commandFailedWithCode(coll.runCommand({ - insert: collName, - documents: [{_id: 1}], - txnNumber: NumberLong(txnNumber), - autocommit: false, - lsid: sessionId - }), - ErrorCodes.ExceededTimeLimit); - - }, tojson(session.getSessionId()), txnNumber, dbName, collName); - workerThread.start(); - - jsTestLog("Wait for insert to be blocked"); - checkLog.contains(db.getMongo(), "hangDuringBatchInsert fail point enabled"); - - jsTestLog("Wait for the transaction to expire"); - checkLog.contains(db.getMongo(), "Aborting transaction with txnNumber " + txnNumber); - - jsTestLog("Disabling fail point to enable insert to proceed and detect that the session " + - "has been killed"); - assert.commandWorked( - testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"})); - - workerThread.join(); - assert(!workerThread.hasFailed()); - } finally { - // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact - // other tests in the suite. - assert.commandWorked(db.adminCommand({ - setParameter: 1, - transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds - })); - } - - session.endSession(); + lsid: sessionId + }), + ErrorCodes.ExceededTimeLimit); + }, tojson(session.getSessionId()), txnNumber, dbName, collName); + workerThread.start(); + + jsTestLog("Wait for insert to be blocked"); + checkLog.contains(db.getMongo(), "hangDuringBatchInsert fail point enabled"); + + jsTestLog("Wait for the transaction to expire"); + checkLog.contains(db.getMongo(), "Aborting transaction with txnNumber " + txnNumber); + + jsTestLog("Disabling fail point to enable insert to proceed and detect that the session " + + "has been killed"); + assert.commandWorked( + testDB.adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"})); + + workerThread.join(); + assert(!workerThread.hasFailed()); +} finally { + // Must ensure that the transactionLifetimeLimitSeconds is reset so that it does not impact + // other tests in the suite. + assert.commandWorked(db.adminCommand({ + setParameter: 1, + transactionLifetimeLimitSeconds: originalTransactionLifetimeLimitSeconds + })); +} + +session.endSession(); }()); diff --git a/jstests/core/txns/kill_sessions_kills_transaction.js b/jstests/core/txns/kill_sessions_kills_transaction.js index 4b4e7ee9afb..bd03a124624 100644 --- a/jstests/core/txns/kill_sessions_kills_transaction.js +++ b/jstests/core/txns/kill_sessions_kills_transaction.js @@ -1,77 +1,77 @@ // Tests that killSessions kills inactive transactions. 
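A minimal sketch of the behaviour named in the title above, using the same shell helpers these tests use; "test.example" is an illustrative, pre-existing collection.

const testDB = db.getSiblingDB("test");
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");

session.startTransaction();
assert.commandWorked(sessionDb.example.insert({_id: "killed-session"}));

// Killing the session from outside aborts the idle transaction it owns, so the commit
// attempt that follows reports NoSuchTransaction.
assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
                             ErrorCodes.NoSuchTransaction);
session.endSession();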
// @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "kill_sessions_kills_transaction"; - const testDB = db.getSiblingDB(dbName); - const adminDB = db.getSiblingDB("admin"); - const testColl = testDB[collName]; - const sessionOptions = {causalConsistency: false}; +const dbName = "test"; +const collName = "kill_sessions_kills_transaction"; +const testDB = db.getSiblingDB(dbName); +const adminDB = db.getSiblingDB("admin"); +const testColl = testDB[collName]; +const sessionOptions = { + causalConsistency: false +}; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - const bulk = testColl.initializeUnorderedBulkOp(); - for (let i = 0; i < 4; ++i) { - bulk.insert({_id: i}); - } - assert.commandWorked(bulk.execute({w: "majority"})); +const bulk = testColl.initializeUnorderedBulkOp(); +for (let i = 0; i < 4; ++i) { + bulk.insert({_id: i}); +} +assert.commandWorked(bulk.execute({w: "majority"})); - jsTest.log("Test that killing a session kills an inactive transaction."); - let session = db.getMongo().startSession(sessionOptions); - let sessionDb = session.getDatabase(dbName); - let sessionColl = sessionDb[collName]; +jsTest.log("Test that killing a session kills an inactive transaction."); +let session = db.getMongo().startSession(sessionOptions); +let sessionDb = session.getDatabase(dbName); +let sessionColl = sessionDb[collName]; - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 5})); - assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]})); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 5})); +assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]})); +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - session.endSession(); +session.endSession(); - jsTest.log("killSessions must not block on locks held by a transaction it plans to kill."); - session = db.getMongo().startSession(sessionOptions); - sessionDb = session.getDatabase(dbName); - sessionColl = sessionDb[collName]; +jsTest.log("killSessions must not block on locks held by a transaction it plans to kill."); +session = db.getMongo().startSession(sessionOptions); +sessionDb = session.getDatabase(dbName); +sessionColl = sessionDb[collName]; - session.startTransaction(); - // Open a cursor on the collection. - assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); +session.startTransaction(); +// Open a cursor on the collection. +assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - // Start a drop, which will hang. - let awaitDrop = startParallelShell(function() { - db.getSiblingDB("test")["kill_sessions_kills_transaction"].drop( - {writeConcern: {w: "majority"}}); - }); +// Start a drop, which will hang. +let awaitDrop = startParallelShell(function() { + db.getSiblingDB("test")["kill_sessions_kills_transaction"].drop( + {writeConcern: {w: "majority"}}); +}); - // Wait for the drop to have a pending MODE_X lock on the database. 
- assert.soon( - function() { - return adminDB - .aggregate([ - {$currentOp: {}}, - {$match: {"command.drop": collName, waitingForLock: true}} - ]) - .itcount() === 1; - }, - function() { - return "Failed to find drop in currentOp output: " + - tojson(adminDB.aggregate([{$currentOp: {}}]).toArray()); - }); +// Wait for the drop to have a pending MODE_X lock on the database. +assert.soon( + function() { + return adminDB + .aggregate([ + {$currentOp: {}}, + {$match: {"command.drop": collName, waitingForLock: true}} + ]) + .itcount() === 1; + }, + function() { + return "Failed to find drop in currentOp output: " + + tojson(adminDB.aggregate([{$currentOp: {}}]).toArray()); + }); - // killSessions needs to acquire a MODE_IS lock on the collection in order to kill the open - // cursor. However, the transaction is holding a MODE_IX lock on the collection, which will - // block the drop from obtaining a MODE_X lock on the database, which will block the - // killSessions from taking a MODE_IS lock on the collection. In order to avoid hanging, - // killSessions must first kill the transaction, so that it releases its MODE_IX collection - // lock. This allows the drop to proceed and obtain and release the MODE_X lock. Finally, - // killSessions can obtain a MODE_IS collection lock and kill the cursor. - assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]})); - awaitDrop(); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +// killSessions needs to acquire a MODE_IS lock on the collection in order to kill the open +// cursor. However, the transaction is holding a MODE_IX lock on the collection, which will +// block the drop from obtaining a MODE_X lock on the database, which will block the +// killSessions from taking a MODE_IS lock on the collection. In order to avoid hanging, +// killSessions must first kill the transaction, so that it releases its MODE_IX collection +// lock. This allows the drop to proceed and obtain and release the MODE_X lock. Finally, +// killSessions can obtain a MODE_IS collection lock and kill the cursor. +assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]})); +awaitDrop(); +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/kill_transaction_cursors_after_commit.js b/jstests/core/txns/kill_transaction_cursors_after_commit.js index 003158c3e52..0910b6fb1b7 100644 --- a/jstests/core/txns/kill_transaction_cursors_after_commit.js +++ b/jstests/core/txns/kill_transaction_cursors_after_commit.js @@ -1,35 +1,35 @@ // Tests that cursors created in transactions may be killed outside of the transaction. 
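A compact sketch of the same idea, assuming a seeded, illustrative "test.example" collection: the cursor is opened inside a transaction, and once the transaction commits it can be killed from an ordinary, non-session connection.

const testDB = db.getSiblingDB("test");
testDB.example.drop();
assert.commandWorked(testDB.example.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}]));

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");

session.startTransaction();
const res = assert.commandWorked(sessionDb.runCommand({find: "example", batchSize: 2}));
assert.commandWorked(session.commitTransaction_forTesting());

// The cursor outlives the transaction and can be killed outside the session.
assert.commandWorked(testDB.runCommand({killCursors: "example", cursors: [res.cursor.id]}));
session.endSession();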
// @tags: [uses_transactions]
(function() {
-    "use strict";
+"use strict";

-    const dbName = "test";
-    const collName = "kill_transaction_cursors";
-    const testDB = db.getSiblingDB(dbName);
-    const session = db.getMongo().startSession({causalConsistency: false});
-    const sessionDb = session.getDatabase(dbName);
-    const sessionColl = sessionDb[collName];
+const dbName = "test";
+const collName = "kill_transaction_cursors";
+const testDB = db.getSiblingDB(dbName);
+const session = db.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];

-    sessionColl.drop({writeConcern: {w: "majority"}});
-    for (let i = 0; i < 4; ++i) {
-        assert.commandWorked(sessionColl.insert({_id: i}));
-    }
+sessionColl.drop({writeConcern: {w: "majority"}});
+for (let i = 0; i < 4; ++i) {
+    assert.commandWorked(sessionColl.insert({_id: i}));
+}

-    jsTest.log("Test that cursors created in transactions may be kill outside of the transaction.");
-    session.startTransaction();
-    let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
-    assert(res.hasOwnProperty("cursor"), tojson(res));
-    assert(res.cursor.hasOwnProperty("id"), tojson(res));
-    assert.commandWorked(session.commitTransaction_forTesting());
-    assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+jsTest.log("Test that cursors created in transactions may be killed outside of the transaction.");
+session.startTransaction();
+let res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(sessionDb.runCommand({killCursors: collName, cursors: [res.cursor.id]}));

-    jsTest.log("Test that cursors created in transactions may be kill outside of the session.");
-    session.startTransaction();
-    res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
-    assert(res.hasOwnProperty("cursor"), tojson(res));
-    assert(res.cursor.hasOwnProperty("id"), tojson(res));
-    assert.commandWorked(session.commitTransaction_forTesting());
-    assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}));
+jsTest.log("Test that cursors created in transactions may be killed outside of the session.");
+session.startTransaction();
+res = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"), tojson(res));
+assert(res.cursor.hasOwnProperty("id"), tojson(res));
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [res.cursor.id]}));

-    session.endSession();
+session.endSession();
}());
diff --git a/jstests/core/txns/kill_txn_cursor.js b/jstests/core/txns/kill_txn_cursor.js
index cd2332b6d24..0dc68af52c1 100644
--- a/jstests/core/txns/kill_txn_cursor.js
+++ b/jstests/core/txns/kill_txn_cursor.js
@@ -1,63 +1,64 @@
// Tests that killing a cursor created in a transaction does not abort the transaction.
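A hedged sketch of that property, again with an illustrative, pre-seeded "test.example" collection: killing one of a transaction's cursors from outside the session leaves the transaction itself usable.

const testDB = db.getSiblingDB("test");
testDB.example.drop();
assert.commandWorked(testDB.example.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}]));

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");

session.startTransaction();
const cur1 = assert.commandWorked(sessionDb.runCommand({find: "example", batchSize: 2}));
const cur2 = assert.commandWorked(sessionDb.runCommand({find: "example", batchSize: 2}));

// Kill the first cursor from a non-session connection; the transaction is not aborted.
assert.commandWorked(testDB.runCommand({killCursors: "example", cursors: [cur1.cursor.id]}));

// The second cursor still works, further writes are accepted, and the commit succeeds.
assert.commandWorked(sessionDb.runCommand({getMore: cur2.cursor.id, collection: "example"}));
assert.commandWorked(sessionDb.example.insert({_id: 4}));
assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();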
// @tags: [uses_transactions] (function() { - "use strict"; - - const dbName = "test"; - const collName = "kill_txn_cursor"; - const testDB = db.getSiblingDB(dbName); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - const bulk = sessionColl.initializeUnorderedBulkOp(); - for (let i = 0; i < 4; ++i) { - bulk.insert({_id: i}); - } - assert.commandWorked(bulk.execute({w: "majority"})); - - jsTest.log("Start a transaction."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Open cursor 1, and do not exhaust the cursor. - let cursorRes1 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1)); - assert(cursorRes1.cursor.hasOwnProperty("id"), tojson(cursorRes1)); - let cursorId1 = cursorRes1.cursor.id; - jsTest.log("Opened cursor 1 with id " + cursorId1); - - // Open cursor 2, and do not exhaust the cursor. - let cursorRes2 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2})); - assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2)); - assert(cursorRes2.cursor.hasOwnProperty("id"), tojson(cursorRes2)); - let cursorId2 = cursorRes2.cursor.id; - jsTest.log("Opened cursor 2 with id " + cursorId2); - - jsTest.log("Kill cursor 1 outside of the transaction."); - // Kill cursor 1. We check that the kill was successful by asserting that the killCursors - // command worked. We could run a getMore and check that we get a CursorNotFound error, but this - // error would abort the transaction and kill cursor 2, and we want to check that cursor 2 is - // still alive. - assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [cursorId1]})); - - jsTest.log("Cursor 2 is still alive."); - cursorRes2 = - assert.commandWorked(sessionDb.runCommand({getMore: cursorId2, collection: collName})); - assert(cursorRes2.hasOwnProperty("cursor")); - assert(cursorRes2.cursor.hasOwnProperty("nextBatch")); - assert.sameMembers(cursorRes2.cursor.nextBatch, [{_id: 2}, {_id: 3}]); - - jsTest.log("Can still write in the transaction"); - assert.commandWorked(sessionColl.insert({_id: 4})); - - jsTest.log("Commit transaction."); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.sameMembers([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}], - sessionColl.find().toArray()); - - session.endSession(); +"use strict"; + +const dbName = "test"; +const collName = "kill_txn_cursor"; +const testDB = db.getSiblingDB(dbName); + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +const bulk = sessionColl.initializeUnorderedBulkOp(); +for (let i = 0; i < 4; ++i) { + bulk.insert({_id: i}); +} +assert.commandWorked(bulk.execute({w: "majority"})); + +jsTest.log("Start a transaction."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Open cursor 1, and do not exhaust the cursor. 
+let cursorRes1 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(cursorRes1.hasOwnProperty("cursor"), tojson(cursorRes1));
+assert(cursorRes1.cursor.hasOwnProperty("id"), tojson(cursorRes1));
+let cursorId1 = cursorRes1.cursor.id;
+jsTest.log("Opened cursor 1 with id " + cursorId1);
+
+// Open cursor 2, and do not exhaust the cursor.
+let cursorRes2 = assert.commandWorked(sessionDb.runCommand({find: collName, batchSize: 2}));
+assert(cursorRes2.hasOwnProperty("cursor"), tojson(cursorRes2));
+assert(cursorRes2.cursor.hasOwnProperty("id"), tojson(cursorRes2));
+let cursorId2 = cursorRes2.cursor.id;
+jsTest.log("Opened cursor 2 with id " + cursorId2);
+
+jsTest.log("Kill cursor 1 outside of the transaction.");
+// Kill cursor 1. We check that the kill was successful by asserting that the killCursors
+// command worked. We could run a getMore and check that we get a CursorNotFound error, but this
+// error would abort the transaction and kill cursor 2, and we want to check that cursor 2 is
+// still alive.
+assert.commandWorked(testDB.runCommand({killCursors: collName, cursors: [cursorId1]}));
+
+jsTest.log("Cursor 2 is still alive.");
+cursorRes2 = assert.commandWorked(sessionDb.runCommand({getMore: cursorId2, collection: collName}));
+assert(cursorRes2.hasOwnProperty("cursor"));
+assert(cursorRes2.cursor.hasOwnProperty("nextBatch"));
+assert.sameMembers(cursorRes2.cursor.nextBatch, [{_id: 2}, {_id: 3}]);
+
+jsTest.log("Can still write in the transaction");
+assert.commandWorked(sessionColl.insert({_id: 4}));
+
+jsTest.log("Commit transaction.");
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.sameMembers([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
+                   sessionColl.find().toArray());
+
+session.endSession();
}());
diff --git a/jstests/core/txns/large_transactions_require_fcv42.js b/jstests/core/txns/large_transactions_require_fcv42.js
index ce24c8c0c39..cbfa89f6e73 100644
--- a/jstests/core/txns/large_transactions_require_fcv42.js
+++ b/jstests/core/txns/large_transactions_require_fcv42.js
@@ -4,72 +4,73 @@
 * @tags: [uses_transactions]
 */
(function() {
-    "uses strict";
-    load("jstests/libs/feature_compatibility_version.js");
-    load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+load("jstests/core/txns/libs/prepare_helpers.js");

-    const dbName = "test";
-    const collName = "large_transactions_require_fcv42";
-    const testDB = db.getSiblingDB(dbName);
-    const adminDB = db.getSiblingDB('admin');
+const dbName = "test";
+const collName = "large_transactions_require_fcv42";
+const testDB = db.getSiblingDB(dbName);
+const adminDB = db.getSiblingDB('admin');

-    testDB[collName].drop({writeConcern: {w: "majority"}});
-    assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+testDB[collName].drop({writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));

-    const sessionOptions = {causalConsistency: false};
-    const session = testDB.getMongo().startSession(sessionOptions);
-    const sessionDB = session.getDatabase(dbName);
-    const sessionColl = sessionDB.getCollection(collName);
+const sessionOptions = {
+    causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);

-    // As we are not able to send a single request larger than 16MB, we
insert two documents - // of 10MB each to create a "large" transaction. - const kSize10MB = 10 * 1024 * 1024; - function createLargeDocument(id) { - return {_id: id, longString: "a".repeat(kSize10MB)}; - } +// As we are not able to send a single request larger than 16MB, we insert two documents +// of 10MB each to create a "large" transaction. +const kSize10MB = 10 * 1024 * 1024; +function createLargeDocument(id) { + return {_id: id, longString: "a".repeat(kSize10MB)}; +} - try { - jsTestLog("Test that creating a transaction larger than 16MB succeeds in FCV 4.2."); - let doc1 = createLargeDocument(1); - let doc2 = createLargeDocument(2); +try { + jsTestLog("Test that creating a transaction larger than 16MB succeeds in FCV 4.2."); + let doc1 = createLargeDocument(1); + let doc2 = createLargeDocument(2); - checkFCV(adminDB, latestFCV); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - assert.commandWorked(sessionColl.insert(doc2)); - assert.commandWorked(session.commitTransaction_forTesting()); + checkFCV(adminDB, latestFCV); + session.startTransaction(); + assert.commandWorked(sessionColl.insert(doc1)); + assert.commandWorked(sessionColl.insert(doc2)); + assert.commandWorked(session.commitTransaction_forTesting()); - jsTestLog("Downgrade the featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - checkFCV(adminDB, lastStableFCV); + jsTestLog("Downgrade the featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); + checkFCV(adminDB, lastStableFCV); - jsTestLog("Test that trying to create a transaction larger than 16MB fails in FCV 4.0."); - let doc3 = createLargeDocument(3); - let doc4 = createLargeDocument(4); + jsTestLog("Test that trying to create a transaction larger than 16MB fails in FCV 4.0."); + let doc3 = createLargeDocument(3); + let doc4 = createLargeDocument(4); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc3)); - assert.commandFailedWithCode(sessionColl.insert(doc4), ErrorCodes.TransactionTooLarge); - // We have to call 'abortTransaction' here to clear the transaction state in the shell. - // Otherwise, the later call to 'startTransaction' will fail with 'Transaction already in - // progress'. - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } finally { - jsTestLog("Restore to FCV 4.2."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - } + session.startTransaction(); + assert.commandWorked(sessionColl.insert(doc3)); + assert.commandFailedWithCode(sessionColl.insert(doc4), ErrorCodes.TransactionTooLarge); + // We have to call 'abortTransaction' here to clear the transaction state in the shell. + // Otherwise, the later call to 'startTransaction' will fail with 'Transaction already in + // progress'. 
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} finally { + jsTestLog("Restore to FCV 4.2."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); +} - jsTestLog( - "Test that creating a transaction larger than 16MB succeeds after upgrading to FCV 4.2."); - let doc5 = createLargeDocument(5); - let doc6 = createLargeDocument(6); +jsTestLog("Test that creating a transaction larger than 16MB succeeds after upgrading to FCV 4.2."); +let doc5 = createLargeDocument(5); +let doc6 = createLargeDocument(6); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc5)); - assert.commandWorked(sessionColl.insert(doc6)); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc5)); +assert.commandWorked(sessionColl.insert(doc6)); +assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/libs/prepare_helpers.js b/jstests/core/txns/libs/prepare_helpers.js index 9fca9e20cfc..c6b22220a94 100644 --- a/jstests/core/txns/libs/prepare_helpers.js +++ b/jstests/core/txns/libs/prepare_helpers.js @@ -5,7 +5,6 @@ * */ const PrepareHelpers = (function() { - /** * Prepares the active transaction on the session. This expects the 'prepareTransaction' command * to succeed and return a non-null 'prepareTimestamp'. @@ -106,7 +105,7 @@ const PrepareHelpers = (function() { assert.commandWorked(coll.insert({tenKB: tenKB}, {writeConcern: {w: numNodes}})); } - for (let [nodeName, oplog] of[["primary", primaryOplog], ["secondary", secondaryOplog]]) { + for (let [nodeName, oplog] of [["primary", primaryOplog], ["secondary", secondaryOplog]]) { assert.soon(function() { const dataSize = oplog.dataSize(); const prepareEntryRemoved = (oplog.findOne({prepare: true}) === null); diff --git a/jstests/core/txns/libs/write_conflicts.js b/jstests/core/txns/libs/write_conflicts.js index 5496464d9ed..fdaf2114d82 100644 --- a/jstests/core/txns/libs/write_conflicts.js +++ b/jstests/core/txns/libs/write_conflicts.js @@ -5,7 +5,6 @@ * */ var WriteConflictHelpers = (function() { - /** * Write conflict test cases. * @@ -125,8 +124,8 @@ var WriteConflictHelpers = (function() { const session2 = conn.startSession(sessionOptions); jsTestLog("Executing write conflict test, case '" + writeConflictTestCase.name + - "'. \n transaction 1 op: " + tojson(txn1Op) + "\n transaction 2 op: " + - tojson(txn2Op)); + "'. \n transaction 1 op: " + tojson(txn1Op) + + "\n transaction 2 op: " + tojson(txn2Op)); // Run the specified write conflict test. try { diff --git a/jstests/core/txns/list_collections_not_blocked_by_txn.js b/jstests/core/txns/list_collections_not_blocked_by_txn.js index 6f23c1ea88b..faf095129d2 100644 --- a/jstests/core/txns/list_collections_not_blocked_by_txn.js +++ b/jstests/core/txns/list_collections_not_blocked_by_txn.js @@ -2,42 +2,42 @@ // This test ensures that listCollections does not conflict with multi-statement transactions // as a result of taking MODE_S locks that are incompatible with MODE_IX needed for writes. 
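A small sketch of the non-blocking behaviour described above, assuming an illustrative "test" database with an existing collection "foo": listCollections only needs intent locks, so it completes within its maxTimeMS even while a transaction holds a write (MODE_IX) lock on the collection.

const plainDb = db.getSiblingDB("test");
plainDb.foo.drop();
assert.commandWorked(plainDb.createCollection("foo"));

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
session.startTransaction();
assert.commandWorked(sessionDb.foo.insert({x: 1}));  // holds MODE_IX until commit or abort

// Run listCollections outside the session; it should not time out behind the transaction.
assert.commandWorked(
    plainDb.runCommand({listCollections: 1, nameOnly: true, maxTimeMS: 20 * 1000}));

assert.commandWorked(session.commitTransaction_forTesting());
session.endSession();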
(function() { - "use strict"; - var dbName = 'list_collections_not_blocked'; - var mydb = db.getSiblingDB(dbName); - var session = db.getMongo().startSession({causalConsistency: false}); - var sessionDb = session.getDatabase(dbName); +"use strict"; +var dbName = 'list_collections_not_blocked'; +var mydb = db.getSiblingDB(dbName); +var session = db.getMongo().startSession({causalConsistency: false}); +var sessionDb = session.getDatabase(dbName); - mydb.foo.drop({writeConcern: {w: "majority"}}); +mydb.foo.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}})); +assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}})); - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - if (isMongos) { - // Before starting the transaction below, access the collection so it can be implicitly - // sharded and force all shards to refresh their database versions because the refresh - // requires an exclusive lock and would block behind the transaction. - assert.eq(sessionDb.foo.find().itcount(), 0); - assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true})); - } +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +if (isMongos) { + // Before starting the transaction below, access the collection so it can be implicitly + // sharded and force all shards to refresh their database versions because the refresh + // requires an exclusive lock and would block behind the transaction. + assert.eq(sessionDb.foo.find().itcount(), 0); + assert.commandWorked(sessionDb.runCommand({listCollections: 1, nameOnly: true})); +} - session.startTransaction({readConcern: {level: "snapshot"}}); +session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionDb.foo.insert({x: 1})); +assert.commandWorked(sessionDb.foo.insert({x: 1})); - for (let nameOnly of[false, true]) { - // Check that both the nameOnly and full versions of listCollections don't block. - let res = mydb.runCommand({listCollections: 1, nameOnly, maxTimeMS: 20 * 1000}); - assert.commandWorked(res, "listCollections should have succeeded and not timed out"); - let collObj = res.cursor.firstBatch[0]; - // collObj should only have name and type fields. - assert.eq('foo', collObj.name); - assert.eq('collection', collObj.type); - assert(collObj.hasOwnProperty("idIndex") == !nameOnly, tojson(collObj)); - assert(collObj.hasOwnProperty("options") == !nameOnly, tojson(collObj)); - assert(collObj.hasOwnProperty("info") == !nameOnly, tojson(collObj)); - } +for (let nameOnly of [false, true]) { + // Check that both the nameOnly and full versions of listCollections don't block. + let res = mydb.runCommand({listCollections: 1, nameOnly, maxTimeMS: 20 * 1000}); + assert.commandWorked(res, "listCollections should have succeeded and not timed out"); + let collObj = res.cursor.firstBatch[0]; + // collObj should only have name and type fields. 
+ assert.eq('foo', collObj.name); + assert.eq('collection', collObj.type); + assert(collObj.hasOwnProperty("idIndex") == !nameOnly, tojson(collObj)); + assert(collObj.hasOwnProperty("options") == !nameOnly, tojson(collObj)); + assert(collObj.hasOwnProperty("info") == !nameOnly, tojson(collObj)); +} - assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +assert.commandWorked(session.commitTransaction_forTesting()); +session.endSession(); }()); diff --git a/jstests/core/txns/listcollections_autocomplete.js b/jstests/core/txns/listcollections_autocomplete.js index 9020ded9ca0..01921406f43 100644 --- a/jstests/core/txns/listcollections_autocomplete.js +++ b/jstests/core/txns/listcollections_autocomplete.js @@ -4,56 +4,55 @@ * @tags: [uses_transactions, assumes_unsharded_collection] */ (function() { - 'use strict'; +'use strict'; - function testAutoComplete() { - // This method updates a global object with an array of strings on success. - assert.soon(() => { - shellAutocomplete("db."); - return true; - }, null, 30 * 1000); - return __autocomplete__; - } +function testAutoComplete() { + // This method updates a global object with an array of strings on success. + assert.soon(() => { + shellAutocomplete("db."); + return true; + }, null, 30 * 1000); + return __autocomplete__; +} - // Create a collection. - const collName = 'listcollections_autocomplete'; - assert.commandWorked(db[collName].insertOne({}, {writeConcern: {w: 'majority'}})); +// Create a collection. +const collName = 'listcollections_autocomplete'; +assert.commandWorked(db[collName].insertOne({}, {writeConcern: {w: 'majority'}})); - jsTestLog("Start transaction"); +jsTestLog("Start transaction"); - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase('test'); - const sessionColl = sessionDb[collName]; - session.startTransaction_forTesting(); - assert.commandWorked(sessionColl.insertOne({})); +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase('test'); +const sessionColl = sessionDb[collName]; +session.startTransaction_forTesting(); +assert.commandWorked(sessionColl.insertOne({})); - jsTestLog("Start dropDatabase in parallel shell"); +jsTestLog("Start dropDatabase in parallel shell"); - // Wait for global X lock while blocked behind transaction with global IX lock. - var awaitShell = startParallelShell(function() { - db.getSiblingDB("test2").dropDatabase(); - }); +// Wait for global X lock while blocked behind transaction with global IX lock. +var awaitShell = startParallelShell(function() { + db.getSiblingDB("test2").dropDatabase(); +}); - jsTestLog("Wait for dropDatabase to appear in currentOp"); +jsTestLog("Wait for dropDatabase to appear in currentOp"); - assert.soon(() => { - return db.currentOp({'command.dropDatabase': 1}).inprog; - }); +assert.soon(() => { + return db.currentOp({'command.dropDatabase': 1}).inprog; +}); - jsTestLog("Test that autocompleting collection names fails quickly"); +jsTestLog("Test that autocompleting collection names fails quickly"); - let db_stuff = testAutoComplete(); - assert(!db_stuff.includes(collName), - `Completions should not include "${collName}": ${db_stuff}`); +let db_stuff = testAutoComplete(); +assert(!db_stuff.includes(collName), `Completions should not include "${collName}": ${db_stuff}`); - // Verify we have some results despite the timeout. - assert.contains('db.adminCommand(', db_stuff); +// Verify we have some results despite the timeout. 
+assert.contains('db.adminCommand(', db_stuff); - jsTestLog("Abort transaction"); +jsTestLog("Abort transaction"); - assert.commandWorked(session.abortTransaction_forTesting()); - awaitShell(); - db_stuff = testAutoComplete(); - assert.contains('db.adminCommand(', db_stuff); - assert.contains(`db.${collName}`, db_stuff); +assert.commandWorked(session.abortTransaction_forTesting()); +awaitShell(); +db_stuff = testAutoComplete(); +assert.contains('db.adminCommand(', db_stuff); +assert.contains(`db.${collName}`, db_stuff); })(); diff --git a/jstests/core/txns/many_txns.js b/jstests/core/txns/many_txns.js index 9516b046a5c..2dfda376423 100644 --- a/jstests/core/txns/many_txns.js +++ b/jstests/core/txns/many_txns.js @@ -2,90 +2,95 @@ // many resources (like "write tickets") and don't prevent other operations from succeeding. // @tags: [uses_transactions] (function() { - "use strict"; - - const dbName = "test"; - const collName = "many_txns"; - const numTxns = 150; - - const testDB = db.getSiblingDB(dbName); - const coll = testDB[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - - const startTime = new Date(); - - // Non-transactional write to give something to find. - const initialDoc = {_id: "pretransaction1", x: 0}; - assert.commandWorked(coll.insert(initialDoc, {writeConcern: {w: "majority"}})); - - // Start many transactions, each inserting two documents. - jsTest.log("Start " + numTxns + " transactions, each inserting two documents"); - var sessions = []; - for (let txnNr = 0; txnNr < numTxns; ++txnNr) { - const session = testDB.getMongo().startSession(sessionOptions); - sessions[txnNr] = session; - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - let doc = seq => ({_id: "txn-" + txnNr + "-" + seq}); - - session.startTransaction(); - - let docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [initialDoc]); - - // Insert a doc within the transaction. - assert.commandWorked(sessionColl.insert(doc(1))); - - // Read in the same transaction returns the doc, but not from other txns. - docs = sessionColl.find({_id: {$ne: initialDoc._id}}).toArray(); - assert.sameMembers(docs, [doc(1)]); - - // Insert a doc within a transaction. - assert.commandWorked(sessionColl.insert(doc(2))); +"use strict"; + +const dbName = "test"; +const collName = "many_txns"; +const numTxns = 150; + +const testDB = db.getSiblingDB(dbName); +const coll = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; + +const startTime = new Date(); + +// Non-transactional write to give something to find. +const initialDoc = { + _id: "pretransaction1", + x: 0 +}; +assert.commandWorked(coll.insert(initialDoc, {writeConcern: {w: "majority"}})); + +// Start many transactions, each inserting two documents. 
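// For reference, each of these open transactions boils down to the following minimal
// pattern (a sketch only; the _id values here are illustrative):
//
//     const s = testDB.getMongo().startSession(sessionOptions);
//     const sColl = s.getDatabase(dbName)[collName];
//     s.startTransaction();
//     assert.commandWorked(sColl.insert({_id: "txn-example-1"}));
//     assert.commandWorked(sColl.insert({_id: "txn-example-2"}));
//     // The transaction is intentionally left open; it keeps its snapshot and locks until
//     // the commit loop below resolves it, and holding many of these at once should not
//     // exhaust server resources such as write tickets.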
+jsTest.log("Start " + numTxns + " transactions, each inserting two documents"); +var sessions = []; +for (let txnNr = 0; txnNr < numTxns; ++txnNr) { + const session = testDB.getMongo().startSession(sessionOptions); + sessions[txnNr] = session; + const sessionDb = session.getDatabase(dbName); + const sessionColl = sessionDb[collName]; + let doc = seq => ({_id: "txn-" + txnNr + "-" + seq}); + + session.startTransaction(); + + let docs = sessionColl.find({}).toArray(); + assert.sameMembers(docs, [initialDoc]); + + // Insert a doc within the transaction. + assert.commandWorked(sessionColl.insert(doc(1))); + + // Read in the same transaction returns the doc, but not from other txns. + docs = sessionColl.find({_id: {$ne: initialDoc._id}}).toArray(); + assert.sameMembers(docs, [doc(1)]); + + // Insert a doc within a transaction. + assert.commandWorked(sessionColl.insert(doc(2))); +} +const secondDoc = { + _id: "midtransactions", + x: 1 +}; +assert.commandWorked(coll.insert(secondDoc, {writeConcern: {w: "majority"}})); + +// Commit all sessions. +jsTest.log("Commit all transactions."); +let numAborted = 0; +for (let txnNr = 0; txnNr < numTxns; ++txnNr) { + // First check that a non-transactional operation conflicts and times out quickly. + let doc = seq => ({_id: "txn-" + txnNr + "-" + seq}); + let insertCmd = {insert: collName, documents: [doc(1)], maxTimeMS: 10}; + let insertRes = testDB.runCommand(insertCmd); + + const session = sessions[txnNr]; + let commitRes = session.commitTransaction_forTesting(); + if (commitRes.code === ErrorCodes.NoSuchTransaction) { + ++numAborted; + continue; } - const secondDoc = {_id: "midtransactions", x: 1}; - assert.commandWorked(coll.insert(secondDoc, {writeConcern: {w: "majority"}})); - - // Commit all sessions. - jsTest.log("Commit all transactions."); - let numAborted = 0; - for (let txnNr = 0; txnNr < numTxns; ++txnNr) { - // First check that a non-transactional operation conflicts and times out quickly. - let doc = seq => ({_id: "txn-" + txnNr + "-" + seq}); - let insertCmd = {insert: collName, documents: [doc(1)], maxTimeMS: 10}; - let insertRes = testDB.runCommand(insertCmd); - - const session = sessions[txnNr]; - let commitRes = session.commitTransaction_forTesting(); - if (commitRes.code === ErrorCodes.NoSuchTransaction) { - ++numAborted; - continue; - } - assert.commandWorked(commitRes, "couldn't commit transaction " + txnNr); - assert.commandFailedWithCode(insertRes, ErrorCodes.MaxTimeMSExpired, tojson({insertCmd})); - - // Read with default read concern sees the committed transaction. - assert.eq(doc(1), coll.findOne(doc(1))); - assert.eq(doc(2), coll.findOne(doc(2))); - session.endSession(); - } - - assert.eq(initialDoc, coll.findOne(initialDoc)); - assert.eq(secondDoc, coll.findOne(secondDoc)); - - const elapsedTime = new Date() - startTime; - jsTest.log("Test completed with " + numAborted + " aborted transactions in " + elapsedTime + - " ms"); - - // Check whether we should expect aborts. If the parameter doesn't exist (mongos) don't check. 
- const getParamRes = db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}); - if (getParamRes.ok && elapsedTime < getParamRes.transactionLifetimeLimitSeconds) - assert.eq(numAborted, - 0, - "should not get aborts when transactionLifetimeLimitSeconds not exceeded"); + assert.commandWorked(commitRes, "couldn't commit transaction " + txnNr); + assert.commandFailedWithCode(insertRes, ErrorCodes.MaxTimeMSExpired, tojson({insertCmd})); + + // Read with default read concern sees the committed transaction. + assert.eq(doc(1), coll.findOne(doc(1))); + assert.eq(doc(2), coll.findOne(doc(2))); + session.endSession(); +} + +assert.eq(initialDoc, coll.findOne(initialDoc)); +assert.eq(secondDoc, coll.findOne(secondDoc)); + +const elapsedTime = new Date() - startTime; +jsTest.log("Test completed with " + numAborted + " aborted transactions in " + elapsedTime + " ms"); + +// Check whether we should expect aborts. If the parameter doesn't exist (mongos) don't check. +const getParamRes = db.adminCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}); +if (getParamRes.ok && elapsedTime < getParamRes.transactionLifetimeLimitSeconds) + assert.eq( + numAborted, 0, "should not get aborts when transactionLifetimeLimitSeconds not exceeded"); }()); diff --git a/jstests/core/txns/multi_delete_in_transaction.js b/jstests/core/txns/multi_delete_in_transaction.js index 5dcfab97217..c8aad0c5c79 100644 --- a/jstests/core/txns/multi_delete_in_transaction.js +++ b/jstests/core/txns/multi_delete_in_transaction.js @@ -1,60 +1,61 @@ // Test transactions including multi-deletes // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "multi_delete_in_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "multi_delete_in_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; - jsTest.log("Prepopulate the collection."); - assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}], - {writeConcern: {w: "majority"}})); +jsTest.log("Prepopulate the collection."); +assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}], + {writeConcern: {w: "majority"}})); - jsTest.log("Do an empty multi-delete."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do an empty multi-delete."); +session.startTransaction({writeConcern: {w: "majority"}}); - // Remove no docs. - let res = sessionColl.remove({a: 99}, {justOne: false}); - assert.eq(0, res.nRemoved); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]); +// Remove no docs. 
+let res = sessionColl.remove({a: 99}, {justOne: false}); +assert.eq(0, res.nRemoved); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - jsTest.log("Do a single-result multi-delete."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a single-result multi-delete."); +session.startTransaction({writeConcern: {w: "majority"}}); - // Remove one doc. - res = sessionColl.remove({a: 1}, {justOne: false}); - assert.eq(1, res.nRemoved); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}]); +// Remove one doc. +res = sessionColl.remove({a: 1}, {justOne: false}); +assert.eq(1, res.nRemoved); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}]); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - jsTest.log("Do a multiple-result multi-delete."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTest.log("Do a multiple-result multi-delete."); +session.startTransaction({writeConcern: {w: "majority"}}); - // Remove 2 docs. - res = sessionColl.remove({a: 0}, {justOne: false}); - assert.eq(2, res.nRemoved); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), []); +// Remove 2 docs. +res = sessionColl.remove({a: 0}, {justOne: false}); +assert.eq(2, res.nRemoved); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), []); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - // Collection should be empty. - assert.eq(0, testColl.find().itcount()); +// Collection should be empty. +assert.eq(0, testColl.find().itcount()); }()); diff --git a/jstests/core/txns/multi_statement_transaction.js b/jstests/core/txns/multi_statement_transaction.js index 5a29e81d4d7..37d25b56b26 100644 --- a/jstests/core/txns/multi_statement_transaction.js +++ b/jstests/core/txns/multi_statement_transaction.js @@ -1,159 +1,160 @@ // Test basic multi-statement transaction. // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "multi_statement_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "multi_statement_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; - /*********************************************************************************************** - * Insert two documents in a transaction. 
- **********************************************************************************************/ +/*********************************************************************************************** + * Insert two documents in a transaction. + **********************************************************************************************/ - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - jsTest.log("Insert two documents in a transaction"); +jsTest.log("Insert two documents in a transaction"); - session.startTransaction(); +session.startTransaction(); - // Insert a doc within the transaction. - assert.commandWorked(sessionColl.insert({_id: "insert-1"})); +// Insert a doc within the transaction. +assert.commandWorked(sessionColl.insert({_id: "insert-1"})); - // Cannot read with default read concern. - assert.eq(null, testColl.findOne({_id: "insert-1"})); - // But read in the same transaction returns the doc. - assert.docEq({_id: "insert-1"}, sessionColl.findOne()); +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "insert-1"})); +// But read in the same transaction returns the doc. +assert.docEq({_id: "insert-1"}, sessionColl.findOne()); - // Read with aggregation also returns the document. - let docs = sessionColl.aggregate([{$match: {_id: "insert-1"}}]).toArray(); - assert.sameMembers([{_id: "insert-1"}], docs); +// Read with aggregation also returns the document. +let docs = sessionColl.aggregate([{$match: {_id: "insert-1"}}]).toArray(); +assert.sameMembers([{_id: "insert-1"}], docs); - // Insert a doc within a transaction. - assert.commandWorked(sessionColl.insert({_id: "insert-2"})); +// Insert a doc within a transaction. +assert.commandWorked(sessionColl.insert({_id: "insert-2"})); - // Cannot read with default read concern. - assert.eq(null, testColl.findOne({_id: "insert-1"})); - // Cannot read with default read concern. - assert.eq(null, testColl.findOne({_id: "insert-2"})); +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "insert-1"})); +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "insert-2"})); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - // Read with default read concern sees the committed transaction. - assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); - assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"})); +// Read with default read concern sees the committed transaction. +assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); +assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"})); - /*********************************************************************************************** - * Update documents in a transaction. - **********************************************************************************************/ +/*********************************************************************************************** + * Update documents in a transaction. 
+ **********************************************************************************************/ - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - jsTest.log("Update documents in a transaction"); +jsTest.log("Update documents in a transaction"); - // Insert the docs to be updated. - assert.commandWorked(sessionColl.insert([{_id: "update-1", a: 0}, {_id: "update-2", a: 0}], - {writeConcern: {w: "majority"}})); +// Insert the docs to be updated. +assert.commandWorked(sessionColl.insert([{_id: "update-1", a: 0}, {_id: "update-2", a: 0}], + {writeConcern: {w: "majority"}})); - // Update the docs in a new transaction. - session.startTransaction(); +// Update the docs in a new transaction. +session.startTransaction(); - assert.commandWorked(sessionColl.update({_id: "update-1"}, {$inc: {a: 1}})); +assert.commandWorked(sessionColl.update({_id: "update-1"}, {$inc: {a: 1}})); - // Batch update in transaction. - let bulk = sessionColl.initializeUnorderedBulkOp(); - bulk.find({_id: "update-1"}).updateOne({$inc: {a: 1}}); - bulk.find({_id: "update-2"}).updateOne({$inc: {a: 1}}); - assert.commandWorked(bulk.execute()); +// Batch update in transaction. +let bulk = sessionColl.initializeUnorderedBulkOp(); +bulk.find({_id: "update-1"}).updateOne({$inc: {a: 1}}); +bulk.find({_id: "update-2"}).updateOne({$inc: {a: 1}}); +assert.commandWorked(bulk.execute()); - // Cannot read with default read concern. - assert.eq({_id: "update-1", a: 0}, testColl.findOne({_id: "update-1"})); - assert.eq({_id: "update-2", a: 0}, testColl.findOne({_id: "update-2"})); +// Cannot read with default read concern. +assert.eq({_id: "update-1", a: 0}, testColl.findOne({_id: "update-1"})); +assert.eq({_id: "update-2", a: 0}, testColl.findOne({_id: "update-2"})); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - // Read with default read concern sees the committed transaction. - assert.eq({_id: "update-1", a: 2}, testColl.findOne({_id: "update-1"})); - assert.eq({_id: "update-2", a: 1}, testColl.findOne({_id: "update-2"})); +// Read with default read concern sees the committed transaction. +assert.eq({_id: "update-1", a: 2}, testColl.findOne({_id: "update-1"})); +assert.eq({_id: "update-2", a: 1}, testColl.findOne({_id: "update-2"})); - /*********************************************************************************************** - * Insert, update and read documents in a transaction. - **********************************************************************************************/ +/*********************************************************************************************** + * Insert, update and read documents in a transaction. + **********************************************************************************************/ - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - jsTest.log("Insert, update and read documents in a transaction"); +jsTest.log("Insert, update and read documents in a transaction"); - session.startTransaction(); - assert.commandWorked(sessionColl.insert([{_id: "doc-1"}, {_id: "doc-2"}])); +session.startTransaction(); +assert.commandWorked(sessionColl.insert([{_id: "doc-1"}, {_id: "doc-2"}])); - // Update the two docs in transaction. 
- assert.commandWorked(sessionColl.update({_id: "doc-1"}, {$inc: {a: 1}})); - assert.commandWorked(sessionColl.update({_id: "doc-2"}, {$inc: {a: 1}})); +// Update the two docs in transaction. +assert.commandWorked(sessionColl.update({_id: "doc-1"}, {$inc: {a: 1}})); +assert.commandWorked(sessionColl.update({_id: "doc-2"}, {$inc: {a: 1}})); - // Cannot read with default read concern. - assert.eq(null, testColl.findOne({_id: "doc-1"})); - assert.eq(null, testColl.findOne({_id: "doc-2"})); +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "doc-1"})); +assert.eq(null, testColl.findOne({_id: "doc-2"})); - // But read in the same transaction returns the docs. - docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}]}).toArray(); - assert.sameMembers([{_id: "doc-1", a: 1}, {_id: "doc-2", a: 1}], docs); +// But read in the same transaction returns the docs. +docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}]}).toArray(); +assert.sameMembers([{_id: "doc-1", a: 1}, {_id: "doc-2", a: 1}], docs); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Commit the transaction. +assert.commandWorked(session.commitTransaction_forTesting()); - // Read with default read concern sees the committed transaction. - assert.eq({_id: "doc-1", a: 1}, testColl.findOne({_id: "doc-1"})); - assert.eq({_id: "doc-2", a: 1}, testColl.findOne({_id: "doc-2"})); +// Read with default read concern sees the committed transaction. +assert.eq({_id: "doc-1", a: 1}, testColl.findOne({_id: "doc-1"})); +assert.eq({_id: "doc-2", a: 1}, testColl.findOne({_id: "doc-2"})); - jsTest.log("Insert and delete documents in a transaction"); +jsTest.log("Insert and delete documents in a transaction"); - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - assert.commandWorked( - testColl.insert([{_id: "doc-1"}, {_id: "doc-2"}], {writeConcern: {w: "majority"}})); +assert.commandWorked( + testColl.insert([{_id: "doc-1"}, {_id: "doc-2"}], {writeConcern: {w: "majority"}})); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: "doc-3"})); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: "doc-3"})); - // Remove three docs in transaction. - assert.commandWorked(sessionColl.remove({_id: "doc-1"})); +// Remove three docs in transaction. +assert.commandWorked(sessionColl.remove({_id: "doc-1"})); - // Batch delete. - bulk = sessionColl.initializeUnorderedBulkOp(); - bulk.find({_id: "doc-2"}).removeOne(); - bulk.find({_id: "doc-3"}).removeOne(); - assert.commandWorked(bulk.execute()); +// Batch delete. +bulk = sessionColl.initializeUnorderedBulkOp(); +bulk.find({_id: "doc-2"}).removeOne(); +bulk.find({_id: "doc-3"}).removeOne(); +assert.commandWorked(bulk.execute()); - // Cannot read the new doc and still see the to-be removed docs with default read concern. - assert.eq({_id: "doc-1"}, testColl.findOne({_id: "doc-1"})); - assert.eq({_id: "doc-2"}, testColl.findOne({_id: "doc-2"})); - assert.eq(null, testColl.findOne({_id: "doc-3"})); +// Cannot read the new doc and still see the to-be removed docs with default read concern. +assert.eq({_id: "doc-1"}, testColl.findOne({_id: "doc-1"})); +assert.eq({_id: "doc-2"}, testColl.findOne({_id: "doc-2"})); +assert.eq(null, testColl.findOne({_id: "doc-3"})); - // But read in the same transaction sees the docs get deleted. 
- docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}, {_id: "doc-3"}]}).toArray();
- assert.sameMembers([], docs);
+// But read in the same transaction sees the docs get deleted.
+docs = sessionColl.find({$or: [{_id: "doc-1"}, {_id: "doc-2"}, {_id: "doc-3"}]}).toArray();
+assert.sameMembers([], docs);

- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());

- // Read with default read concern sees the committed transaction.
- assert.eq(null, testColl.findOne({_id: "doc-1"}));
- assert.eq(null, testColl.findOne({_id: "doc-2"}));
- assert.eq(null, testColl.findOne({_id: "doc-3"}));
+// Read with default read concern sees the committed transaction.
+assert.eq(null, testColl.findOne({_id: "doc-1"}));
+assert.eq(null, testColl.findOne({_id: "doc-2"}));
+assert.eq(null, testColl.findOne({_id: "doc-3"}));

- session.endSession();
+session.endSession();
 }());
diff --git a/jstests/core/txns/multi_statement_transaction_abort.js b/jstests/core/txns/multi_statement_transaction_abort.js
index 3e8e8a62758..a7946af8eda 100644
--- a/jstests/core/txns/multi_statement_transaction_abort.js
+++ b/jstests/core/txns/multi_statement_transaction_abort.js
@@ -1,255 +1,257 @@
 // Test basic multi-statement transaction abort.
 // @tags: [uses_transactions, uses_snapshot_read_concern]

 (function() {
- "use strict";
-
- const dbName = "test";
- const collName = "multi_statement_transaction_abort";
- const testDB = db.getSiblingDB(dbName);
- const testColl = testDB[collName];
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- let txnNumber = 0;
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
-
- jsTest.log("Insert two documents in a transaction and abort");
-
- // Insert a doc within the transaction.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
- }));
-
- // Insert a doc within a transaction.
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-2"}],
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- // Cannot read with default read concern.
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- // abortTransaction can only be run on the admin database.
- assert.commandWorked(sessionDb.adminCommand({
- abortTransaction: 1,
- writeConcern: {w: "majority"},
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Read with default read concern cannot see the aborted transaction.
- assert.eq(null, testColl.findOne({_id: "insert-1"}));
- assert.eq(null, testColl.findOne({_id: "insert-2"}));
-
- jsTest.log("Insert two documents in a transaction and commit");
-
- // Insert a doc with the same _id in a new transaction should work.
- txnNumber++; - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "insert-1"}, {_id: "insert-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - // commitTransaction can only be called on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - })); - // Read with default read concern sees the committed transaction. - assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); - assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"})); - - jsTest.log("Cannot abort empty transaction because it's not in progress"); - txnNumber++; - // abortTransaction can only be called on the admin database. - let res = sessionDb.adminCommand({ - abortTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - }); - assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - - jsTest.log("Abort transaction on duplicated key errors"); - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - assert.commandWorked(testColl.insert({_id: "insert-1"}, {writeConcern: {w: "majority"}})); - txnNumber++; - // The first insert works well. - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "insert-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - // But the second insert throws duplicated index key error. - res = assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "insert-1", x: 0}], - txnNumber: NumberLong(txnNumber), - autocommit: false - }), - ErrorCodes.DuplicateKey); - // DuplicateKey is not a transient error. - assert.eq(res.errorLabels, null); - - // The error aborts the transaction. - // commitTransaction can only be called on the admin database. - assert.commandFailedWithCode(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - // Verify the documents are the same. - assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); - assert.eq(null, testColl.findOne({_id: "insert-2"})); - - jsTest.log("Abort transaction on write conflict errors"); - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - txnNumber++; - const session2 = testDB.getMongo().startSession(sessionOptions); - const sessionDb2 = session2.getDatabase(dbName); - // Insert a doc from session 1. - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "insert-1", from: 1}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - let txnNumber2 = 0; - // Insert a doc from session 2 that doesn't conflict with session 1. - assert.commandWorked(sessionDb2.runCommand({ - insert: collName, - documents: [{_id: "insert-2", from: 2}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber2), - startTransaction: true, - autocommit: false - })); - // Insert a doc from session 2 that conflicts with session 1. 
- res = sessionDb2.runCommand({ - insert: collName, - documents: [{_id: "insert-1", from: 2}], - txnNumber: NumberLong(txnNumber2), - autocommit: false - }); - assert.commandFailedWithCode(res, ErrorCodes.WriteConflict); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - - // Session 1 isn't affected. - // commitTransaction can only be called on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - })); - // Transaction on session 2 is aborted. - assert.commandFailedWithCode(sessionDb2.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber2), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - // Verify the documents only reflect the first transaction. - assert.eq({_id: "insert-1", from: 1}, testColl.findOne({_id: "insert-1"})); - assert.eq(null, testColl.findOne({_id: "insert-2"})); - - jsTest.log("Higher transaction number aborts existing running transaction."); - txnNumber++; - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "running-txn-1"}, {_id: "running-txn-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - // A higher txnNumber aborts the old and inserts the same document. - txnNumber++; - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "running-txn-2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - // commitTransaction can only be called on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - })); - // Read with default read concern sees the committed transaction but cannot see the aborted one. - assert.eq(null, testColl.findOne({_id: "running-txn-1"})); - assert.eq({_id: "running-txn-2"}, testColl.findOne({_id: "running-txn-2"})); - - jsTest.log("Higher transaction number aborts existing running snapshot read."); - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - assert.commandWorked( - testColl.insert([{doc: 1}, {doc: 2}, {doc: 3}], {writeConcern: {w: "majority"}})); - txnNumber++; - // Perform a snapshot read under a new transaction. - let runningReadResult = assert.commandWorked(sessionDb.runCommand({ - find: collName, - batchSize: 2, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // The cursor has not been exhausted. - assert(runningReadResult.hasOwnProperty("cursor"), tojson(runningReadResult)); - assert.neq(0, runningReadResult.cursor.id, tojson(runningReadResult)); - - txnNumber++; - // Perform a second snapshot read under a new transaction. - let newReadResult = assert.commandWorked(sessionDb.runCommand({ - find: collName, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // The cursor has been exhausted. - assert(newReadResult.hasOwnProperty("cursor"), tojson(newReadResult)); - assert.eq(0, newReadResult.cursor.id, tojson(newReadResult)); - // commitTransaction can only be called on the admin database. 
- assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - })); - - session.endSession(); +"use strict"; + +const dbName = "test"; +const collName = "multi_statement_transaction_abort"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +let txnNumber = 0; + +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); + +jsTest.log("Insert two documents in a transaction and abort"); + +// Insert a doc within the transaction. +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-1"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Insert a doc within a transaction. +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-2"}], + txnNumber: NumberLong(txnNumber), + autocommit: false +})); + +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "insert-1"})); +// Cannot read with default read concern. +assert.eq(null, testColl.findOne({_id: "insert-2"})); + +// abortTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); + +// Read with default read concern cannot see the aborted transaction. +assert.eq(null, testColl.findOne({_id: "insert-1"})); +assert.eq(null, testColl.findOne({_id: "insert-2"})); + +jsTest.log("Insert two documents in a transaction and commit"); + +// Insert a doc with the same _id in a new transaction should work. +txnNumber++; +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-1"}, {_id: "insert-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); +// commitTransaction can only be called on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); +// Read with default read concern sees the committed transaction. +assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); +assert.eq({_id: "insert-2"}, testColl.findOne({_id: "insert-2"})); + +jsTest.log("Cannot abort empty transaction because it's not in progress"); +txnNumber++; +// abortTransaction can only be called on the admin database. +let res = sessionDb.adminCommand({ + abortTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +}); +assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction); +assert.eq(res.errorLabels, ["TransientTransactionError"]); + +jsTest.log("Abort transaction on duplicated key errors"); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl.insert({_id: "insert-1"}, {writeConcern: {w: "majority"}})); +txnNumber++; +// The first insert works well. 
+assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); +// But the second insert throws duplicated index key error. +res = assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-1", x: 0}], + txnNumber: NumberLong(txnNumber), + autocommit: false +}), + ErrorCodes.DuplicateKey); +// DuplicateKey is not a transient error. +assert.eq(res.errorLabels, null); + +// The error aborts the transaction. +// commitTransaction can only be called on the admin database. +assert.commandFailedWithCode(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); +// Verify the documents are the same. +assert.eq({_id: "insert-1"}, testColl.findOne({_id: "insert-1"})); +assert.eq(null, testColl.findOne({_id: "insert-2"})); + +jsTest.log("Abort transaction on write conflict errors"); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +txnNumber++; +const session2 = testDB.getMongo().startSession(sessionOptions); +const sessionDb2 = session2.getDatabase(dbName); +// Insert a doc from session 1. +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "insert-1", from: 1}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); +let txnNumber2 = 0; +// Insert a doc from session 2 that doesn't conflict with session 1. +assert.commandWorked(sessionDb2.runCommand({ + insert: collName, + documents: [{_id: "insert-2", from: 2}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber2), + startTransaction: true, + autocommit: false +})); +// Insert a doc from session 2 that conflicts with session 1. +res = sessionDb2.runCommand({ + insert: collName, + documents: [{_id: "insert-1", from: 2}], + txnNumber: NumberLong(txnNumber2), + autocommit: false +}); +assert.commandFailedWithCode(res, ErrorCodes.WriteConflict); +assert.eq(res.errorLabels, ["TransientTransactionError"]); + +// Session 1 isn't affected. +// commitTransaction can only be called on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); +// Transaction on session 2 is aborted. +assert.commandFailedWithCode(sessionDb2.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber2), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); +// Verify the documents only reflect the first transaction. +assert.eq({_id: "insert-1", from: 1}, testColl.findOne({_id: "insert-1"})); +assert.eq(null, testColl.findOne({_id: "insert-2"})); + +jsTest.log("Higher transaction number aborts existing running transaction."); +txnNumber++; +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "running-txn-1"}, {_id: "running-txn-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); +// A higher txnNumber aborts the old and inserts the same document. 
+txnNumber++; +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "running-txn-2"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); +// commitTransaction can only be called on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); +// Read with default read concern sees the committed transaction but cannot see the aborted one. +assert.eq(null, testColl.findOne({_id: "running-txn-1"})); +assert.eq({_id: "running-txn-2"}, testColl.findOne({_id: "running-txn-2"})); + +jsTest.log("Higher transaction number aborts existing running snapshot read."); +assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +assert.commandWorked( + testColl.insert([{doc: 1}, {doc: 2}, {doc: 3}], {writeConcern: {w: "majority"}})); +txnNumber++; +// Perform a snapshot read under a new transaction. +let runningReadResult = assert.commandWorked(sessionDb.runCommand({ + find: collName, + batchSize: 2, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// The cursor has not been exhausted. +assert(runningReadResult.hasOwnProperty("cursor"), tojson(runningReadResult)); +assert.neq(0, runningReadResult.cursor.id, tojson(runningReadResult)); + +txnNumber++; +// Perform a second snapshot read under a new transaction. +let newReadResult = assert.commandWorked(sessionDb.runCommand({ + find: collName, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// The cursor has been exhausted. +assert(newReadResult.hasOwnProperty("cursor"), tojson(newReadResult)); +assert.eq(0, newReadResult.cursor.id, tojson(newReadResult)); +// commitTransaction can only be called on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); + +session.endSession(); }()); diff --git a/jstests/core/txns/multi_statement_transaction_command_args.js b/jstests/core/txns/multi_statement_transaction_command_args.js index f0e4ce29759..ef176a4a28b 100644 --- a/jstests/core/txns/multi_statement_transaction_command_args.js +++ b/jstests/core/txns/multi_statement_transaction_command_args.js @@ -5,310 +5,308 @@ */ (function() { - "use strict"; - load('jstests/libs/uuid_util.js'); - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - - // Makes assertions on commands run without logical session ids. - TestData.disableImplicitSessions = true; - - const dbName = "test"; - const collName = "multi_statement_transaction_command_args"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - let txnNumber = 0; - - // Set up the test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - // Initiate the session. 
- const sessionOptions = {causalConsistency: false}; - let session = db.getMongo().startSession(sessionOptions); - let sessionDb = session.getDatabase(dbName); - - /*********************************************************************************************** - * Verify that fields are not accepted unless their preconditional fields are present in - * this hierarchy: lsid -> txnNumber -> autocommit -> startTransaction - * Omitted fields are commented out explicitly. - **********************************************************************************************/ - - // lsid -> txnNumber. - // Running a command through 'sessionDb' implicitly attaches an 'lsid' to commands, - // so 'testDB' is used instead. - jsTestLog("Try to begin a transaction with txnNumber but no lsid"); - txnNumber++; - let res = assert.commandFailedWithCode(testDB.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - // autocommit: false, - // startTransaction: true - }), - ErrorCodes.InvalidOptions); - assert(res.errmsg.includes("Transaction number requires a session ID")); - - // txnNumber -> autocommit - jsTestLog("Try to begin a transaction with autocommit but no txnNumber"); - txnNumber++; - res = assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - // txnNumber: NumberLong(txnNumber), - autocommit: false, - // startTransaction: true - }), - ErrorCodes.InvalidOptions); - assert(res.errmsg.includes("'autocommit' field requires a transaction number")); - - // autocommit -> startTransaction - jsTestLog("Try to begin a transaction with startTransaction but no autocommit"); - txnNumber++; - res = assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - // autocommit: false, - startTransaction: true - }), +"use strict"; +load('jstests/libs/uuid_util.js'); +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. + +// Makes assertions on commands run without logical session ids. +TestData.disableImplicitSessions = true; + +const dbName = "test"; +const collName = "multi_statement_transaction_command_args"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; +let txnNumber = 0; + +// Set up the test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +// Initiate the session. +const sessionOptions = { + causalConsistency: false +}; +let session = db.getMongo().startSession(sessionOptions); +let sessionDb = session.getDatabase(dbName); + +/*********************************************************************************************** + * Verify that fields are not accepted unless their preconditional fields are present in + * this hierarchy: lsid -> txnNumber -> autocommit -> startTransaction + * Omitted fields are commented out explicitly. + **********************************************************************************************/ + +// lsid -> txnNumber. +// Running a command through 'sessionDb' implicitly attaches an 'lsid' to commands, +// so 'testDB' is used instead. 
+jsTestLog("Try to begin a transaction with txnNumber but no lsid"); +txnNumber++; +let res = assert.commandFailedWithCode(testDB.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + // autocommit: false, + // startTransaction: true +}), ErrorCodes.InvalidOptions); - assert(res.errmsg.includes("'startTransaction' field requires 'autocommit' field")); - - /*********************************************************************************************** - * Verify that the 'startTransaction' argument works correctly. - **********************************************************************************************/ - - jsTestLog("Begin a transaction with startTransaction=true and autocommit=false"); - txnNumber++; - - // Start the transaction. - assert.commandWorked(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // Commit the transaction. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - autocommit: false, - writeConcern: {w: "majority"} - })); - - jsTestLog("Try to start an already in progress transaction."); - txnNumber++; - - // Start the transaction. - assert.commandWorked(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // Try to start the transaction again. - assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - }), - ErrorCodes.ConflictingOperationInProgress); - - // Commit the transaction. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - autocommit: false, - writeConcern: {w: "majority"} - })); - - jsTestLog( - "Try to begin a transaction by omitting 'startTransaction' and setting autocommit=false"); - txnNumber++; - assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - }), - [ErrorCodes.InvalidOptions, ErrorCodes.NoSuchTransaction]); - - jsTestLog("Try to begin a transaction with startTransaction=false and autocommit=false"); - txnNumber++; - assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: false, - autocommit: false - }), - ErrorCodes.InvalidOptions); - - /*********************************************************************************************** - * Setting autocommit=true or omitting autocommit on a transaction operation fails. - **********************************************************************************************/ - - jsTestLog("Run an initial transaction operation with autocommit=true"); - txnNumber++; - - assert.commandFailedWithCode(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: true - }), - ErrorCodes.InvalidOptions); - - // Mongos has special handling for commitTransaction to support commit recovery. - if (!FixtureHelpers.isMongos(sessionDb)) { - // Committing the transaction should fail. 
- assert.commandFailedWithCode(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - autocommit: false, - writeConcern: {w: "majority"} - }), - ErrorCodes.NoSuchTransaction); - } - - jsTestLog("Run a non-initial transaction operation with autocommit=true"); - txnNumber++; - - // Start the transaction with an insert. - assert.commandWorked(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // Try to execute a transaction operation with autocommit=true. It should fail without affecting - // the transaction. - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: txnNumber + "_1"}], - txnNumber: NumberLong(txnNumber), - autocommit: true - }), - ErrorCodes.InvalidOptions); - - // Try to execute a transaction operation without an autocommit field. It should fail without - // affecting the transaction. - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: txnNumber + "_2"}], - txnNumber: NumberLong(txnNumber), - }), - ErrorCodes.IncompleteTransactionHistory); - - // Committing the transaction should succeed. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - autocommit: false, - writeConcern: {w: "majority"} - })); - - /*********************************************************************************************** - * Invalid to include autocommit field on an operation not inside a transaction. - **********************************************************************************************/ - - jsTestLog("Run an operation with autocommit=false outside of a transaction."); - txnNumber++; - - assert.commandWorked(sessionDb.runCommand({find: collName, filter: {}})); - - assert.commandFailedWithCode( - sessionDb.runCommand( - {find: collName, filter: {}, txnNumber: NumberLong(txnNumber), autocommit: false}), - ErrorCodes.NoSuchTransaction); - - /*********************************************************************************************** - * The 'autocommit' field must be specified on commit/abort commands. - **********************************************************************************************/ - - jsTestLog("Run a commitTransaction command with valid and invalid 'autocommit' field values."); - txnNumber++; - - // Start the transaction with an insert. - assert.commandWorked(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // Committing the transaction should fail if 'autocommit' is omitted. 
+assert(res.errmsg.includes("Transaction number requires a session ID")); + +// txnNumber -> autocommit +jsTestLog("Try to begin a transaction with autocommit but no txnNumber"); +txnNumber++; +res = assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + // txnNumber: NumberLong(txnNumber), + autocommit: false, + // startTransaction: true +}), + ErrorCodes.InvalidOptions); +assert(res.errmsg.includes("'autocommit' field requires a transaction number")); + +// autocommit -> startTransaction +jsTestLog("Try to begin a transaction with startTransaction but no autocommit"); +txnNumber++; +res = assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + // autocommit: false, + startTransaction: true +}), + ErrorCodes.InvalidOptions); +assert(res.errmsg.includes("'startTransaction' field requires 'autocommit' field")); + +/*********************************************************************************************** + * Verify that the 'startTransaction' argument works correctly. + **********************************************************************************************/ + +jsTestLog("Begin a transaction with startTransaction=true and autocommit=false"); +txnNumber++; + +// Start the transaction. +assert.commandWorked(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Commit the transaction. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + autocommit: false, + writeConcern: {w: "majority"} +})); + +jsTestLog("Try to start an already in progress transaction."); +txnNumber++; + +// Start the transaction. +assert.commandWorked(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Try to start the transaction again. +assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +}), + ErrorCodes.ConflictingOperationInProgress); + +// Commit the transaction. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + autocommit: false, + writeConcern: {w: "majority"} +})); + +jsTestLog("Try to begin a transaction by omitting 'startTransaction' and setting autocommit=false"); +txnNumber++; +assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +}), + [ErrorCodes.InvalidOptions, ErrorCodes.NoSuchTransaction]); + +jsTestLog("Try to begin a transaction with startTransaction=false and autocommit=false"); +txnNumber++; +assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: false, + autocommit: false +}), + ErrorCodes.InvalidOptions); + +/*********************************************************************************************** + * Setting autocommit=true or omitting autocommit on a transaction operation fails. 
+ **********************************************************************************************/ + +jsTestLog("Run an initial transaction operation with autocommit=true"); +txnNumber++; + +assert.commandFailedWithCode(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: true +}), + ErrorCodes.InvalidOptions); + +// Mongos has special handling for commitTransaction to support commit recovery. +if (!FixtureHelpers.isMongos(sessionDb)) { + // Committing the transaction should fail. assert.commandFailedWithCode(sessionDb.adminCommand({ commitTransaction: 1, txnNumber: NumberLong(txnNumber), - writeConcern: {w: "majority"} - }), - 50768); - - // Committing the transaction should fail if autocommit=true. - assert.commandFailedWithCode(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), - writeConcern: {w: "majority"}, - autocommit: true - }), - ErrorCodes.InvalidOptions); - - // Committing the transaction should succeed. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber), autocommit: false, writeConcern: {w: "majority"} - })); - - jsTestLog("Run an abortTransaction command with and without an 'autocommit' field"); - txnNumber++; - - // Start the transaction with an insert. - assert.commandWorked(sessionDb.runCommand({ - find: collName, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false, - })); - - // Aborting the transaction should fail if 'autocommit' is omitted. - assert.commandFailedWithCode( - sessionDb.adminCommand({abortTransaction: 1, txnNumber: NumberLong(txnNumber)}), 50768); - - // Aborting the transaction should fail if autocommit=true. - assert.commandFailedWithCode( - sessionDb.adminCommand( - {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: true}), - ErrorCodes.InvalidOptions); - - // Aborting the transaction should succeed. - assert.commandWorked(sessionDb.adminCommand( - {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false})); + }), + ErrorCodes.NoSuchTransaction); +} + +jsTestLog("Run a non-initial transaction operation with autocommit=true"); +txnNumber++; + +// Start the transaction with an insert. +assert.commandWorked(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Try to execute a transaction operation with autocommit=true. It should fail without affecting +// the transaction. +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: txnNumber + "_1"}], + txnNumber: NumberLong(txnNumber), + autocommit: true +}), + ErrorCodes.InvalidOptions); + +// Try to execute a transaction operation without an autocommit field. It should fail without +// affecting the transaction. +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: txnNumber + "_2"}], + txnNumber: NumberLong(txnNumber), +}), + ErrorCodes.IncompleteTransactionHistory); + +// Committing the transaction should succeed. 
+assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + autocommit: false, + writeConcern: {w: "majority"} +})); + +/*********************************************************************************************** + * Invalid to include autocommit field on an operation not inside a transaction. + **********************************************************************************************/ + +jsTestLog("Run an operation with autocommit=false outside of a transaction."); +txnNumber++; + +assert.commandWorked(sessionDb.runCommand({find: collName, filter: {}})); + +assert.commandFailedWithCode( + sessionDb.runCommand( + {find: collName, filter: {}, txnNumber: NumberLong(txnNumber), autocommit: false}), + ErrorCodes.NoSuchTransaction); + +/*********************************************************************************************** + * The 'autocommit' field must be specified on commit/abort commands. + **********************************************************************************************/ + +jsTestLog("Run a commitTransaction command with valid and invalid 'autocommit' field values."); +txnNumber++; + +// Start the transaction with an insert. +assert.commandWorked(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Committing the transaction should fail if 'autocommit' is omitted. +assert.commandFailedWithCode( + sessionDb.adminCommand( + {commitTransaction: 1, txnNumber: NumberLong(txnNumber), writeConcern: {w: "majority"}}), + 50768); + +// Committing the transaction should fail if autocommit=true. +assert.commandFailedWithCode(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + writeConcern: {w: "majority"}, + autocommit: true +}), + ErrorCodes.InvalidOptions); + +// Committing the transaction should succeed. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber), + autocommit: false, + writeConcern: {w: "majority"} +})); + +jsTestLog("Run an abortTransaction command with and without an 'autocommit' field"); +txnNumber++; + +// Start the transaction with an insert. +assert.commandWorked(sessionDb.runCommand({ + find: collName, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false, +})); + +// Aborting the transaction should fail if 'autocommit' is omitted. +assert.commandFailedWithCode( + sessionDb.adminCommand({abortTransaction: 1, txnNumber: NumberLong(txnNumber)}), 50768); + +// Aborting the transaction should fail if autocommit=true. +assert.commandFailedWithCode( + sessionDb.adminCommand( + {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: true}), + ErrorCodes.InvalidOptions); + +// Aborting the transaction should succeed. +assert.commandWorked(sessionDb.adminCommand( + {abortTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false})); }()); diff --git a/jstests/core/txns/multi_statement_transaction_using_api.js b/jstests/core/txns/multi_statement_transaction_using_api.js index d9b440c355c..910fa45c68b 100644 --- a/jstests/core/txns/multi_statement_transaction_using_api.js +++ b/jstests/core/txns/multi_statement_transaction_using_api.js @@ -1,114 +1,116 @@ // Test basic transaction write ops, reads, and commit/abort using the shell helper. 
// @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "multi_transaction_test_using_api"; - const testDB = db.getSiblingDB(dbName); +const dbName = "test"; +const collName = "multi_transaction_test_using_api"; +const testDB = db.getSiblingDB(dbName); - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); - // - // Test that calling abortTransaction as the first statement in a transaction is allowed and - // modifies the state accordingly. - // - jsTestLog("Call abortTransaction as the first statement in a transaction"); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +// +// Test that calling abortTransaction as the first statement in a transaction is allowed and +// modifies the state accordingly. +// +jsTestLog("Call abortTransaction as the first statement in a transaction"); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - // Successfully call abortTransaction. - assert.commandWorked(session.abortTransaction_forTesting()); +// Successfully call abortTransaction. +assert.commandWorked(session.abortTransaction_forTesting()); - // - // Test that calling commitTransaction as the first statement in a transaction is allowed and - // modifies the state accordingly. - // - jsTestLog("Call commitTransaction as the first statement in a transaction"); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +// +// Test that calling commitTransaction as the first statement in a transaction is allowed and +// modifies the state accordingly. +// +jsTestLog("Call commitTransaction as the first statement in a transaction"); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - // Successfully call commitTransaction. - assert.commandWorked(session.commitTransaction_forTesting()); +// Successfully call commitTransaction. +assert.commandWorked(session.commitTransaction_forTesting()); - jsTestLog("Run CRUD ops, read ops, and commit transaction."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +jsTestLog("Run CRUD ops, read ops, and commit transaction."); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - // Performing a read first should work when snapshot readConcern is specified. - assert.docEq(null, sessionColl.findOne({_id: "insert-1"})); +// Performing a read first should work when snapshot readConcern is specified. 
+assert.docEq(null, sessionColl.findOne({_id: "insert-1"}));

- assert.commandWorked(sessionColl.insert({_id: "insert-1", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-1", a: 0}));

- assert.commandWorked(sessionColl.insert({_id: "insert-2", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-2", a: 0}));

- assert.commandWorked(sessionColl.insert({_id: "insert-3", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-3", a: 0}));

- assert.commandWorked(sessionColl.update({_id: "insert-1"}, {$inc: {a: 1}}));
+assert.commandWorked(sessionColl.update({_id: "insert-1"}, {$inc: {a: 1}}));

- assert.commandWorked(sessionColl.deleteOne({_id: "insert-2"}));
+assert.commandWorked(sessionColl.deleteOne({_id: "insert-2"}));

- sessionColl.findAndModify({query: {_id: "insert-3"}, update: {$set: {a: 2}}});
+sessionColl.findAndModify({query: {_id: "insert-3"}, update: {$set: {a: 2}}});

- // Try to find a document within a transaction.
- let cursor = sessionColl.find({_id: "insert-1"});
- assert.docEq({_id: "insert-1", a: 1}, cursor.next());
- assert(!cursor.hasNext());
+// Try to find a document within a transaction.
+let cursor = sessionColl.find({_id: "insert-1"});
+assert.docEq({_id: "insert-1", a: 1}, cursor.next());
+assert(!cursor.hasNext());

- // Try to find a document using findOne within a transaction
- assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
+// Try to find a document using findOne within a transaction
+assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));

- // Find a document with the aggregation shell helper within a transaction.
- cursor = sessionColl.aggregate({$match: {_id: "insert-1"}});
- assert.docEq({_id: "insert-1", a: 1}, cursor.next());
- assert(!cursor.hasNext());
+// Find a document with the aggregation shell helper within a transaction.
+cursor = sessionColl.aggregate({$match: {_id: "insert-1"}});
+assert.docEq({_id: "insert-1", a: 1}, cursor.next());
+assert(!cursor.hasNext());

- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());

- // Make sure the correct documents exist after committing the transaction.
- assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
- assert.eq({_id: "insert-3", a: 2}, sessionColl.findOne({_id: "insert-3"}));
- assert.eq(null, sessionColl.findOne({_id: "insert-2"}));
+// Make sure the correct documents exist after committing the transaction.
+assert.eq({_id: "insert-1", a: 1}, sessionColl.findOne({_id: "insert-1"}));
+assert.eq({_id: "insert-3", a: 2}, sessionColl.findOne({_id: "insert-3"}));
+assert.eq(null, sessionColl.findOne({_id: "insert-2"}));

- jsTestLog("Insert a doc and abort transaction.");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+jsTestLog("Insert a doc and abort transaction.");
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});

- assert.commandWorked(sessionColl.insert({_id: "insert-4", a: 0}));
+assert.commandWorked(sessionColl.insert({_id: "insert-4", a: 0}));

- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());

- // Verify that we cannot see the document we tried to insert.
+assert.eq(null, sessionColl.findOne({_id: "insert-4"})); - jsTestLog("Bulk insert and update operations within transaction."); +jsTestLog("Bulk insert and update operations within transaction."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - let bulk = sessionColl.initializeUnorderedBulkOp(); - bulk.insert({_id: "bulk-1"}); - bulk.insert({_id: "bulk-2"}); - bulk.find({_id: "bulk-1"}).updateOne({$set: {status: "bulk"}}); - bulk.find({_id: "bulk-2"}).updateOne({$set: {status: "bulk"}}); - assert.commandWorked(bulk.execute()); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +let bulk = sessionColl.initializeUnorderedBulkOp(); +bulk.insert({_id: "bulk-1"}); +bulk.insert({_id: "bulk-2"}); +bulk.find({_id: "bulk-1"}).updateOne({$set: {status: "bulk"}}); +bulk.find({_id: "bulk-2"}).updateOne({$set: {status: "bulk"}}); +assert.commandWorked(bulk.execute()); +assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq({_id: "bulk-1", status: "bulk"}, sessionColl.findOne({_id: "bulk-1"})); - assert.eq({_id: "bulk-2", status: "bulk"}, sessionColl.findOne({_id: "bulk-2"})); +assert.eq({_id: "bulk-1", status: "bulk"}, sessionColl.findOne({_id: "bulk-1"})); +assert.eq({_id: "bulk-2", status: "bulk"}, sessionColl.findOne({_id: "bulk-2"})); - jsTestLog("Bulk delete operations within transaction."); +jsTestLog("Bulk delete operations within transaction."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - bulk = sessionColl.initializeUnorderedBulkOp(); - bulk.find({_id: "bulk-1"}).removeOne(); - bulk.find({_id: "bulk-2"}).removeOne(); - assert.commandWorked(bulk.execute()); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +bulk = sessionColl.initializeUnorderedBulkOp(); +bulk.find({_id: "bulk-1"}).removeOne(); +bulk.find({_id: "bulk-2"}).removeOne(); +assert.commandWorked(bulk.execute()); +assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq(null, sessionColl.findOne({_id: "bulk-1"})); - assert.eq(null, sessionColl.findOne({_id: "bulk-2"})); +assert.eq(null, sessionColl.findOne({_id: "bulk-1"})); +assert.eq(null, sessionColl.findOne({_id: "bulk-2"})); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/multi_statement_transaction_write_error.js b/jstests/core/txns/multi_statement_transaction_write_error.js index c8828a2d735..bea6fda153d 100644 --- a/jstests/core/txns/multi_statement_transaction_write_error.js +++ b/jstests/core/txns/multi_statement_transaction_write_error.js @@ -4,187 +4,187 @@ * @tags: [requires_capped, uses_transactions] */ (function() { - "use strict"; - - const dbName = "test"; - const testDB = db.getSiblingDB(dbName); - const testCollName = "transactions_write_errors"; - const cappedCollName = "capped_transactions_write_errors"; - const testColl = testDB[testCollName]; - const cappedColl = testDB[cappedCollName]; - - testDB.runCommand({drop: testCollName, writeConcern: {w: "majority"}}); - testDB.runCommand({drop: cappedCollName, writeConcern: {w: "majority"}}); - - assert.commandWorked(testDB.createCollection(testColl.getName())); - assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 1000})); - - // Assert that "cmd" fails with error "code" after "nExpected" operations, or 
fail with "msg" - function runInTxn({cmd, msg, code, nExpected, expectedErrorIndex}) { - const session = db.getMongo().startSession(); - session.startTransaction(); +"use strict"; + +const dbName = "test"; +const testDB = db.getSiblingDB(dbName); +const testCollName = "transactions_write_errors"; +const cappedCollName = "capped_transactions_write_errors"; +const testColl = testDB[testCollName]; +const cappedColl = testDB[cappedCollName]; + +testDB.runCommand({drop: testCollName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: cappedCollName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.createCollection(testColl.getName())); +assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 1000})); + +// Assert that "cmd" fails with error "code" after "nExpected" operations, or fail with "msg" +function runInTxn({cmd, msg, code, nExpected, expectedErrorIndex}) { + const session = db.getMongo().startSession(); + session.startTransaction(); + try { + var res = session.getDatabase(dbName).runCommand(cmd); try { - var res = session.getDatabase(dbName).runCommand(cmd); - try { - // Writes reply with ok: 1 and a writeErrors array - assert.eq(res.ok, 1, "reply.ok : " + msg); - assert.eq(res.n, nExpected, "reply.n : " + msg); - // The first and only error comes after nExpected successful writes in the batch - assert.eq(res.writeErrors.length, 1, "number of write errors : " + msg); - assert.eq(res.writeErrors[0].index, expectedErrorIndex, "error index : " + msg); - assert.eq(res.writeErrors[0].code, code, "error code : " + msg); - assert(!res.hasOwnProperty("errorLabels"), msg); - } catch (e) { - printjson(cmd); - printjson(res); - throw e; - } - } finally { - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); + // Writes reply with ok: 1 and a writeErrors array + assert.eq(res.ok, 1, "reply.ok : " + msg); + assert.eq(res.n, nExpected, "reply.n : " + msg); + // The first and only error comes after nExpected successful writes in the batch + assert.eq(res.writeErrors.length, 1, "number of write errors : " + msg); + assert.eq(res.writeErrors[0].index, expectedErrorIndex, "error index : " + msg); + assert.eq(res.writeErrors[0].code, code, "error code : " + msg); + assert(!res.hasOwnProperty("errorLabels"), msg); + } catch (e) { + printjson(cmd); + printjson(res); + throw e; } + } finally { + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); } +} + +// Run "cmdName" against each collection in "collNames", with combos of "goodOp" and "badOp" in +// a batch, it should fail with "code". +function exerciseWriteInTxn({collNames, cmdName, goodOp, badOp, code}) { + for (let collName of collNames) { + for (let ordered of [true, false]) { + let docsField; + switch (cmdName) { + case "insert": + docsField = "documents"; + break; + case "update": + docsField = "updates"; + break; + case "delete": + docsField = "deletes"; + break; + } - // Run "cmdName" against each collection in "collNames", with combos of "goodOp" and "badOp" in - // a batch, it should fail with "code". 
- function exerciseWriteInTxn({collNames, cmdName, goodOp, badOp, code}) { - for (let collName of collNames) { - for (let ordered of[true, false]) { - let docsField; - switch (cmdName) { - case "insert": - docsField = "documents"; - break; - case "update": - docsField = "updates"; - break; - case "delete": - docsField = "deletes"; - break; + // Construct command like {insert: collectionName, documents: [...]} + let newCmd = () => { + var cmd = {}; + cmd[cmdName] = collName; + if (!ordered) { + cmd.ordered = false; } - // Construct command like {insert: collectionName, documents: [...]} - let newCmd = () => { - var cmd = {}; - cmd[cmdName] = collName; - if (!ordered) { - cmd.ordered = false; - } - - return cmd; - }; - - var cmd = newCmd(); - cmd[docsField] = [badOp]; - runInTxn({ - cmd: cmd, - msg: `one bad ${cmdName} on ${collName} collection, ordered ${ordered}`, - code: code, - nExpected: 0, - expectedErrorIndex: 0 - }); - - cmd = newCmd(); - cmd[docsField] = [goodOp, badOp]; - let expected = 1; - if (cmdName == 'delete' && db.getMongo().isMongos()) { - // The bad delete write will cause mongos to fail during targetting and not - // do any write at all. - expected = 0; - } - runInTxn({ - cmd: cmd, - msg: - `one bad ${cmdName} after a good one on ${collName} collection, ordered ${ordered}`, - code: code, - nExpected: expected, - expectedErrorIndex: 1 - }); - - cmd = newCmd(); - cmd[docsField] = [goodOp, goodOp, badOp]; - expected = 2; - if (cmdName == 'delete' && db.getMongo().isMongos()) { - // The bad delete write will cause mongos to fail during targetting and not - // do any write at all. - expected = 0; - } - runInTxn({ - cmd: cmd, - msg: - `one bad ${cmdName} after two good ones on ${collName} collection, ordered ${ordered}`, - code: code, - nExpected: expected, - expectedErrorIndex: 2 - }); - - cmd = newCmd(); - cmd[docsField] = [goodOp, goodOp, badOp, badOp]; - expected = 2; - if (cmdName == 'delete' && db.getMongo().isMongos()) { - // The bad delete write will cause mongos to fail during targetting and not - // do any write at all. - expected = 0; - } - runInTxn({ - cmd: cmd, - msg: - `two bad ${cmdName}s after two good ones on ${collName} collection, ordered ${ordered}`, - code: code, - nExpected: expected, - expectedErrorIndex: 2 - }); - - cmd = newCmd(); - cmd[docsField] = [badOp, goodOp]; - runInTxn({ - cmd: cmd, - msg: - `good ${cmdName} after a bad one on ${collName} collection, ordered ${ordered}`, - code: code, - nExpected: 0, - expectedErrorIndex: 0 - }); + return cmd; + }; + + var cmd = newCmd(); + cmd[docsField] = [badOp]; + runInTxn({ + cmd: cmd, + msg: `one bad ${cmdName} on ${collName} collection, ordered ${ordered}`, + code: code, + nExpected: 0, + expectedErrorIndex: 0 + }); + + cmd = newCmd(); + cmd[docsField] = [goodOp, badOp]; + let expected = 1; + if (cmdName == 'delete' && db.getMongo().isMongos()) { + // The bad delete write will cause mongos to fail during targetting and not + // do any write at all. + expected = 0; } + runInTxn({ + cmd: cmd, + msg: `one bad ${cmdName} after a good one on ${collName} collection, ordered ${ + ordered}`, + code: code, + nExpected: expected, + expectedErrorIndex: 1 + }); + + cmd = newCmd(); + cmd[docsField] = [goodOp, goodOp, badOp]; + expected = 2; + if (cmdName == 'delete' && db.getMongo().isMongos()) { + // The bad delete write will cause mongos to fail during targetting and not + // do any write at all. 
+ expected = 0; + } + runInTxn({ + cmd: cmd, + msg: `one bad ${cmdName} after two good ones on ${collName} collection, ordered ${ + ordered}`, + code: code, + nExpected: expected, + expectedErrorIndex: 2 + }); + + cmd = newCmd(); + cmd[docsField] = [goodOp, goodOp, badOp, badOp]; + expected = 2; + if (cmdName == 'delete' && db.getMongo().isMongos()) { + // The bad delete write will cause mongos to fail during targetting and not + // do any write at all. + expected = 0; + } + runInTxn({ + cmd: cmd, + msg: `two bad ${cmdName}s after two good ones on ${collName} collection, ordered ${ + ordered}`, + code: code, + nExpected: expected, + expectedErrorIndex: 2 + }); + + cmd = newCmd(); + cmd[docsField] = [badOp, goodOp]; + runInTxn({ + cmd: cmd, + msg: + `good ${cmdName} after a bad one on ${collName} collection, ordered ${ordered}`, + code: code, + nExpected: 0, + expectedErrorIndex: 0 + }); } } - - // Set up a document so we can get a DuplicateKey error trying to insert it again. - assert.commandWorked(testColl.insert({_id: 5})); - exerciseWriteInTxn({ - collNames: [testCollName], - cmdName: "insert", - goodOp: {}, - badOp: {_id: 5}, - code: ErrorCodes.DuplicateKey - }); - - // Set up a document with a string field so we can update it but fail to increment it. - assert.commandWorked(testColl.insertOne({_id: 0, x: "string"})); - exerciseWriteInTxn({ - collNames: [testCollName], - cmdName: "update", - goodOp: {q: {_id: 0}, u: {$set: {x: "STRING"}}}, - badOp: {q: {_id: 0}, u: {$inc: {x: 1}}}, - code: ErrorCodes.TypeMismatch - }); - - // Give the good delete operation some documents to delete - assert.commandWorked(testColl.insertMany([{}, {}, {}, {}])); - exerciseWriteInTxn({ - collNames: [testCollName], - cmdName: "delete", - goodOp: {q: {}, limit: 1}, - badOp: {q: {$foo: 1}, limit: 1}, - code: ErrorCodes.BadValue - }); - - // Capped deletes are prohibited - runInTxn({ - cmd: {delete: cappedCollName, deletes: [{q: {}, limit: 1}]}, - msg: `delete from ${cappedCollName}`, - code: ErrorCodes.IllegalOperation, - nExpected: 0, - expectedErrorIndex: 0 - }); +} + +// Set up a document so we can get a DuplicateKey error trying to insert it again. +assert.commandWorked(testColl.insert({_id: 5})); +exerciseWriteInTxn({ + collNames: [testCollName], + cmdName: "insert", + goodOp: {}, + badOp: {_id: 5}, + code: ErrorCodes.DuplicateKey +}); + +// Set up a document with a string field so we can update it but fail to increment it. 
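// A sketch (field values illustrative, not a captured server response) of the reply
// shape that runInTxn() asserts on when an ordered batch fails on its third write
// inside a transaction, as in the [goodOp, goodOp, badOp] batches this file builds:
//
//   {
//     ok: 1,               // the command itself is acknowledged
//     n: 2,                // writes applied before the failing one
//     writeErrors: [
//       {index: 2, code: ErrorCodes.DuplicateKey, errmsg: "..."}
//     ]
//     // and no "errorLabels" field
//   }
//
// Continuing the original test: seed a document whose string field cannot be $inc'd.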
+assert.commandWorked(testColl.insertOne({_id: 0, x: "string"})); +exerciseWriteInTxn({ + collNames: [testCollName], + cmdName: "update", + goodOp: {q: {_id: 0}, u: {$set: {x: "STRING"}}}, + badOp: {q: {_id: 0}, u: {$inc: {x: 1}}}, + code: ErrorCodes.TypeMismatch +}); + +// Give the good delete operation some documents to delete +assert.commandWorked(testColl.insertMany([{}, {}, {}, {}])); +exerciseWriteInTxn({ + collNames: [testCollName], + cmdName: "delete", + goodOp: {q: {}, limit: 1}, + badOp: {q: {$foo: 1}, limit: 1}, + code: ErrorCodes.BadValue +}); + +// Capped deletes are prohibited +runInTxn({ + cmd: {delete: cappedCollName, deletes: [{q: {}, limit: 1}]}, + msg: `delete from ${cappedCollName}`, + code: ErrorCodes.IllegalOperation, + nExpected: 0, + expectedErrorIndex: 0 +}); }()); diff --git a/jstests/core/txns/multi_update_in_transaction.js b/jstests/core/txns/multi_update_in_transaction.js index 3b309194a73..c6d9f3e994c 100644 --- a/jstests/core/txns/multi_update_in_transaction.js +++ b/jstests/core/txns/multi_update_in_transaction.js @@ -1,91 +1,92 @@ // Test transactions including multi-updates. // @tags: [uses_transactions] (function() { - "use strict"; - - const dbName = "test"; - const collName = "multi_update_in_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - jsTest.log("Prepopulate the collection."); - assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}], - {writeConcern: {w: "majority"}})); - - jsTest.log("Do an empty multi-update."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Update 0 docs. - let res = sessionColl.update({a: 99}, {$set: {b: 1}}, {multi: true}); - assert.eq(0, res.nModified); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]); - - assert.commandWorked(session.commitTransaction_forTesting()); - - jsTest.log("Do a single-result multi-update."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Update 1 doc. - res = sessionColl.update({a: 1}, {$set: {b: 1}}, {multi: true}); - assert.eq(1, res.nModified); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1, b: 1}]); - - assert.commandWorked(session.commitTransaction_forTesting()); - - jsTest.log("Do a multiple-result multi-update."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Update 2 docs. - res = sessionColl.update({a: 0}, {$set: {b: 2}}, {multi: true}); - assert.eq(2, res.nModified); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), - [{_id: 0, a: 0, b: 2}, {_id: 1, a: 0, b: 2}, {_id: 2, a: 1, b: 1}]); - - assert.commandWorked(session.commitTransaction_forTesting()); - - jsTest.log("Do a multiple-query multi-update."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Bulk update 3 docs. 
- let bulk = sessionColl.initializeUnorderedBulkOp(); - bulk.find({a: 0}).update({$set: {c: 1}}); - bulk.find({_id: 2}).update({$set: {c: 2}}); - res = assert.commandWorked(bulk.execute()); - assert.eq(3, res.nModified); - - res = sessionColl.find({}); - assert.sameMembers( - res.toArray(), - [{_id: 0, a: 0, b: 2, c: 1}, {_id: 1, a: 0, b: 2, c: 1}, {_id: 2, a: 1, b: 1, c: 2}]); - - assert.commandWorked(session.commitTransaction_forTesting()); - - jsTest.log("Do a multi-update with upsert."); - session.startTransaction({writeConcern: {w: "majority"}}); - - // Upsert 1 doc. - res = sessionColl.update({_id: 3}, {$set: {d: 1}}, {multi: true, upsert: true}); - assert.eq(1, res.nUpserted); - res = sessionColl.find({}); - assert.sameMembers(res.toArray(), [ - {_id: 0, a: 0, b: 2, c: 1}, - {_id: 1, a: 0, b: 2, c: 1}, - {_id: 2, a: 1, b: 1, c: 2}, - {_id: 3, d: 1} - ]); - - assert.commandWorked(session.commitTransaction_forTesting()); +"use strict"; + +const dbName = "test"; +const collName = "multi_update_in_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +jsTest.log("Prepopulate the collection."); +assert.writeOK(testColl.insert([{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}], + {writeConcern: {w: "majority"}})); + +jsTest.log("Do an empty multi-update."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Update 0 docs. +let res = sessionColl.update({a: 99}, {$set: {b: 1}}, {multi: true}); +assert.eq(0, res.nModified); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1}]); + +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTest.log("Do a single-result multi-update."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Update 1 doc. +res = sessionColl.update({a: 1}, {$set: {b: 1}}, {multi: true}); +assert.eq(1, res.nModified); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), [{_id: 0, a: 0}, {_id: 1, a: 0}, {_id: 2, a: 1, b: 1}]); + +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTest.log("Do a multiple-result multi-update."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Update 2 docs. +res = sessionColl.update({a: 0}, {$set: {b: 2}}, {multi: true}); +assert.eq(2, res.nModified); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), + [{_id: 0, a: 0, b: 2}, {_id: 1, a: 0, b: 2}, {_id: 2, a: 1, b: 1}]); + +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTest.log("Do a multiple-query multi-update."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Bulk update 3 docs. 
+let bulk = sessionColl.initializeUnorderedBulkOp(); +bulk.find({a: 0}).update({$set: {c: 1}}); +bulk.find({_id: 2}).update({$set: {c: 2}}); +res = assert.commandWorked(bulk.execute()); +assert.eq(3, res.nModified); + +res = sessionColl.find({}); +assert.sameMembers( + res.toArray(), + [{_id: 0, a: 0, b: 2, c: 1}, {_id: 1, a: 0, b: 2, c: 1}, {_id: 2, a: 1, b: 1, c: 2}]); + +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTest.log("Do a multi-update with upsert."); +session.startTransaction({writeConcern: {w: "majority"}}); + +// Upsert 1 doc. +res = sessionColl.update({_id: 3}, {$set: {d: 1}}, {multi: true, upsert: true}); +assert.eq(1, res.nUpserted); +res = sessionColl.find({}); +assert.sameMembers(res.toArray(), [ + {_id: 0, a: 0, b: 2, c: 1}, + {_id: 1, a: 0, b: 2, c: 1}, + {_id: 2, a: 1, b: 1, c: 2}, + {_id: 3, d: 1} +]); + +assert.commandWorked(session.commitTransaction_forTesting()); }()); diff --git a/jstests/core/txns/no_implicit_collection_creation_in_txn.js b/jstests/core/txns/no_implicit_collection_creation_in_txn.js index 40de017b421..42494d50958 100644 --- a/jstests/core/txns/no_implicit_collection_creation_in_txn.js +++ b/jstests/core/txns/no_implicit_collection_creation_in_txn.js @@ -2,107 +2,102 @@ // multi-document transaction. // @tags: [uses_transactions] (function() { - "use strict"; - - const dbName = "test"; - const collName = "no_implicit_collection_creation_in_txn"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - jsTest.log("Cannot implicitly create a collection in a transaction using insert."); - - // Insert succeeds when the collection exists. - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - sessionColl.insert({_id: "doc"}); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq({_id: "doc"}, testColl.findOne({_id: "doc"})); - - // Insert fails when the collection does not exist. - assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - assert.commandFailedWithCode(sessionColl.insert({_id: "doc"}), - ErrorCodes.OperationNotSupportedInTransaction); - - // Committing the transaction should fail, since it should never have been started. - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - assert.eq(null, testColl.findOne({_id: "doc"})); - - jsTest.log("Cannot implicitly create a collection in a transaction using update."); - - // Update with upsert=true succeeds when the collection exists. - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"})); - - // Update with upsert=true fails when the collection does not exist. 
- assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - assert.commandFailedWithCode( - sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}), - ErrorCodes.OperationNotSupportedInTransaction); - - // Committing the transaction should fail, since it should never have been started. - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - assert.eq(null, testColl.findOne({_id: "doc"})); - - // Update with upsert=false succeeds when the collection does not exist. - session.startTransaction({writeConcern: {w: "majority"}}); - assert.commandWorked( - sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: false})); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq(null, testColl.findOne({_id: "doc"})); - - jsTest.log("Cannot implicitly create a collection in a transaction using findAndModify."); - - // findAndModify with upsert=true succeeds when the collection exists. - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - let res = sessionColl.findAndModify( - {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true}); - assert.eq(null, res); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"})); - - // findAndModify with upsert=true fails when the collection does not exist. - assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); - - session.startTransaction({writeConcern: {w: "majority"}}); - res = assert.throws(() => sessionColl.findAndModify( - {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true})); - assert.commandFailedWithCode(res, ErrorCodes.OperationNotSupportedInTransaction); - - // Committing the transaction should fail, since it should never have been started. - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - assert.eq(null, testColl.findOne({_id: "doc"})); - - // findAndModify with upsert=false succeeds when the collection does not exist. - session.startTransaction({writeConcern: {w: "majority"}}); - res = sessionColl.findAndModify( - {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: false}); - assert.eq(null, res); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq(null, testColl.findOne({_id: "doc"})); - - session.endSession(); +"use strict"; + +const dbName = "test"; +const collName = "no_implicit_collection_creation_in_txn"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +jsTest.log("Cannot implicitly create a collection in a transaction using insert."); + +// Insert succeeds when the collection exists. 
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +sessionColl.insert({_id: "doc"}); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.eq({_id: "doc"}, testColl.findOne({_id: "doc"})); + +// Insert fails when the collection does not exist. +assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +assert.commandFailedWithCode(sessionColl.insert({_id: "doc"}), + ErrorCodes.OperationNotSupportedInTransaction); + +// Committing the transaction should fail, since it should never have been started. +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); +assert.eq(null, testColl.findOne({_id: "doc"})); + +jsTest.log("Cannot implicitly create a collection in a transaction using update."); + +// Update with upsert=true succeeds when the collection exists. +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"})); + +// Update with upsert=true fails when the collection does not exist. +assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +assert.commandFailedWithCode( + sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: true}), + ErrorCodes.OperationNotSupportedInTransaction); + +// Committing the transaction should fail, since it should never have been started. +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); +assert.eq(null, testColl.findOne({_id: "doc"})); + +// Update with upsert=false succeeds when the collection does not exist. +session.startTransaction({writeConcern: {w: "majority"}}); +assert.commandWorked(sessionColl.update({_id: "doc"}, {$set: {updated: true}}, {upsert: false})); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.eq(null, testColl.findOne({_id: "doc"})); + +jsTest.log("Cannot implicitly create a collection in a transaction using findAndModify."); + +// findAndModify with upsert=true succeeds when the collection exists. +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +let res = + sessionColl.findAndModify({query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true}); +assert.eq(null, res); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.eq({_id: "doc", updated: true}, testColl.findOne({_id: "doc"})); + +// findAndModify with upsert=true fails when the collection does not exist. +assert.commandWorked(testDB.runCommand({drop: collName, writeConcern: {w: "majority"}})); + +session.startTransaction({writeConcern: {w: "majority"}}); +res = assert.throws(() => sessionColl.findAndModify( + {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: true})); +assert.commandFailedWithCode(res, ErrorCodes.OperationNotSupportedInTransaction); + +// Committing the transaction should fail, since it should never have been started. 
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); +assert.eq(null, testColl.findOne({_id: "doc"})); + +// findAndModify with upsert=false succeeds when the collection does not exist. +session.startTransaction({writeConcern: {w: "majority"}}); +res = sessionColl.findAndModify( + {query: {_id: "doc"}, update: {$set: {updated: true}}, upsert: false}); +assert.eq(null, res); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.eq(null, testColl.findOne({_id: "doc"})); + +session.endSession(); }()); diff --git a/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js b/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js index 2aa272a0d2b..ce41fb98620 100644 --- a/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js +++ b/jstests/core/txns/no_new_transactions_when_prepared_transaction_in_progress.js @@ -5,51 +5,52 @@ */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "no_new_transactions_when_prepared_transaction_in_progress"; - const testDB = db.getSiblingDB(dbName); - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); - - jsTestLog( - "Test starting a new transaction while an existing prepared transaction exists on the " + - "session."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: "insert-1"})); - PrepareHelpers.prepareTransaction(session); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "cannot_start"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(1), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - }), - ErrorCodes.PreparedTransactionInProgress); - - jsTestLog( - "Test error precedence when executing a malformed command during a prepared transaction."); - // The following command specifies txnNumber: 2 without startTransaction: true. 
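// A sketch (collection name and _id are illustrative) of the per-statement fields that
// the raw runCommand() calls in these tests attach. Only the first statement of a
// transaction carries startTransaction: true and the transaction's readConcern; later
// statements repeat txnNumber, stmtId, and autocommit: false.
//
//   sessionDb.runCommand({
//     insert: "someColl",
//     documents: [{_id: "first-stmt"}],
//     readConcern: {level: "snapshot"},
//     txnNumber: NumberLong(1),
//     stmtId: NumberInt(0),
//     startTransaction: true,  // only on the first statement
//     autocommit: false
//   });
//
// Continuing the original test: a command that names a new txnNumber without
// startTransaction: true must fail with NoSuchTransaction.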
- assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "no_such_txn"}], - txnNumber: NumberLong(2), - stmtId: NumberInt(0), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - assert.commandWorked(session.abortTransaction_forTesting()); - - session.endSession(); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "no_new_transactions_when_prepared_transaction_in_progress"; +const testDB = db.getSiblingDB(dbName); + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); + +jsTestLog("Test starting a new transaction while an existing prepared transaction exists on the " + + "session."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: "insert-1"})); +PrepareHelpers.prepareTransaction(session); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "cannot_start"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(1), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +}), + ErrorCodes.PreparedTransactionInProgress); + +jsTestLog( + "Test error precedence when executing a malformed command during a prepared transaction."); +// The following command specifies txnNumber: 2 without startTransaction: true. +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "no_such_txn"}], + txnNumber: NumberLong(2), + stmtId: NumberInt(0), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); +assert.commandWorked(session.abortTransaction_forTesting()); + +session.endSession(); }()); diff --git a/jstests/core/txns/no_read_concern_snapshot_outside_txn.js b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js index 2b69510ecde..7840a538b74 100644 --- a/jstests/core/txns/no_read_concern_snapshot_outside_txn.js +++ b/jstests/core/txns/no_read_concern_snapshot_outside_txn.js @@ -5,67 +5,69 @@ */ (function() { - "use strict"; - const dbName = "test"; - const collName = "no_read_concern_snapshot_outside_txn"; - const testDB = db.getSiblingDB(dbName); +"use strict"; +const dbName = "test"; +const collName = "no_read_concern_snapshot_outside_txn"; +const testDB = db.getSiblingDB(dbName); - // Set up the test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +// Set up the test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - // Initiate the session. - const sessionOptions = {causalConsistency: false}; - let session = db.getMongo().startSession(sessionOptions); - let sessionDb = session.getDatabase(dbName); - let txnNumber = 0; - let stmtId = 0; +// Initiate the session. 
+const sessionOptions = { + causalConsistency: false +}; +let session = db.getMongo().startSession(sessionOptions); +let sessionDb = session.getDatabase(dbName); +let txnNumber = 0; +let stmtId = 0; - function tryCommands({testDB, message}) { - jsTestLog("Verify that inserts cannot use readConcern snapshot " + message); - let cmd = { - insert: collName, - documents: [{_id: 0}], - readConcern: {level: "snapshot"}, - }; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); +function tryCommands({testDB, message}) { + jsTestLog("Verify that inserts cannot use readConcern snapshot " + message); + let cmd = { + insert: collName, + documents: [{_id: 0}], + readConcern: {level: "snapshot"}, + }; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - jsTestLog("Verify that updates cannot use readConcern snapshot " + message); - cmd = { - update: collName, - updates: [{q: {_id: 0}, u: {$set: {x: 1}}}], - readConcern: {level: "snapshot"}, - }; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); + jsTestLog("Verify that updates cannot use readConcern snapshot " + message); + cmd = { + update: collName, + updates: [{q: {_id: 0}, u: {$set: {x: 1}}}], + readConcern: {level: "snapshot"}, + }; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - jsTestLog("Verify that deletes cannot use readConcern snapshot " + message); - cmd = { - delete: collName, - deletes: [{q: {_id: 0}, limit: 1}], - readConcern: {level: "snapshot"}, - }; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); + jsTestLog("Verify that deletes cannot use readConcern snapshot " + message); + cmd = { + delete: collName, + deletes: [{q: {_id: 0}, limit: 1}], + readConcern: {level: "snapshot"}, + }; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - jsTestLog("Verify that findAndModify cannot use readConcern snapshot " + message); - cmd = { - findAndModify: collName, - query: {_id: 0}, - remove: true, - readConcern: {level: "snapshot"}, - }; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); + jsTestLog("Verify that findAndModify cannot use readConcern snapshot " + message); + cmd = { + findAndModify: collName, + query: {_id: 0}, + remove: true, + readConcern: {level: "snapshot"}, + }; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - jsTestLog("Verify that finds cannot use readConcern snapshot " + message); - cmd = {find: collName, readConcern: {level: "snapshot"}}; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); + jsTestLog("Verify that finds cannot use readConcern snapshot " + message); + cmd = {find: collName, readConcern: {level: "snapshot"}}; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - jsTestLog("Verify that aggregate cannot use readConcern snapshot " + message); - cmd = {aggregate: collName, pipeline: [], readConcern: {level: "snapshot"}}; - assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); - } - tryCommands({testDB: sessionDb, message: "in session."}); - tryCommands({testDB: testDB, message: "outside session."}); + jsTestLog("Verify that aggregate cannot use readConcern snapshot " + message); + cmd = {aggregate: collName, pipeline: [], readConcern: {level: "snapshot"}}; + assert.commandFailedWithCode(testDB.runCommand(cmd), ErrorCodes.InvalidOptions); +} +tryCommands({testDB: 
sessionDb, message: "in session."}); +tryCommands({testDB: testDB, message: "outside session."}); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/no_read_or_write_concern_inside_txn.js b/jstests/core/txns/no_read_or_write_concern_inside_txn.js index 31e29117f81..b8333eed92f 100644 --- a/jstests/core/txns/no_read_or_write_concern_inside_txn.js +++ b/jstests/core/txns/no_read_or_write_concern_inside_txn.js @@ -6,156 +6,158 @@ */ (function() { - "use strict"; - const dbName = "test"; - const collName = "no_read_or_write_concerns_inside_txn"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - // Set up the test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - - // Initiate the session. - const sessionOptions = {causalConsistency: false}; - let session = db.getMongo().startSession(sessionOptions); - let sessionDb = session.getDatabase(dbName); - let txnNumber = 0; - let stmtId = 0; - - jsTestLog("Starting first transaction"); - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 0}], - readConcern: {level: "snapshot"}, - startTransaction: true, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - })); - - jsTestLog("Attempting to insert with readConcern: snapshot within a transaction."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 1}], - readConcern: {level: "snapshot"}, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - - jsTestLog("Attempting to insert with readConcern:majority within a transaction."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 2}], - readConcern: {level: "majority"}, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - - jsTestLog("Attempting to insert with readConcern:local within a transaction."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 3}], - readConcern: {level: "local"}, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - - jsTestLog("Transaction should still commit."); - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - autocommit: false, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - })); - assert.sameMembers(testColl.find().toArray(), [{_id: 0}]); - - // Drop and re-create collection to keep parts of test isolated from one another. 
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - - txnNumber++; - stmtId = 0; - - jsTestLog("Attempting to start transaction with local writeConcern."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 4}], - readConcern: {level: "snapshot"}, - writeConcern: {w: 1}, - startTransaction: true, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - txnNumber++; - stmtId = 0; - - jsTestLog("Attempting to start transaction with majority writeConcern."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 5}], - readConcern: {level: "snapshot"}, - writeConcern: {w: "majority"}, - startTransaction: true, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - txnNumber++; - stmtId = 0; - - jsTestLog("Starting transaction normally."); - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 6}], - readConcern: {level: "snapshot"}, - startTransaction: true, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - })); - - jsTestLog("Attempting to write within transaction with majority write concern."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 7}], - writeConcern: {w: "majority"}, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - - jsTestLog("Attempting to write within transaction with local write concern."); - assert.commandFailedWithCode(sessionDb.runCommand({ - insert: collName, - documents: [{_id: 8}], - writeConcern: {w: 1}, - autocommit: false, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - }), - ErrorCodes.InvalidOptions); - - jsTestLog("Transaction should still commit."); - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - autocommit: false, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(stmtId++) - })); - assert.sameMembers(testColl.find().toArray(), [{_id: 6}]); - session.endSession(); +"use strict"; +const dbName = "test"; +const collName = "no_read_or_write_concerns_inside_txn"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +// Set up the test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); + +// Initiate the session. 
+const sessionOptions = { + causalConsistency: false +}; +let session = db.getMongo().startSession(sessionOptions); +let sessionDb = session.getDatabase(dbName); +let txnNumber = 0; +let stmtId = 0; + +jsTestLog("Starting first transaction"); +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 0}], + readConcern: {level: "snapshot"}, + startTransaction: true, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +})); + +jsTestLog("Attempting to insert with readConcern: snapshot within a transaction."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 1}], + readConcern: {level: "snapshot"}, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); + +jsTestLog("Attempting to insert with readConcern:majority within a transaction."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 2}], + readConcern: {level: "majority"}, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); + +jsTestLog("Attempting to insert with readConcern:local within a transaction."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 3}], + readConcern: {level: "local"}, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); + +jsTestLog("Transaction should still commit."); +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + autocommit: false, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +})); +assert.sameMembers(testColl.find().toArray(), [{_id: 0}]); + +// Drop and re-create collection to keep parts of test isolated from one another. 
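// A sketch, grounded in the commands above, of where concerns are allowed in this test:
// the transaction-wide readConcern rides on the first statement, the writeConcern rides
// on commitTransaction, and per-statement read or write concerns on later statements
// are rejected with InvalidOptions.
//
//   // first statement: readConcern allowed
//   {insert: collName, documents: [{_id: 0}], readConcern: {level: "snapshot"},
//    startTransaction: true, autocommit: false, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0)}
//
//   // later statements: neither readConcern nor writeConcern
//   {insert: collName, documents: [{_id: 1}], autocommit: false,
//    txnNumber: NumberLong(txnNumber), stmtId: NumberInt(1)}
//
//   // commit: writeConcern allowed
//   {commitTransaction: 1, writeConcern: {w: "majority"}, autocommit: false,
//    txnNumber: NumberLong(txnNumber), stmtId: NumberInt(2)}
//
// Continuing the original test: reset the collection between cases.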
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); + +txnNumber++; +stmtId = 0; + +jsTestLog("Attempting to start transaction with local writeConcern."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 4}], + readConcern: {level: "snapshot"}, + writeConcern: {w: 1}, + startTransaction: true, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); +txnNumber++; +stmtId = 0; + +jsTestLog("Attempting to start transaction with majority writeConcern."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 5}], + readConcern: {level: "snapshot"}, + writeConcern: {w: "majority"}, + startTransaction: true, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); +txnNumber++; +stmtId = 0; + +jsTestLog("Starting transaction normally."); +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 6}], + readConcern: {level: "snapshot"}, + startTransaction: true, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +})); + +jsTestLog("Attempting to write within transaction with majority write concern."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 7}], + writeConcern: {w: "majority"}, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); + +jsTestLog("Attempting to write within transaction with local write concern."); +assert.commandFailedWithCode(sessionDb.runCommand({ + insert: collName, + documents: [{_id: 8}], + writeConcern: {w: 1}, + autocommit: false, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +}), + ErrorCodes.InvalidOptions); + +jsTestLog("Transaction should still commit."); +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + autocommit: false, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(stmtId++) +})); +assert.sameMembers(testColl.find().toArray(), [{_id: 6}]); +session.endSession(); }()); diff --git a/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js b/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js index 280b4f0f1bf..808bc8dbb72 100644 --- a/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js +++ b/jstests/core/txns/no_reads_from_system_dot_views_in_txn.js @@ -1,43 +1,40 @@ // Tests that it is illegal to read from system.views within a transaction. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. +load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. - const session = db.getMongo().startSession({causalConsistency: false}); +const session = db.getMongo().startSession({causalConsistency: false}); - // Use a custom database to avoid conflict with other tests that use system.views. - const testDB = session.getDatabase("no_reads_from_system_dot_views_in_txn"); - assert.commandWorked(testDB.dropDatabase()); +// Use a custom database to avoid conflict with other tests that use system.views. 
+const testDB = session.getDatabase("no_reads_from_system_dot_views_in_txn"); +assert.commandWorked(testDB.dropDatabase()); - testDB.runCommand({create: "foo", viewOn: "bar", pipeline: []}); +testDB.runCommand({create: "foo", viewOn: "bar", pipeline: []}); - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode(testDB.runCommand({find: "system.views", filter: {}}), 51071); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(testDB.runCommand({find: "system.views", filter: {}}), 51071); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); - if (FixtureHelpers.isMongos(testDB)) { - // The rest of the test is concerned with a find by UUID which is not supported against - // mongos. - return; - } +if (FixtureHelpers.isMongos(testDB)) { + // The rest of the test is concerned with a find by UUID which is not supported against + // mongos. + return; +} - const collectionInfos = - new DBCommandCursor(testDB, assert.commandWorked(testDB.runCommand({listCollections: 1}))); - let systemViewsUUID = null; - while (collectionInfos.hasNext()) { - const next = collectionInfos.next(); - if (next.name === "system.views") { - systemViewsUUID = next.info.uuid; - } +const collectionInfos = + new DBCommandCursor(testDB, assert.commandWorked(testDB.runCommand({listCollections: 1}))); +let systemViewsUUID = null; +while (collectionInfos.hasNext()) { + const next = collectionInfos.next(); + if (next.name === "system.views") { + systemViewsUUID = next.info.uuid; } - assert.neq(null, systemViewsUUID, "did not find UUID for system.views"); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode(testDB.runCommand({find: systemViewsUUID, filter: {}}), 51070); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); +} +assert.neq(null, systemViewsUUID, "did not find UUID for system.views"); +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(testDB.runCommand({find: systemViewsUUID, filter: {}}), 51070); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); }()); diff --git a/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js b/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js index 3c909583cfe..067bf1482fa 100644 --- a/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js +++ b/jstests/core/txns/no_writes_to_config_transactions_with_prepared_transaction.js @@ -6,95 +6,90 @@ */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - TestData.disableImplicitSessions = true; - - const dbName = "test"; - const collName = "no_writes_to_config_transactions_with_prepared_transaction"; - const collName2 = "no_writes_to_config_transactions_with_prepared_transaction2"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - const config = db.getSiblingDB("config"); - const transactionsColl = config.getCollection("transactions"); - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}}); - 
assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - - const sessionConfigDB = session.getDatabase("config"); - - // Start a transaction using runCommand so that we can run commands on the session but outside - // the transaction. - assert.commandWorked(sessionDB.runCommand({ - insert: collName, - documents: [{_id: 1}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(0), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - assert.commandWorked(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(0), - stmtId: NumberInt(1), - autocommit: false - })); - - let transactionEntry = config.transactions.findOne(); - const txnNum = transactionEntry.txnNum; - - jsTestLog("Test that updates to config.transactions fails when there is a prepared " + - "transaction on the session"); - assert.commandFailedWithCode( - sessionConfigDB.transactions.update({_id: transactionEntry._id}, - {$set: {"txnNumber": NumberLong(23)}}), - 40528); - - // Make sure that the txnNumber wasn't modified. - transactionEntry = config.transactions.findOne(); - assert.eq(transactionEntry.txnNum, NumberLong(txnNum)); - - jsTestLog("Test that deletes to config.transactions fails when there is a prepared " + - "transaction on the session"); - assert.commandFailedWithCode(sessionConfigDB.transactions.remove({_id: transactionEntry._id}), - 40528); - - // Make sure that the entry in config.transactions wasn't removed. - transactionEntry = config.transactions.findOne(); - assert(transactionEntry); - - jsTestLog("Test that dropping config.transactions fails when there is a prepared transaction" + - " on the session"); - assert.commandFailedWithCode(assert.throws(function() { - sessionConfigDB.transactions.drop(); - }), - 40528); - - jsTestLog("Test that we can prepare a transaction on a different session"); - const session2 = db.getMongo().startSession({causalConsistency: false}); - const sessionDB2 = session2.getDatabase(dbName); - const sessionColl2 = sessionDB2.getCollection(collName2); - - session2.startTransaction(); - assert.commandWorked(sessionColl2.insert({_id: 1})); - // This will cause an insertion into config.transactions - PrepareHelpers.prepareTransaction(session2); - - assert.commandWorked(sessionDB.adminCommand( - {abortTransaction: 1, txnNumber: NumberLong(0), stmtid: NumberInt(2), autocommit: false})); - session.endSession(); - - assert.commandWorked(session2.abortTransaction_forTesting()); - session2.endSession(); - +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +TestData.disableImplicitSessions = true; + +const dbName = "test"; +const collName = "no_writes_to_config_transactions_with_prepared_transaction"; +const collName2 = "no_writes_to_config_transactions_with_prepared_transaction2"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +const config = db.getSiblingDB("config"); +const transactionsColl = config.getCollection("transactions"); + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}})); + +const 
session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); + +const sessionConfigDB = session.getDatabase("config"); + +// Start a transaction using runCommand so that we can run commands on the session but outside +// the transaction. +assert.commandWorked(sessionDB.runCommand({ + insert: collName, + documents: [{_id: 1}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(0), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); +assert.commandWorked(sessionDB.adminCommand( + {prepareTransaction: 1, txnNumber: NumberLong(0), stmtId: NumberInt(1), autocommit: false})); + +let transactionEntry = config.transactions.findOne(); +const txnNum = transactionEntry.txnNum; + +jsTestLog("Test that updates to config.transactions fails when there is a prepared " + + "transaction on the session"); +assert.commandFailedWithCode( + sessionConfigDB.transactions.update({_id: transactionEntry._id}, + {$set: {"txnNumber": NumberLong(23)}}), + 40528); + +// Make sure that the txnNumber wasn't modified. +transactionEntry = config.transactions.findOne(); +assert.eq(transactionEntry.txnNum, NumberLong(txnNum)); + +jsTestLog("Test that deletes to config.transactions fails when there is a prepared " + + "transaction on the session"); +assert.commandFailedWithCode(sessionConfigDB.transactions.remove({_id: transactionEntry._id}), + 40528); + +// Make sure that the entry in config.transactions wasn't removed. +transactionEntry = config.transactions.findOne(); +assert(transactionEntry); + +jsTestLog("Test that dropping config.transactions fails when there is a prepared transaction" + + " on the session"); +assert.commandFailedWithCode(assert.throws(function() { + sessionConfigDB.transactions.drop(); + }), + 40528); + +jsTestLog("Test that we can prepare a transaction on a different session"); +const session2 = db.getMongo().startSession({causalConsistency: false}); +const sessionDB2 = session2.getDatabase(dbName); +const sessionColl2 = sessionDB2.getCollection(collName2); + +session2.startTransaction(); +assert.commandWorked(sessionColl2.insert({_id: 1})); +// This will cause an insertion into config.transactions +PrepareHelpers.prepareTransaction(session2); + +assert.commandWorked(sessionDB.adminCommand( + {abortTransaction: 1, txnNumber: NumberLong(0), stmtid: NumberInt(2), autocommit: false})); +session.endSession(); + +assert.commandWorked(session2.abortTransaction_forTesting()); +session2.endSession(); }()); diff --git a/jstests/core/txns/no_writes_to_system_collections_in_txn.js b/jstests/core/txns/no_writes_to_system_collections_in_txn.js index 4b13908773e..a5956723b1e 100644 --- a/jstests/core/txns/no_writes_to_system_collections_in_txn.js +++ b/jstests/core/txns/no_writes_to_system_collections_in_txn.js @@ -1,58 +1,51 @@ // Tests that it is illegal to write to system collections within a transaction. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; - - const session = db.getMongo().startSession({causalConsistency: false}); - - // Use a custom database, to avoid conflict with other tests that use the system.js collection. - const testDB = session.getDatabase("no_writes_system_collections_in_txn"); - assert.commandWorked(testDB.dropDatabase()); - const systemColl = testDB.getCollection("system.js"); - const systemDotViews = testDB.getCollection("system.views"); - - // Ensure that a collection exists with at least one document. 
- assert.commandWorked(systemColl.insert({name: 0}, {writeConcern: {w: "majority"}})); - - session.startTransaction({readConcern: {level: "snapshot"}}); - let error = assert.throws(() => systemColl.findAndModify({query: {}, update: {}})); - assert.commandFailedWithCode(error, 50781); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - error = assert.throws(() => systemColl.findAndModify({query: {}, remove: true})); - assert.commandFailedWithCode(error, 50781); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode(systemColl.insert({name: "new"}), 50791); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode( - systemDotViews.insert({_id: "new.view", viewOn: "bar", pipeline: []}), 50791); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode(systemColl.update({name: 0}, {$set: {name: "jungsoo"}}), 50791); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode( - systemColl.update({name: "nonexistent"}, {$set: {name: "jungsoo"}}, {upsert: true}), 50791); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandFailedWithCode(systemColl.remove({name: 0}), 50791); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - assert.commandWorked(systemColl.remove({_id: {$exists: true}})); - assert.eq(systemColl.find().itcount(), 0); +"use strict"; + +const session = db.getMongo().startSession({causalConsistency: false}); + +// Use a custom database, to avoid conflict with other tests that use the system.js collection. +const testDB = session.getDatabase("no_writes_system_collections_in_txn"); +assert.commandWorked(testDB.dropDatabase()); +const systemColl = testDB.getCollection("system.js"); +const systemDotViews = testDB.getCollection("system.views"); + +// Ensure that a collection exists with at least one document. 
+assert.commandWorked(systemColl.insert({name: 0}, {writeConcern: {w: "majority"}})); + +session.startTransaction({readConcern: {level: "snapshot"}}); +let error = assert.throws(() => systemColl.findAndModify({query: {}, update: {}})); +assert.commandFailedWithCode(error, 50781); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +error = assert.throws(() => systemColl.findAndModify({query: {}, remove: true})); +assert.commandFailedWithCode(error, 50781); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(systemColl.insert({name: "new"}), 50791); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(systemDotViews.insert({_id: "new.view", viewOn: "bar", pipeline: []}), + 50791); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(systemColl.update({name: 0}, {$set: {name: "jungsoo"}}), 50791); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode( + systemColl.update({name: "nonexistent"}, {$set: {name: "jungsoo"}}, {upsert: true}), 50791); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandFailedWithCode(systemColl.remove({name: 0}), 50791); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +assert.commandWorked(systemColl.remove({_id: {$exists: true}})); +assert.eq(systemColl.find().itcount(), 0); }()); diff --git a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js index 74ef4228362..8fb9b6b5a3e 100644 --- a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js +++ b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js @@ -7,94 +7,99 @@ */ (function() { - "use strict"; - - const dbName = "test"; - const collName = "non_transactional_operations_on_session_with_transactions"; - - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - // Clean up and create test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - let txnNumber = 0; - - /** - * Asserts that the given result cursor has the expected contents and that it is exhausted if - * specified. 
- */ - function assertCursorBatchContents(result, expectedContents, isExhausted) { - assert.gt(expectedContents.length, 0, "Non-empty expected contents required."); - assert(result.hasOwnProperty("cursor"), tojson(result)); - assert(result["cursor"].hasOwnProperty("firstBatch"), tojson(result)); - assert.eq(expectedContents.length, result["cursor"]["firstBatch"].length, tojson(result)); - for (let i = 0; i < expectedContents.length; i++) { - assert.docEq(expectedContents[i], result["cursor"]["firstBatch"][i], tojson(result)); - } - assert.eq(isExhausted, result["cursor"]["id"] === 0, tojson(result)); - } +"use strict"; + +const dbName = "test"; +const collName = "non_transactional_operations_on_session_with_transactions"; + +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +// Clean up and create test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const doc1 = {_id: "insert-1"}; - const doc2 = {_id: "insert-2"}; - - // Insert a document in a transaction. - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [doc1], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - startTransaction: true, - autocommit: false - })); - - // Test that we cannot observe the insert outside of the transaction. - assert.eq(null, testColl.findOne(doc1)); - assert.eq(null, sessionColl.findOne(doc1)); - assert.eq(null, testColl.findOne(doc2)); - assert.eq(null, sessionColl.findOne(doc2)); - - // Test that we observe the insert inside of the transaction. - assertCursorBatchContents( - assert.commandWorked(sessionDb.runCommand( - {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})), - [doc1], - false); - - // Insert a document on the session outside of the transaction. - assert.commandWorked(sessionDb.runCommand({insert: collName, documents: [doc2]})); - - // Test that we observe the insert outside of the transaction. - assert.eq(null, testColl.findOne(doc1)); - assert.eq(null, sessionColl.findOne(doc1)); - assert.docEq(doc2, testColl.findOne(doc2)); - assert.docEq(doc2, sessionColl.findOne(doc2)); - - // Test that we do not observe the new insert inside of the transaction. - assertCursorBatchContents( - assert.commandWorked(sessionDb.runCommand( - {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})), - [doc1], - false); - - // Commit the transaction. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - writeConcern: {w: "majority"}, - txnNumber: NumberLong(txnNumber), - autocommit: false - })); - - // Test that we see both documents outside of the transaction. - assert.docEq(doc1, testColl.findOne(doc1)); - assert.docEq(doc1, sessionColl.findOne(doc1)); - assert.docEq(doc2, testColl.findOne(doc2)); - assert.docEq(doc2, sessionColl.findOne(doc2)); +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; +let txnNumber = 0; + +/** + * Asserts that the given result cursor has the expected contents and that it is exhausted if + * specified. 
+ */ +function assertCursorBatchContents(result, expectedContents, isExhausted) { + assert.gt(expectedContents.length, 0, "Non-empty expected contents required."); + assert(result.hasOwnProperty("cursor"), tojson(result)); + assert(result["cursor"].hasOwnProperty("firstBatch"), tojson(result)); + assert.eq(expectedContents.length, result["cursor"]["firstBatch"].length, tojson(result)); + for (let i = 0; i < expectedContents.length; i++) { + assert.docEq(expectedContents[i], result["cursor"]["firstBatch"][i], tojson(result)); + } + assert.eq(isExhausted, result["cursor"]["id"] === 0, tojson(result)); +} + +const doc1 = { + _id: "insert-1" +}; +const doc2 = { + _id: "insert-2" +}; + +// Insert a document in a transaction. +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [doc1], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + startTransaction: true, + autocommit: false +})); + +// Test that we cannot observe the insert outside of the transaction. +assert.eq(null, testColl.findOne(doc1)); +assert.eq(null, sessionColl.findOne(doc1)); +assert.eq(null, testColl.findOne(doc2)); +assert.eq(null, sessionColl.findOne(doc2)); + +// Test that we observe the insert inside of the transaction. +assertCursorBatchContents( + assert.commandWorked(sessionDb.runCommand( + {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})), + [doc1], + false); + +// Insert a document on the session outside of the transaction. +assert.commandWorked(sessionDb.runCommand({insert: collName, documents: [doc2]})); + +// Test that we observe the insert outside of the transaction. +assert.eq(null, testColl.findOne(doc1)); +assert.eq(null, sessionColl.findOne(doc1)); +assert.docEq(doc2, testColl.findOne(doc2)); +assert.docEq(doc2, sessionColl.findOne(doc2)); + +// Test that we do not observe the new insert inside of the transaction. +assertCursorBatchContents( + assert.commandWorked(sessionDb.runCommand( + {find: collName, batchSize: 10, txnNumber: NumberLong(txnNumber), autocommit: false})), + [doc1], + false); + +// Commit the transaction. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + writeConcern: {w: "majority"}, + txnNumber: NumberLong(txnNumber), + autocommit: false +})); + +// Test that we see both documents outside of the transaction. +assert.docEq(doc1, testColl.findOne(doc1)); +assert.docEq(doc1, sessionColl.findOne(doc1)); +assert.docEq(doc2, testColl.findOne(doc2)); +assert.docEq(doc2, sessionColl.findOne(doc2)); }());
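For readers following the wire-level pattern these transaction tests exercise, the explicit runCommand form used in the file above reduces to roughly the sketch below. This is a minimal illustration added for orientation, not part of the diff or the test suite: it assumes a mongo shell connected to a replica set (multi-document transactions require one), and the collection name "example" and the txnNumber value 0 are placeholders.

const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");

// The first statement carries startTransaction: true; every statement in the
// transaction repeats the same txnNumber and sends autocommit: false.
assert.commandWorked(sessionDb.runCommand({
    insert: "example",
    documents: [{_id: 1}],
    readConcern: {level: "snapshot"},
    txnNumber: NumberLong(0),
    startTransaction: true,
    autocommit: false
}));

// Subsequent statements omit startTransaction but keep txnNumber and autocommit.
assert.commandWorked(sessionDb.runCommand(
    {find: "example", batchSize: 10, txnNumber: NumberLong(0), autocommit: false}));

// The transaction is resolved with commitTransaction (or abortTransaction) against
// the admin database, as the test above does.
assert.commandWorked(sessionDb.adminCommand({
    commitTransaction: 1,
    writeConcern: {w: "majority"},
    txnNumber: NumberLong(0),
    autocommit: false
}));

session.endSession();

The prepared-transaction tests in the files that follow rely on PrepareHelpers.prepareTransaction(session) from jstests/core/txns/libs/prepare_helpers.js, which, as used below, issues the prepareTransaction command for the session's active transaction and returns the prepareTimestamp that PrepareHelpers.commitTransaction(session, prepareTimestamp) later consumes.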
\ No newline at end of file diff --git a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js index 52a98d1e69c..675db8b65f3 100644 --- a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js +++ b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js @@ -1,53 +1,56 @@ // Tests that no-op createIndex commands do not block behind transactions. // @tags: [uses_transactions] (function() { - "use strict"; - - const dbName = 'noop_createIndexes_not_blocked'; - const collName = 'test'; - const testDB = db.getSiblingDB(dbName); - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - if (isMongos) { - // Access the collection before creating indexes so it can be implicitly sharded. - assert.eq(sessionDB[collName].find().itcount(), 0); - } - - const createIndexesCommand = {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}; - assert.commandWorked(sessionDB.runCommand(createIndexesCommand)); - - session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({a: 5, b: 6})); - - // This should not block because an identical index exists. - let res = testDB.runCommand(createIndexesCommand); - assert.commandWorked(res); - assert.eq(res.numIndexesBefore, res.numIndexesAfter); - - // This should not block but return an error because the index exists with different options. - res = testDB.runCommand({ - createIndexes: collName, - indexes: [{key: {a: 1}, name: "unique_a_1", unique: true}], - }); - assert.commandFailedWithCode(res, ErrorCodes.IndexOptionsConflict); - - // This should block and time out because the index does not already exist. - res = testDB.runCommand( - {createIndexes: collName, indexes: [{key: {b: 1}, name: "b_1"}], maxTimeMS: 500}); - assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired); - - // This should block and time out because one of the indexes does not already exist. - res = testDB.runCommand({ - createIndexes: collName, - indexes: [{key: {a: 1}, name: "a_1"}, {key: {b: 1}, name: "b_1"}], - maxTimeMS: 500 - }); - assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired); - - assert.commandWorked(session.commitTransaction_forTesting()); +"use strict"; + +const dbName = 'noop_createIndexes_not_blocked'; +const collName = 'test'; +const testDB = db.getSiblingDB(dbName); + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); + +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +if (isMongos) { + // Access the collection before creating indexes so it can be implicitly sharded. + assert.eq(sessionDB[collName].find().itcount(), 0); +} + +const createIndexesCommand = { + createIndexes: collName, + indexes: [{key: {a: 1}, name: "a_1"}] +}; +assert.commandWorked(sessionDB.runCommand(createIndexesCommand)); + +session.startTransaction(); +assert.commandWorked(sessionDB[collName].insert({a: 5, b: 6})); + +// This should not block because an identical index exists. 
+let res = testDB.runCommand(createIndexesCommand); +assert.commandWorked(res); +assert.eq(res.numIndexesBefore, res.numIndexesAfter); + +// This should not block but return an error because the index exists with different options. +res = testDB.runCommand({ + createIndexes: collName, + indexes: [{key: {a: 1}, name: "unique_a_1", unique: true}], +}); +assert.commandFailedWithCode(res, ErrorCodes.IndexOptionsConflict); + +// This should block and time out because the index does not already exist. +res = testDB.runCommand( + {createIndexes: collName, indexes: [{key: {b: 1}, name: "b_1"}], maxTimeMS: 500}); +assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired); + +// This should block and time out because one of the indexes does not already exist. +res = testDB.runCommand({ + createIndexes: collName, + indexes: [{key: {a: 1}, name: "a_1"}, {key: {b: 1}, name: "b_1"}], + maxTimeMS: 500 +}); +assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired); + +assert.commandWorked(session.commitTransaction_forTesting()); }()); diff --git a/jstests/core/txns/prepare_conflict.js b/jstests/core/txns/prepare_conflict.js index eade0e15d1e..7eb4212c5c3 100644 --- a/jstests/core/txns/prepare_conflict.js +++ b/jstests/core/txns/prepare_conflict.js @@ -4,95 +4,101 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "prepare_conflict"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - function assertPrepareConflict(filter, clusterTime) { - // Use a 5 second timeout so that there is enough time for the prepared transaction to - // release its locks and for the command to obtain those locks. - assert.commandFailedWithCode( - // Use afterClusterTime read to make sure that it will block on a prepare conflict. - testDB.runCommand({ - find: collName, - filter: filter, - readConcern: {afterClusterTime: clusterTime}, - maxTimeMS: 5000 - }), - ErrorCodes.MaxTimeMSExpired); - - let prepareConflicted = false; - const cur = - testDB.system.profile.find({"ns": testColl.getFullName(), "command.filter": filter}); - while (cur.hasNext()) { - const n = cur.next(); - print("op: " + JSON.stringify(n)); - if (n.prepareReadConflicts > 0) { - prepareConflicted = true; - } +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "prepare_conflict"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); + +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +function assertPrepareConflict(filter, clusterTime) { + // Use a 5 second timeout so that there is enough time for the prepared transaction to + // release its locks and for the command to obtain those locks. + assert.commandFailedWithCode( + // Use afterClusterTime read to make sure that it will block on a prepare conflict. 
+ testDB.runCommand({ + find: collName, + filter: filter, + readConcern: {afterClusterTime: clusterTime}, + maxTimeMS: 5000 + }), + ErrorCodes.MaxTimeMSExpired); + + let prepareConflicted = false; + const cur = + testDB.system.profile.find({"ns": testColl.getFullName(), "command.filter": filter}); + while (cur.hasNext()) { + const n = cur.next(); + print("op: " + JSON.stringify(n)); + if (n.prepareReadConflicts > 0) { + prepareConflicted = true; } - assert(prepareConflicted); } - - // Insert a document modified by the transaction. - const txnDoc = {_id: 1, x: 1}; - assert.commandWorked(testColl.insert(txnDoc)); - - // Insert a document unmodified by the transaction. - const otherDoc = {_id: 2, y: 2}; - assert.commandWorked(testColl.insert(otherDoc, {writeConcern: {w: "majority"}})); - - // Create an index on 'y' to avoid conflicts on the field. - assert.commandWorked(testColl.createIndex({y: 1})); - - // Enable the profiler to log slow queries. We expect a 'find' to hang until the prepare - // conflict is resolved. - assert.commandWorked(testDB.runCommand({profile: 1, level: 1, slowms: 100})); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionDB.runCommand({ - update: collName, - updates: [{q: txnDoc, u: {$inc: {x: 1}}}], - })); - - const prepareTimestamp = PrepareHelpers.prepareTransaction(session); - - // Conflict on _id of prepared document. - assertPrepareConflict({_id: txnDoc._id}, prepareTimestamp); - - // Conflict on field that could be added to a prepared document. - assertPrepareConflict({randomField: "random"}, prepareTimestamp); - - // No conflict on _id of a non-prepared document. - assert.commandWorked(testDB.runCommand({find: collName, filter: {_id: otherDoc._id}})); - - // No conflict on indexed field of a non-prepared document. - assert.commandWorked(testDB.runCommand({find: collName, filter: {y: otherDoc.y}})); - - // At this point, we can guarantee all subsequent reads will conflict. Do a read in a parallel - // shell, abort the transaction, then ensure the read succeeded with the old document. - TestData.collName = collName; - TestData.dbName = dbName; - TestData.txnDoc = txnDoc; - const findAwait = startParallelShell(function() { - const it = db.getSiblingDB(TestData.dbName) - .runCommand({find: TestData.collName, filter: {_id: TestData.txnDoc._id}}); - }, db.getMongo().port); - - assert.commandWorked(session.abortTransaction_forTesting()); - - // The find command should be successful. - findAwait({checkExitSuccess: true}); - - // The document should be unmodified, because we aborted. - assert.eq(txnDoc, testColl.findOne(txnDoc)); + assert(prepareConflicted); +} + +// Insert a document modified by the transaction. +const txnDoc = { + _id: 1, + x: 1 +}; +assert.commandWorked(testColl.insert(txnDoc)); + +// Insert a document unmodified by the transaction. +const otherDoc = { + _id: 2, + y: 2 +}; +assert.commandWorked(testColl.insert(otherDoc, {writeConcern: {w: "majority"}})); + +// Create an index on 'y' to avoid conflicts on the field. +assert.commandWorked(testColl.createIndex({y: 1})); + +// Enable the profiler to log slow queries. We expect a 'find' to hang until the prepare +// conflict is resolved. 
+assert.commandWorked(testDB.runCommand({profile: 1, level: 1, slowms: 100})); + +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +session.startTransaction({readConcern: {level: "snapshot"}}); +assert.commandWorked(sessionDB.runCommand({ + update: collName, + updates: [{q: txnDoc, u: {$inc: {x: 1}}}], +})); + +const prepareTimestamp = PrepareHelpers.prepareTransaction(session); + +// Conflict on _id of prepared document. +assertPrepareConflict({_id: txnDoc._id}, prepareTimestamp); + +// Conflict on field that could be added to a prepared document. +assertPrepareConflict({randomField: "random"}, prepareTimestamp); + +// No conflict on _id of a non-prepared document. +assert.commandWorked(testDB.runCommand({find: collName, filter: {_id: otherDoc._id}})); + +// No conflict on indexed field of a non-prepared document. +assert.commandWorked(testDB.runCommand({find: collName, filter: {y: otherDoc.y}})); + +// At this point, we can guarantee all subsequent reads will conflict. Do a read in a parallel +// shell, abort the transaction, then ensure the read succeeded with the old document. +TestData.collName = collName; +TestData.dbName = dbName; +TestData.txnDoc = txnDoc; +const findAwait = startParallelShell(function() { + const it = db.getSiblingDB(TestData.dbName) + .runCommand({find: TestData.collName, filter: {_id: TestData.txnDoc._id}}); +}, db.getMongo().port); + +assert.commandWorked(session.abortTransaction_forTesting()); + +// The find command should be successful. +findAwait({checkExitSuccess: true}); + +// The document should be unmodified, because we aborted. +assert.eq(txnDoc, testColl.findOne(txnDoc)); })(); diff --git a/jstests/core/txns/prepare_conflict_aggregation_behavior.js b/jstests/core/txns/prepare_conflict_aggregation_behavior.js index 37c9984d042..c62b7370dc6 100644 --- a/jstests/core/txns/prepare_conflict_aggregation_behavior.js +++ b/jstests/core/txns/prepare_conflict_aggregation_behavior.js @@ -6,85 +6,85 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const failureTimeout = 5 * 1000; // 5 seconds. - const dbName = "test"; - const collName = "prepare_conflict_aggregation_behavior"; - const outCollName = collName + "_out"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); - const outColl = testDB.getCollection(outCollName); +const failureTimeout = 5 * 1000; // 5 seconds. 
+const dbName = "test"; +const collName = "prepare_conflict_aggregation_behavior"; +const outCollName = collName + "_out"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); +const outColl = testDB.getCollection(outCollName); - testColl.drop({writeConcern: {w: "majority"}}); - outColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - assert.commandWorked(testDB.runCommand({create: outCollName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +outColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.runCommand({create: outCollName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); - const sessionOutColl = sessionDB.getCollection(outCollName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); +const sessionOutColl = sessionDB.getCollection(outCollName); - assert.commandWorked(testColl.insert({_id: 1})); - assert.commandWorked(outColl.insert({_id: 0})); +assert.commandWorked(testColl.insert({_id: 1})); +assert.commandWorked(outColl.insert({_id: 0})); - session.startTransaction(); - assert.commandWorked(sessionColl.update({_id: 1}, {a: 1})); - assert.commandWorked(sessionOutColl.update({_id: 0}, {a: 1})); - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); +session.startTransaction(); +assert.commandWorked(sessionColl.update({_id: 1}, {a: 1})); +assert.commandWorked(sessionOutColl.update({_id: 0}, {a: 1})); +let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - jsTestLog("Test that reads from an aggregation pipeline with $merge don't block on prepare" + - " conflicts"); - testColl.aggregate([ - {$addFields: {b: 1}}, - {$merge: {into: outCollName, whenMatched: "fail", whenNotMatched: "insert"}} - ]); +jsTestLog("Test that reads from an aggregation pipeline with $merge don't block on prepare" + + " conflicts"); +testColl.aggregate([ + {$addFields: {b: 1}}, + {$merge: {into: outCollName, whenMatched: "fail", whenNotMatched: "insert"}} +]); - // Make sure that we can see the inserts from the aggregation but not the updates from the - // prepared transaction. - assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray()); +// Make sure that we can see the inserts from the aggregation but not the updates from the +// prepared transaction. 
+assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray()); - assert.commandWorked(session.abortTransaction_forTesting()); - session.startTransaction(); - assert.commandWorked(sessionOutColl.update({_id: 1}, {_id: 1, a: 1})); - prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(session.abortTransaction_forTesting()); +session.startTransaction(); +assert.commandWorked(sessionOutColl.update({_id: 1}, {_id: 1, a: 1})); +prepareTimestamp = PrepareHelpers.prepareTransaction(session); - jsTestLog("Test that writes from an aggregation pipeline block on prepare conflicts"); - let pipeline = [ - {$addFields: {c: 1}}, - {$merge: {into: outCollName, whenMatched: "replace", whenNotMatched: "insert"}} - ]; - assert.commandFailedWithCode(testDB.runCommand({ - aggregate: collName, - pipeline: pipeline, - cursor: {}, - maxTimeMS: failureTimeout, - }), - ErrorCodes.MaxTimeMSExpired); +jsTestLog("Test that writes from an aggregation pipeline block on prepare conflicts"); +let pipeline = [ + {$addFields: {c: 1}}, + {$merge: {into: outCollName, whenMatched: "replace", whenNotMatched: "insert"}} +]; +assert.commandFailedWithCode(testDB.runCommand({ + aggregate: collName, + pipeline: pipeline, + cursor: {}, + maxTimeMS: failureTimeout, +}), + ErrorCodes.MaxTimeMSExpired); - // Make sure that we can't see the update from the aggregation or the prepared transaction. - assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray()); +// Make sure that we can't see the update from the aggregation or the prepared transaction. +assert.eq([{_id: 0}, {_id: 1, b: 1}], outColl.find().toArray()); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - // Make sure that the $merge pipeline works once the transaction is committed. - testColl.aggregate(pipeline); - assert.eq([{_id: 0}, {_id: 1, c: 1}], outColl.find().toArray()); +// Make sure that the $merge pipeline works once the transaction is committed. +testColl.aggregate(pipeline); +assert.eq([{_id: 0}, {_id: 1, c: 1}], outColl.find().toArray()); - // At the time of this writing, change streams can sometimes adjust the readConcern to - // 'majority' after receiving the command and thus need to wait for read concern again. When - // doing this, we assume that a change stream with a stage which performs writes is not allowed. - // Test that this is true. - pipeline = [{$changeStream: {}}, {$addFields: {d: 1}}, {$out: outCollName}]; - assert.commandFailedWithCode(testDB.runCommand({ - aggregate: collName, - pipeline: pipeline, - cursor: {}, - maxTimeMS: failureTimeout, - }), - ErrorCodes.IllegalOperation); +// At the time of this writing, change streams can sometimes adjust the readConcern to +// 'majority' after receiving the command and thus need to wait for read concern again. When +// doing this, we assume that a change stream with a stage which performs writes is not allowed. +// Test that this is true. 
+pipeline = [{$changeStream: {}}, {$addFields: {d: 1}}, {$out: outCollName}]; +assert.commandFailedWithCode(testDB.runCommand({ + aggregate: collName, + pipeline: pipeline, + cursor: {}, + maxTimeMS: failureTimeout, +}), + ErrorCodes.IllegalOperation); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/prepare_nonexistent_transaction.js b/jstests/core/txns/prepare_nonexistent_transaction.js index 40e0b540354..c68c9164556 100644 --- a/jstests/core/txns/prepare_nonexistent_transaction.js +++ b/jstests/core/txns/prepare_nonexistent_transaction.js @@ -4,98 +4,97 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "prepare_nonexistent_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "prepare_nonexistent_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc = {x: 1}; +const doc = { + x: 1 +}; - jsTestLog("Test that if there is no transaction active on the current session, errors with " + - "'NoSuchTransaction'."); - assert.commandFailedWithCode( - sessionDB.adminCommand( - {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), - ErrorCodes.NoSuchTransaction); +jsTestLog("Test that if there is no transaction active on the current session, errors with " + + "'NoSuchTransaction'."); +assert.commandFailedWithCode( + sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), + ErrorCodes.NoSuchTransaction); - jsTestLog("Test that if there is a transaction running on the current session and the " + - "'txnNumber' given is greater than the current transaction, errors with " + - "'NoSuchTransaction'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); - assert.commandWorked(session.abortTransaction_forTesting()); +jsTestLog("Test that if there is a transaction running on the current session and the " + + "'txnNumber' given is greater than the current transaction, errors with " + + "'NoSuchTransaction'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandFailedWithCode(sessionDB.adminCommand({ + prepareTransaction: 1, + txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); +assert.commandWorked(session.abortTransaction_forTesting()); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandWorked(session.abortTransaction_forTesting()); - jsTestLog("Test that if 
there is no transaction active on the current session, the " + - "'txnNumber' given matches the last known transaction for this session and the " + - "last known transaction was aborted then it errors with 'NoSuchTransaction'."); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(session.getTxnNumber_forTesting()), - autocommit: false - }), - ErrorCodes.NoSuchTransaction); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandWorked(session.abortTransaction_forTesting()); +jsTestLog("Test that if there is no transaction active on the current session, the " + + "'txnNumber' given matches the last known transaction for this session and the " + + "last known transaction was aborted then it errors with 'NoSuchTransaction'."); +assert.commandFailedWithCode(sessionDB.adminCommand({ + prepareTransaction: 1, + txnNumber: NumberLong(session.getTxnNumber_forTesting()), + autocommit: false +}), + ErrorCodes.NoSuchTransaction); - jsTestLog("Test that if there is a transaction running on the current session and the " + - "'txnNumber' given is less than the current transaction, errors with " + - "'TransactionTooOld'."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc)); - assert.commandFailedWithCode( - sessionDB.adminCommand( - {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), - ErrorCodes.TransactionTooOld); - assert.commandWorked(session.abortTransaction_forTesting()); +jsTestLog("Test that if there is a transaction running on the current session and the " + + "'txnNumber' given is less than the current transaction, errors with " + + "'TransactionTooOld'."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc)); +assert.commandFailedWithCode( + sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), + ErrorCodes.TransactionTooOld); +assert.commandWorked(session.abortTransaction_forTesting()); - jsTestLog("Test that if there is no transaction active on the current session and the " + - "'txnNumber' given is less than the current transaction, errors with " + - "'TransactionTooOld'."); - assert.commandFailedWithCode( - sessionDB.adminCommand( - {prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), - ErrorCodes.TransactionTooOld); +jsTestLog("Test that if there is no transaction active on the current session and the " + + "'txnNumber' given is less than the current transaction, errors with " + + "'TransactionTooOld'."); +assert.commandFailedWithCode( + sessionDB.adminCommand({prepareTransaction: 1, txnNumber: NumberLong(0), autocommit: false}), + ErrorCodes.TransactionTooOld); - jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " + - "not providing txnNumber to prepareTransaction."); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}), - ErrorCodes.InvalidOptions); +jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " + + "not providing txnNumber to prepareTransaction."); +assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1, autocommit: false}), + ErrorCodes.InvalidOptions); - jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " + - "not providing autocommit to prepareTransaction."); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - txnNumber: 
NumberLong(session.getTxnNumber_forTesting() + 1), - }), - 50768); +jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction but " + + "not providing autocommit to prepareTransaction."); +assert.commandFailedWithCode(sessionDB.adminCommand({ + prepareTransaction: 1, + txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1), +}), + 50768); - jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction and " + - "providing startTransaction to prepareTransaction."); - assert.commandFailedWithCode(sessionDB.adminCommand({ - prepareTransaction: 1, - // The last txnNumber we used was saved on the server's session, so we use a txnNumber that - // is greater than that to make sure it has never been seen before. - txnNumber: NumberLong(session.getTxnNumber_forTesting() + 2), - autocommit: false, - startTransaction: true - }), - ErrorCodes.OperationNotSupportedInTransaction); +jsTestLog("Test the error precedence when calling prepare on a nonexistent transaction and " + + "providing startTransaction to prepareTransaction."); +assert.commandFailedWithCode(sessionDB.adminCommand({ + prepareTransaction: 1, + // The last txnNumber we used was saved on the server's session, so we use a txnNumber that + // is greater than that to make sure it has never been seen before. + txnNumber: NumberLong(session.getTxnNumber_forTesting() + 2), + autocommit: false, + startTransaction: true +}), + ErrorCodes.OperationNotSupportedInTransaction); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/prepare_prepared_transaction.js b/jstests/core/txns/prepare_prepared_transaction.js index 8032e885e3e..e7148349f53 100644 --- a/jstests/core/txns/prepare_prepared_transaction.js +++ b/jstests/core/txns/prepare_prepared_transaction.js @@ -4,34 +4,37 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "prepare_prepared_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "prepare_prepared_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = testDB.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = testDB.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - const doc1 = {_id: 1, x: 1}; +const doc1 = { + _id: 1, + x: 1 +}; - // Attempting to prepare an already prepared transaction should return successfully with a - // prepareTimestamp. +// Attempting to prepare an already prepared transaction should return successfully with a +// prepareTimestamp. - // Client's opTime is later than the prepareOpTime, so just return the prepareTimestamp. 
- session.startTransaction(); - assert.commandWorked(sessionColl.insert(doc1)); - const firstTimestamp = PrepareHelpers.prepareTransaction(session); - const secondTimestamp = PrepareHelpers.prepareTransaction(session); - // Both prepareTimestamps should be equal. - assert.eq(firstTimestamp, secondTimestamp); - assert.commandWorked(session.abortTransaction_forTesting()); +// Client's opTime is later than the prepareOpTime, so just return the prepareTimestamp. +session.startTransaction(); +assert.commandWorked(sessionColl.insert(doc1)); +const firstTimestamp = PrepareHelpers.prepareTransaction(session); +const secondTimestamp = PrepareHelpers.prepareTransaction(session); +// Both prepareTimestamps should be equal. +assert.eq(firstTimestamp, secondTimestamp); +assert.commandWorked(session.abortTransaction_forTesting()); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/prepare_requires_fcv42.js b/jstests/core/txns/prepare_requires_fcv42.js index a7be765a969..6f32918d332 100644 --- a/jstests/core/txns/prepare_requires_fcv42.js +++ b/jstests/core/txns/prepare_requires_fcv42.js @@ -4,54 +4,56 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - "use strict"; - load("jstests/libs/feature_compatibility_version.js"); - load("jstests/core/txns/libs/prepare_helpers.js"); - - const dbName = "test"; - const collName = "prepare_requires_fcv42"; - const testDB = db.getSiblingDB(dbName); - const adminDB = db.getSiblingDB('admin'); - - testDB[collName].drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDB = session.getDatabase(dbName); - - try { - jsTestLog("Transaction succeeds in latest FCV."); - checkFCV(adminDB, latestFCV); - session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({_id: "a"})); - let prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - - jsTestLog("Downgrade the featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); - checkFCV(adminDB, lastStableFCV); - - jsTestLog("Transaction fails to prepare in last stable FCV."); - session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({_id: "b"})); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), - ErrorCodes.CommandNotSupported); - // Abort the transaction in the shell. 
- assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - } finally { - jsTestLog("Restore the original featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - } - - jsTestLog("Transaction succeeds in latest FCV after upgrade."); +"use strict"; +load("jstests/libs/feature_compatibility_version.js"); +load("jstests/core/txns/libs/prepare_helpers.js"); + +const dbName = "test"; +const collName = "prepare_requires_fcv42"; +const testDB = db.getSiblingDB(dbName); +const adminDB = db.getSiblingDB('admin'); + +testDB[collName].drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDB = session.getDatabase(dbName); + +try { + jsTestLog("Transaction succeeds in latest FCV."); + checkFCV(adminDB, latestFCV); session.startTransaction(); - assert.commandWorked(sessionDB[collName].insert({_id: "c"})); + assert.commandWorked(sessionDB[collName].insert({_id: "a"})); let prepareTimestamp = PrepareHelpers.prepareTransaction(session); assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - session.endSession(); + jsTestLog("Downgrade the featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV})); + checkFCV(adminDB, lastStableFCV); + + jsTestLog("Transaction fails to prepare in last stable FCV."); + session.startTransaction(); + assert.commandWorked(sessionDB[collName].insert({_id: "b"})); + assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), + ErrorCodes.CommandNotSupported); + // Abort the transaction in the shell. 
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + +} finally { + jsTestLog("Restore the original featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); +} + +jsTestLog("Transaction succeeds in latest FCV after upgrade."); +session.startTransaction(); +assert.commandWorked(sessionDB[collName].insert({_id: "c"})); +let prepareTimestamp = PrepareHelpers.prepareTransaction(session); +assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); + +session.endSession(); }()); diff --git a/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js b/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js index 091665d2509..14ba3cb7926 100644 --- a/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js +++ b/jstests/core/txns/prepare_transaction_fails_on_temp_collections.js @@ -8,31 +8,30 @@ */ (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const tempCollName = "prepare_transaction_fails_on_temp_collections"; - const testDB = db.getSiblingDB(dbName); - const testTempColl = testDB.getCollection(tempCollName); +const dbName = "test"; +const tempCollName = "prepare_transaction_fails_on_temp_collections"; +const testDB = db.getSiblingDB(dbName); +const testTempColl = testDB.getCollection(tempCollName); - testTempColl.drop({writeConcern: {w: "majority"}}); +testTempColl.drop({writeConcern: {w: "majority"}}); - jsTest.log("Creating a temporary collection."); - assert.commandWorked(testDB.runCommand({ - applyOps: - [{op: "c", ns: testDB.getName() + ".$cmd", o: {create: tempCollName, temp: true}}] - })); +jsTest.log("Creating a temporary collection."); +assert.commandWorked(testDB.runCommand({ + applyOps: [{op: "c", ns: testDB.getName() + ".$cmd", o: {create: tempCollName, temp: true}}] +})); - const session = db.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); - const sessionTempColl = sessionDB.getCollection(tempCollName); +const session = db.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); +const sessionTempColl = sessionDB.getCollection(tempCollName); - jsTest.log("Setting up a transaction with an operation on a temporary collection."); - session.startTransaction(); - assert.commandWorked(sessionTempColl.insert({x: 1000})); +jsTest.log("Setting up a transaction with an operation on a temporary collection."); +session.startTransaction(); +assert.commandWorked(sessionTempColl.insert({x: 1000})); - jsTest.log("Calling prepareTransaction for a transaction with operations against a " + - "temporary collection should now fail."); - assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), - ErrorCodes.OperationNotSupportedInTransaction); +jsTest.log("Calling prepareTransaction for a transaction with operations against a " + + "temporary collection should now fail."); +assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}), + ErrorCodes.OperationNotSupportedInTransaction); })(); diff --git a/jstests/core/txns/prepare_transaction_unique_index_conflict.js b/jstests/core/txns/prepare_transaction_unique_index_conflict.js index e364a1e0a0e..9fc0dae7a0b 100644 --- a/jstests/core/txns/prepare_transaction_unique_index_conflict.js +++ b/jstests/core/txns/prepare_transaction_unique_index_conflict.js @@ -9,36 +9,36 @@ */ (function() { - "use strict"; - 
load("jstests/core/txns/libs/prepare_helpers.js"); +"use strict"; +load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "test"; - const collName = "prepare_transaction_unique_index_conflict"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB.getCollection(collName); +const dbName = "test"; +const collName = "prepare_transaction_unique_index_conflict"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB.getCollection(collName); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); +testColl.drop({writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - const session = db.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB.getCollection(collName); +const session = db.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB.getCollection(collName); - assert.commandWorked(testColl.insert({_id: 1, a: 0})); +assert.commandWorked(testColl.insert({_id: 1, a: 0})); - // Ensure that the "a" field is unique. - assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true})); +// Ensure that the "a" field is unique. +assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true})); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: 2, a: 1})); - assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}})); - const prepareTimestamp = PrepareHelpers.prepareTransaction(session); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: 2, a: 1})); +assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}})); +const prepareTimestamp = PrepareHelpers.prepareTransaction(session); - // While trying to insert this document, the node will have to perform reads to check if it - // violates the unique index, which should cause a prepare conflict. - assert.commandFailedWithCode( - testDB.runCommand({insert: collName, documents: [{_id: 3, a: 1}], maxTimeMS: 5000}), - ErrorCodes.MaxTimeMSExpired); +// While trying to insert this document, the node will have to perform reads to check if it +// violates the unique index, which should cause a prepare conflict. +assert.commandFailedWithCode( + testDB.runCommand({insert: collName, documents: [{_id: 3, a: 1}], maxTimeMS: 5000}), + ErrorCodes.MaxTimeMSExpired); - assert.commandWorked(session.abortTransaction_forTesting()); +assert.commandWorked(session.abortTransaction_forTesting()); })(); diff --git a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js index da915154ee8..0458f213960 100644 --- a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js +++ b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js @@ -1,65 +1,78 @@ // Test that prepared transactions don't block DDL operations on the non-conflicting collections. 
// @tags: [uses_transactions, uses_prepare_transaction] (function() { - "use strict"; +"use strict"; - load("jstests/core/txns/libs/prepare_helpers.js"); - const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl"; - const collName = "transactions_collection"; - const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other"; - const otherCollName = "transactions_collection_other"; - const testDB = db.getSiblingDB(dbName); - const otherDB = db.getSiblingDB(otherDBName); +load("jstests/core/txns/libs/prepare_helpers.js"); +const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl"; +const collName = "transactions_collection"; +const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other"; +const otherCollName = "transactions_collection_other"; +const testDB = db.getSiblingDB(dbName); +const otherDB = db.getSiblingDB(otherDBName); - const session = testDB.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB[collName]; +const session = testDB.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB[collName]; - // Setup. - testDB.dropDatabase(); - otherDB.dropDatabase(); - assert.commandWorked(sessionColl.insert({_id: 1, x: 0})); +// Setup. +testDB.dropDatabase(); +otherDB.dropDatabase(); +assert.commandWorked(sessionColl.insert({_id: 1, x: 0})); - /** - * Tests that DDL operations on non-conflicting namespaces don't block on transactions. - */ - function testSuccess(cmdDBName, ddlCmd) { - session.startTransaction(); - assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}})); - const prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(testDB.getSiblingDB(cmdDBName).runCommand(ddlCmd)); - assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); - } +/** + * Tests that DDL operations on non-conflicting namespaces don't block on transactions. 
+ */ +function testSuccess(cmdDBName, ddlCmd) { + session.startTransaction(); + assert.commandWorked(sessionColl.update({_id: 1}, {$inc: {x: 1}})); + const prepareTimestamp = PrepareHelpers.prepareTransaction(session); + assert.commandWorked(testDB.getSiblingDB(cmdDBName).runCommand(ddlCmd)); + assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); +} - jsTest.log("Test 'create'."); - const createCmd = {create: collName}; - testSuccess(otherDBName, createCmd); +jsTest.log("Test 'create'."); +const createCmd = { + create: collName +}; +testSuccess(otherDBName, createCmd); - jsTest.log("Test 'createIndexes'."); - const createIndexesCmd = {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1"}]}; - testSuccess(otherDBName, createIndexesCmd); +jsTest.log("Test 'createIndexes'."); +const createIndexesCmd = { + createIndexes: collName, + indexes: [{key: {x: 1}, name: "x_1"}] +}; +testSuccess(otherDBName, createIndexesCmd); - jsTest.log("Test 'dropIndexes'."); - const dropIndexesCmd = {dropIndexes: collName, index: "x_1"}; - testSuccess(otherDBName, dropIndexesCmd); +jsTest.log("Test 'dropIndexes'."); +const dropIndexesCmd = { + dropIndexes: collName, + index: "x_1" +}; +testSuccess(otherDBName, dropIndexesCmd); - sessionColl.createIndex({multiKeyField: 1}); - jsTest.log("Test 'insert' that enables multi-key index on the same collection."); - const insertAndSetMultiKeyCmd = {insert: collName, documents: [{multiKeyField: [1, 2]}]}; - testSuccess(dbName, insertAndSetMultiKeyCmd); +sessionColl.createIndex({multiKeyField: 1}); +jsTest.log("Test 'insert' that enables multi-key index on the same collection."); +const insertAndSetMultiKeyCmd = { + insert: collName, + documents: [{multiKeyField: [1, 2]}] +}; +testSuccess(dbName, insertAndSetMultiKeyCmd); - jsTest.log("Test 'drop'."); - const dropCmd = {drop: collName}; - testSuccess(otherDBName, dropCmd); +jsTest.log("Test 'drop'."); +const dropCmd = { + drop: collName +}; +testSuccess(otherDBName, dropCmd); - jsTest.log("Test 'renameCollection'."); - assert.commandWorked(otherDB.getCollection(collName).insert({x: "doc-for-rename-collection"})); - otherDB.runCommand({drop: otherCollName}); - const renameCollectionCmd = { - renameCollection: otherDBName + "." + collName, - to: otherDBName + "." + otherCollName - }; - testSuccess("admin", renameCollectionCmd); +jsTest.log("Test 'renameCollection'."); +assert.commandWorked(otherDB.getCollection(collName).insert({x: "doc-for-rename-collection"})); +otherDB.runCommand({drop: otherCollName}); +const renameCollectionCmd = { + renameCollection: otherDBName + "." + collName, + to: otherDBName + "." 
+ otherCollName +}; +testSuccess("admin", renameCollectionCmd); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/read_concerns.js b/jstests/core/txns/read_concerns.js index ffdd381128a..409236a5ffd 100644 --- a/jstests/core/txns/read_concerns.js +++ b/jstests/core/txns/read_concerns.js @@ -2,61 +2,61 @@ // // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; - - const dbName = "test"; - const collName = "supported_read_concern_levels"; - - function runTest(level, sessionOptions, supported) { - jsTestLog("Testing transactions with read concern level: " + level + - " and sessionOptions: " + tojson(sessionOptions)); - - db.getSiblingDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}}); - - const session = db.getMongo().startSession(sessionOptions); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB[collName]; - - // Set up the collection. - assert.writeOK(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); - - if (level) { - session.startTransaction({readConcern: {level: level}}); - } else { - session.startTransaction(); - } - - const res = sessionDB.runCommand({find: collName}); - if (supported) { - assert.commandWorked(res, - "expected success, read concern level: " + level + - ", sessionOptions: " + tojson(sessionOptions)); - assert.commandWorked(session.commitTransaction_forTesting()); - } else { - assert.commandFailedWithCode(res, - ErrorCodes.InvalidOptions, - "expected failure, read concern level: " + level + - ", sessionOptions: " + tojson(sessionOptions)); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } - - session.endSession(); - } +"use strict"; + +const dbName = "test"; +const collName = "supported_read_concern_levels"; + +function runTest(level, sessionOptions, supported) { + jsTestLog("Testing transactions with read concern level: " + level + + " and sessionOptions: " + tojson(sessionOptions)); + + db.getSiblingDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}}); + + const session = db.getMongo().startSession(sessionOptions); + const sessionDB = session.getDatabase(dbName); + const sessionColl = sessionDB[collName]; - // Starting a txn with no read concern level is allowed. - runTest(undefined, {causalConsistency: false}, true /*supported*/); - runTest(undefined, {causalConsistency: true}, true /*supported*/); + // Set up the collection. 
+ assert.writeOK(sessionColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); - const kSupportedLevels = ["local", "majority", "snapshot"]; - for (let level of kSupportedLevels) { - runTest(level, {causalConsistency: false}, true /*supported*/); - runTest(level, {causalConsistency: true}, true /*supported*/); + if (level) { + session.startTransaction({readConcern: {level: level}}); + } else { + session.startTransaction(); } - const kUnsupportedLevels = ["available", "linearizable"]; - for (let level of kUnsupportedLevels) { - runTest(level, {causalConsistency: false}, false /*supported*/); - runTest(level, {causalConsistency: true}, false /*supported*/); + const res = sessionDB.runCommand({find: collName}); + if (supported) { + assert.commandWorked(res, + "expected success, read concern level: " + level + + ", sessionOptions: " + tojson(sessionOptions)); + assert.commandWorked(session.commitTransaction_forTesting()); + } else { + assert.commandFailedWithCode(res, + ErrorCodes.InvalidOptions, + "expected failure, read concern level: " + level + + ", sessionOptions: " + tojson(sessionOptions)); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); } + + session.endSession(); +} + +// Starting a txn with no read concern level is allowed. +runTest(undefined, {causalConsistency: false}, true /*supported*/); +runTest(undefined, {causalConsistency: true}, true /*supported*/); + +const kSupportedLevels = ["local", "majority", "snapshot"]; +for (let level of kSupportedLevels) { + runTest(level, {causalConsistency: false}, true /*supported*/); + runTest(level, {causalConsistency: true}, true /*supported*/); +} + +const kUnsupportedLevels = ["available", "linearizable"]; +for (let level of kUnsupportedLevels) { + runTest(level, {causalConsistency: false}, false /*supported*/); + runTest(level, {causalConsistency: true}, false /*supported*/); +} }()); diff --git a/jstests/core/txns/read_own_multikey_writes.js b/jstests/core/txns/read_own_multikey_writes.js index 69f6f035f3e..9af97dc4baa 100644 --- a/jstests/core/txns/read_own_multikey_writes.js +++ b/jstests/core/txns/read_own_multikey_writes.js @@ -1,32 +1,32 @@ // Tests that multikey updates made inside a transaction are visible to that transaction's reads. // @tags: [assumes_unsharded_collection, uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = 'test'; - const collName = 'testReadOwnMultikeyWrites'; - // Use majority write concern to clear the drop-pending that can cause lock conflicts with - // transactions. - db.getSiblingDB(dbName).getCollection(collName).drop({writeConcern: {w: "majority"}}); +const dbName = 'test'; +const collName = 'testReadOwnMultikeyWrites'; +// Use majority write concern to clear the drop-pending that can cause lock conflicts with +// transactions. 
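// (Collection drops on a replica set are two-phase: the namespace lingers in a drop-pending state
// until the drop is majority committed, and reaping that drop-pending collection takes locks that
// an open transaction can conflict with. Waiting on w: "majority" for the drop below lets that
// cleanup finish before the test starts its transactions.)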
+db.getSiblingDB(dbName).getCollection(collName).drop({writeConcern: {w: "majority"}}); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); - assert.commandWorked(sessionDb.runCommand({create: collName})); +assert.commandWorked(sessionDb.runCommand({create: collName})); - assert.writeOK(sessionColl.insert({a: 1})); - assert.commandWorked(sessionColl.createIndex({a: 1})); +assert.writeOK(sessionColl.insert({a: 1})); +assert.commandWorked(sessionColl.createIndex({a: 1})); - session.startTransaction(); - assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}})); - assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount()); - assert.commandWorked(session.commitTransaction_forTesting()); +session.startTransaction(); +assert.writeOK(sessionColl.update({}, {$set: {a: [1, 2, 3]}})); +assert.eq(1, sessionColl.find({}, {_id: 0, a: 1}).sort({a: 1}).itcount()); +assert.commandWorked(session.commitTransaction_forTesting()); - assert.eq(1, - db.getSiblingDB(dbName) - .getCollection(collName) - .find({}, {_id: 0, a: 1}) - .sort({a: 1}) - .itcount()); +assert.eq(1, + db.getSiblingDB(dbName) + .getCollection(collName) + .find({}, {_id: 0, a: 1}) + .sort({a: 1}) + .itcount()); })(); diff --git a/jstests/core/txns/rename_collection_not_blocked_by_txn.js b/jstests/core/txns/rename_collection_not_blocked_by_txn.js index b5c6cb4c0c4..4c3921d6c12 100644 --- a/jstests/core/txns/rename_collection_not_blocked_by_txn.js +++ b/jstests/core/txns/rename_collection_not_blocked_by_txn.js @@ -5,31 +5,30 @@ */ (function() { - "use strict"; +"use strict"; - let rst = new ReplSetTest({nodes: 1}); - rst.startSet(); - rst.initiate(); +let rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); - let db = rst.getPrimary().getDB("test"); +let db = rst.getPrimary().getDB("test"); - assert.commandWorked(db.runCommand({insert: "t", documents: [{x: 1}]})); - assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); - assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "t", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]})); +assert.commandWorked(db.runCommand({insert: "b", documents: [{x: 1}]})); - const session = db.getMongo().startSession(); - const sessionDb = session.getDatabase("test"); +const session = db.getMongo().startSession(); +const sessionDb = session.getDatabase("test"); - session.startTransaction(); - // This holds a database IX lock and a collection IX lock on "test.t". - sessionDb.t.insert({y: 1}); +session.startTransaction(); +// This holds a database IX lock and a collection IX lock on "test.t". +sessionDb.t.insert({y: 1}); - // This only requires database IX lock. - assert.commandWorked( - db.adminCommand({renameCollection: "test.a", to: "test.b", dropTarget: true})); - assert.commandWorked(db.adminCommand({renameCollection: "test.b", to: "test.c"})); +// This only requires database IX lock. 
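// (The open transaction on "test.t" holds intent (IX) locks on the "test" database and on the
// "test.t" collection. Database-level intent locks are mutually compatible, and the renames below
// never touch "test.t", so they can proceed without waiting for the transaction to commit.)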
+assert.commandWorked(db.adminCommand({renameCollection: "test.a", to: "test.b", dropTarget: true})); +assert.commandWorked(db.adminCommand({renameCollection: "test.b", to: "test.c"})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - rst.stopSet(); +rst.stopSet(); })(); diff --git a/jstests/core/txns/repeatable_reads_in_transaction.js b/jstests/core/txns/repeatable_reads_in_transaction.js index 2aa80d4cc71..3286b6e72cb 100644 --- a/jstests/core/txns/repeatable_reads_in_transaction.js +++ b/jstests/core/txns/repeatable_reads_in_transaction.js @@ -2,75 +2,75 @@ // read the same data even if it was modified outside of the transaction. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "repeatable_reads_in_transaction"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "repeatable_reads_in_transaction"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb.getCollection(collName); +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb.getCollection(collName); - // Initialize second session variables. - const session2 = testDB.getMongo().startSession(sessionOptions); - const session2Db = session2.getDatabase(dbName); - const session2Coll = session2Db.getCollection(collName); +// Initialize second session variables. +const session2 = testDB.getMongo().startSession(sessionOptions); +const session2Db = session2.getDatabase(dbName); +const session2Coll = session2Db.getCollection(collName); - jsTest.log("Prepopulate the collection."); - assert.writeOK( - testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}})); +jsTest.log("Prepopulate the collection."); +assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}, {_id: 2}], {writeConcern: {w: "majority"}})); - // Create a constant array of documents we expect to be returned during a read-only transaction. - // The value should not change since external changes should not be visible within this - // transaction. - const expectedDocs = [{_id: 0}, {_id: 1}, {_id: 2}]; +// Create a constant array of documents we expect to be returned during a read-only transaction. +// The value should not change since external changes should not be visible within this +// transaction. 
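// (Every read in this transaction is served from the single storage snapshot established by the
// transaction's first operation, so writes committed by the second session, or by plain
// non-transactional inserts, stay invisible here until this transaction commits or aborts.)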
+const expectedDocs = [{_id: 0}, {_id: 1}, {_id: 2}]; - jsTestLog("Start a read-only transaction on the first session."); - session.startTransaction({writeConcern: {w: "majority"}}); +jsTestLog("Start a read-only transaction on the first session."); +session.startTransaction({writeConcern: {w: "majority"}}); - assert.sameMembers(expectedDocs, sessionColl.find().toArray()); +assert.sameMembers(expectedDocs, sessionColl.find().toArray()); - jsTestLog("Start a transaction on the second session that modifies the same collection."); - session2.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); +jsTestLog("Start a transaction on the second session that modifies the same collection."); +session2.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - assert.commandWorked(session2Coll.insert({_id: 3})); - assert.commandWorked(session2Coll.update({_id: 1}, {$set: {a: 1}})); - assert.commandWorked(session2Coll.deleteOne({_id: 2})); +assert.commandWorked(session2Coll.insert({_id: 3})); +assert.commandWorked(session2Coll.update({_id: 1}, {$set: {a: 1}})); +assert.commandWorked(session2Coll.deleteOne({_id: 2})); - jsTestLog( - "Continue reading in the first transaction. Changes from the second transaction should not be visible."); +jsTestLog( + "Continue reading in the first transaction. Changes from the second transaction should not be visible."); - assert.sameMembers(expectedDocs, sessionColl.find().toArray()); +assert.sameMembers(expectedDocs, sessionColl.find().toArray()); - jsTestLog("Committing the second transaction."); - assert.commandWorked(session2.commitTransaction_forTesting()); +jsTestLog("Committing the second transaction."); +assert.commandWorked(session2.commitTransaction_forTesting()); - jsTestLog( - "Committed changes from the second transaction should still not be visible to the first."); +jsTestLog( + "Committed changes from the second transaction should still not be visible to the first."); - assert.sameMembers(expectedDocs, sessionColl.find().toArray()); +assert.sameMembers(expectedDocs, sessionColl.find().toArray()); - jsTestLog( - "Writes that occur outside of a transaction should not be visible to a read only transaction."); +jsTestLog( + "Writes that occur outside of a transaction should not be visible to a read only transaction."); - assert.writeOK(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}})); +assert.writeOK(testColl.insert({_id: 4}, {writeConcern: {w: "majority"}})); - assert.sameMembers(expectedDocs, sessionColl.find().toArray()); +assert.sameMembers(expectedDocs, sessionColl.find().toArray()); - jsTestLog("Committing first transaction."); - assert.commandWorked(session.commitTransaction_forTesting()); +jsTestLog("Committing first transaction."); +assert.commandWorked(session.commitTransaction_forTesting()); - // Make sure the correct documents exist after committing the second transaction. - assert.sameMembers([{_id: 0}, {_id: 1, a: 1}, {_id: 3}, {_id: 4}], - sessionColl.find().toArray()); +// Make sure the correct documents exist after committing the second transaction. 
+assert.sameMembers([{_id: 0}, {_id: 1, a: 1}, {_id: 3}, {_id: 4}], sessionColl.find().toArray()); - session.endSession(); - session2.endSession(); +session.endSession(); +session2.endSession(); }()); diff --git a/jstests/core/txns/shell_prompt_in_transaction.js b/jstests/core/txns/shell_prompt_in_transaction.js index 019ea5595de..ab96bef95f2 100644 --- a/jstests/core/txns/shell_prompt_in_transaction.js +++ b/jstests/core/txns/shell_prompt_in_transaction.js @@ -2,42 +2,44 @@ // @tags: [uses_transactions] (function() { - "use strict"; - - const collName = "shell_prompt_in_transaction"; - - db.getCollection(collName).drop({writeConcern: {w: "majority"}}); - assert.commandWorked(db.runCommand({create: collName, writeConcern: {w: "majority"}})); - - // Override the global "db". - const session = db.getMongo().startSession(); - db = session.getDatabase(db.getName()); - const coll = db.getCollection(collName); - - function simulatePrompt() { - __promptWrapper__(defaultPrompt); - } - - // Start a transaction, so the session will attach txn info to the commands running on it. - session.startTransaction(); - jsTestLog("Run shell prompt to simulate a user hitting enter."); - simulatePrompt(); - const doc = {_id: "shell-write"}; - assert.commandWorked(coll.insert(doc)); - assert.docEq(doc, coll.findOne()); - simulatePrompt(); - assert.commandWorked(session.abortTransaction_forTesting()); - assert.docEq(null, coll.findOne()); - - // Start a transaction, so the session has a running transaction now. - simulatePrompt(); - session.startTransaction(); - jsTestLog("Run shell prompt to simulate a user hitting enter."); - simulatePrompt(); - assert.commandWorked(coll.insert(doc)); - simulatePrompt(); - assert.commandWorked(session.commitTransaction_forTesting()); - assert.docEq(doc, coll.findOne()); - - coll.drop({writeConcern: {w: "majority"}}); +"use strict"; + +const collName = "shell_prompt_in_transaction"; + +db.getCollection(collName).drop({writeConcern: {w: "majority"}}); +assert.commandWorked(db.runCommand({create: collName, writeConcern: {w: "majority"}})); + +// Override the global "db". +const session = db.getMongo().startSession(); +db = session.getDatabase(db.getName()); +const coll = db.getCollection(collName); + +function simulatePrompt() { + __promptWrapper__(defaultPrompt); +} + +// Start a transaction, so the session will attach txn info to the commands running on it. +session.startTransaction(); +jsTestLog("Run shell prompt to simulate a user hitting enter."); +simulatePrompt(); +const doc = { + _id: "shell-write" +}; +assert.commandWorked(coll.insert(doc)); +assert.docEq(doc, coll.findOne()); +simulatePrompt(); +assert.commandWorked(session.abortTransaction_forTesting()); +assert.docEq(null, coll.findOne()); + +// Start a transaction, so the session has a running transaction now. 
+simulatePrompt(); +session.startTransaction(); +jsTestLog("Run shell prompt to simulate a user hitting enter."); +simulatePrompt(); +assert.commandWorked(coll.insert(doc)); +simulatePrompt(); +assert.commandWorked(session.commitTransaction_forTesting()); +assert.docEq(doc, coll.findOne()); + +coll.drop({writeConcern: {w: "majority"}}); })(); diff --git a/jstests/core/txns/speculative_snapshot_includes_all_writes.js b/jstests/core/txns/speculative_snapshot_includes_all_writes.js index d3c3b01f827..efeefdfa889 100644 --- a/jstests/core/txns/speculative_snapshot_includes_all_writes.js +++ b/jstests/core/txns/speculative_snapshot_includes_all_writes.js @@ -4,111 +4,111 @@ * @tags: [uses_transactions] */ (function() { - "use strict"; - - load("jstests/libs/check_log.js"); - - const dbName = "test"; - const collName = "speculative_snapshot_includes_all_writes_1"; - const collName2 = "speculative_snapshot_includes_all_writes_2"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - const testColl2 = testDB[collName2]; - - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}}); - - assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); - assert.commandWorked(testDB.createCollection(collName2, {writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - - function startSessionAndTransaction(readConcernLevel) { - let session = db.getMongo().startSession(sessionOptions); - jsTestLog("Start a transaction with readConcern " + readConcernLevel.level + "."); - session.startTransaction({readConcern: readConcernLevel}); - return session; - } - - let checkReads = (session, collExpected, coll2Expected) => { - let sessionDb = session.getDatabase(dbName); - let coll = sessionDb.getCollection(collName); - let coll2 = sessionDb.getCollection(collName2); - assert.sameMembers(collExpected, coll.find().toArray()); - assert.sameMembers(coll2Expected, coll2.find().toArray()); - }; - - // Clear ramlog so checkLog can't find log messages from previous times this fail point was - // enabled. - assert.commandWorked(testDB.adminCommand({clearLog: 'global'})); - - jsTest.log("Prepopulate the collections."); - assert.commandWorked(testColl.insert([{_id: 0}], {writeConcern: {w: "majority"}})); - assert.commandWorked(testColl2.insert([{_id: "a"}], {writeConcern: {w: "majority"}})); - - jsTest.log("Create the uncommitted write."); - - assert.commandWorked(db.adminCommand({ - configureFailPoint: "hangAfterCollectionInserts", - mode: "alwaysOn", - data: {collectionNS: testColl2.getFullName()} - })); - - const joinHungWrite = startParallelShell(() => { - assert.commandWorked( - db.getSiblingDB("test").speculative_snapshot_includes_all_writes_2.insert( - {_id: "b"}, {writeConcern: {w: "majority"}})); - }); - - checkLog.contains( - db.getMongo(), - "hangAfterCollectionInserts fail point enabled for " + testColl2.getFullName()); - - jsTest.log("Create a write following the uncommitted write."); - // Note this write must use local write concern; it cannot be majority committed until - // the prior uncommitted write is committed. 
- assert.commandWorked(testColl.insert([{_id: 1}])); - - const snapshotSession = startSessionAndTransaction({level: "snapshot"}); - checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]); - - const majoritySession = startSessionAndTransaction({level: "majority"}); - checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - - const localSession = startSessionAndTransaction({level: "local"}); - checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - - const defaultSession = startSessionAndTransaction({}); - checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - - jsTestLog("Allow the uncommitted write to finish."); - assert.commandWorked(db.adminCommand({ - configureFailPoint: "hangAfterCollectionInserts", - mode: "off", - })); - - joinHungWrite(); - - jsTestLog("Double-checking that writes not committed at start of snapshot cannot appear."); - checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]); - - jsTestLog( - "Double-checking that writes performed before the start of a transaction of 'majority' or lower must appear."); - checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); - - jsTestLog("Committing transactions."); - assert.commandWorked(snapshotSession.commitTransaction_forTesting()); - assert.commandWorked(majoritySession.commitTransaction_forTesting()); - assert.commandWorked(localSession.commitTransaction_forTesting()); - assert.commandWorked(defaultSession.commitTransaction_forTesting()); - - jsTestLog("A new local read must see all committed writes."); - checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}, {_id: "b"}]); - - snapshotSession.endSession(); - majoritySession.endSession(); - localSession.endSession(); - defaultSession.endSession(); +"use strict"; + +load("jstests/libs/check_log.js"); + +const dbName = "test"; +const collName = "speculative_snapshot_includes_all_writes_1"; +const collName2 = "speculative_snapshot_includes_all_writes_2"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; +const testColl2 = testDB[collName2]; + +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}}); + +assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(collName2, {writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; + +function startSessionAndTransaction(readConcernLevel) { + let session = db.getMongo().startSession(sessionOptions); + jsTestLog("Start a transaction with readConcern " + readConcernLevel.level + "."); + session.startTransaction({readConcern: readConcernLevel}); + return session; +} + +let checkReads = (session, collExpected, coll2Expected) => { + let sessionDb = session.getDatabase(dbName); + let coll = sessionDb.getCollection(collName); + let coll2 = sessionDb.getCollection(collName2); + assert.sameMembers(collExpected, coll.find().toArray()); + assert.sameMembers(coll2Expected, coll2.find().toArray()); +}; + +// Clear ramlog so checkLog can't find log messages from previous times this fail point was +// enabled. 
+assert.commandWorked(testDB.adminCommand({clearLog: 'global'})); + +jsTest.log("Prepopulate the collections."); +assert.commandWorked(testColl.insert([{_id: 0}], {writeConcern: {w: "majority"}})); +assert.commandWorked(testColl2.insert([{_id: "a"}], {writeConcern: {w: "majority"}})); + +jsTest.log("Create the uncommitted write."); + +assert.commandWorked(db.adminCommand({ + configureFailPoint: "hangAfterCollectionInserts", + mode: "alwaysOn", + data: {collectionNS: testColl2.getFullName()} +})); + +const joinHungWrite = startParallelShell(() => { + assert.commandWorked(db.getSiblingDB("test").speculative_snapshot_includes_all_writes_2.insert( + {_id: "b"}, {writeConcern: {w: "majority"}})); +}); + +checkLog.contains(db.getMongo(), + "hangAfterCollectionInserts fail point enabled for " + testColl2.getFullName()); + +jsTest.log("Create a write following the uncommitted write."); +// Note this write must use local write concern; it cannot be majority committed until +// the prior uncommitted write is committed. +assert.commandWorked(testColl.insert([{_id: 1}])); + +const snapshotSession = startSessionAndTransaction({level: "snapshot"}); +checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]); + +const majoritySession = startSessionAndTransaction({level: "majority"}); +checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); + +const localSession = startSessionAndTransaction({level: "local"}); +checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); + +const defaultSession = startSessionAndTransaction({}); +checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); + +jsTestLog("Allow the uncommitted write to finish."); +assert.commandWorked(db.adminCommand({ + configureFailPoint: "hangAfterCollectionInserts", + mode: "off", +})); + +joinHungWrite(); + +jsTestLog("Double-checking that writes not committed at start of snapshot cannot appear."); +checkReads(snapshotSession, [{_id: 0}], [{_id: "a"}]); + +jsTestLog( + "Double-checking that writes performed before the start of a transaction of 'majority' or lower must appear."); +checkReads(majoritySession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); +checkReads(localSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); +checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}]); + +jsTestLog("Committing transactions."); +assert.commandWorked(snapshotSession.commitTransaction_forTesting()); +assert.commandWorked(majoritySession.commitTransaction_forTesting()); +assert.commandWorked(localSession.commitTransaction_forTesting()); +assert.commandWorked(defaultSession.commitTransaction_forTesting()); + +jsTestLog("A new local read must see all committed writes."); +checkReads(defaultSession, [{_id: 0}, {_id: 1}], [{_id: "a"}, {_id: "b"}]); + +snapshotSession.endSession(); +majoritySession.endSession(); +localSession.endSession(); +defaultSession.endSession(); }()); diff --git a/jstests/core/txns/start_transaction_with_read.js b/jstests/core/txns/start_transaction_with_read.js index f49a4518171..045b9af1083 100644 --- a/jstests/core/txns/start_transaction_with_read.js +++ b/jstests/core/txns/start_transaction_with_read.js @@ -1,52 +1,57 @@ // Test transaction starting with read. 
// @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "start_transaction_with_read"; +const dbName = "test"; +const collName = "start_transaction_with_read"; - const testDB = db.getSiblingDB(dbName); - const coll = testDB[collName]; +const testDB = db.getSiblingDB(dbName); +const coll = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); +testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); - const sessionOptions = {causalConsistency: false}; - const session = testDB.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; +const sessionOptions = { + causalConsistency: false +}; +const session = testDB.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; - // Non-transactional write to give something to find. - const initialDoc = {_id: "pretransaction1", x: 0}; - assert.writeOK(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}})); +// Non-transactional write to give something to find. +const initialDoc = { + _id: "pretransaction1", + x: 0 +}; +assert.writeOK(sessionColl.insert(initialDoc, {writeConcern: {w: "majority"}})); - jsTest.log("Start a transaction with a read"); +jsTest.log("Start a transaction with a read"); - session.startTransaction(); +session.startTransaction(); - let docs = sessionColl.find({}).toArray(); - assert.sameMembers(docs, [initialDoc]); +let docs = sessionColl.find({}).toArray(); +assert.sameMembers(docs, [initialDoc]); - jsTest.log("Insert two documents in a transaction"); +jsTest.log("Insert two documents in a transaction"); - // Insert a doc within the transaction. - assert.commandWorked(sessionColl.insert({_id: "insert-1"})); +// Insert a doc within the transaction. +assert.commandWorked(sessionColl.insert({_id: "insert-1"})); - // Read in the same transaction returns the doc. - docs = sessionColl.find({_id: "insert-1"}).toArray(); - assert.sameMembers(docs, [{_id: "insert-1"}]); +// Read in the same transaction returns the doc. +docs = sessionColl.find({_id: "insert-1"}).toArray(); +assert.sameMembers(docs, [{_id: "insert-1"}]); - // Insert a doc within a transaction. - assert.commandWorked(sessionColl.insert({_id: "insert-2"})); +// Insert a doc within a transaction. +assert.commandWorked(sessionColl.insert({_id: "insert-2"})); - assert.commandWorked(session.commitTransaction_forTesting()); +assert.commandWorked(session.commitTransaction_forTesting()); - // Read with default read concern sees the committed transaction. - assert.eq({_id: "insert-1"}, coll.findOne({_id: "insert-1"})); - assert.eq({_id: "insert-2"}, coll.findOne({_id: "insert-2"})); - assert.eq(initialDoc, coll.findOne(initialDoc)); +// Read with default read concern sees the committed transaction. 
+assert.eq({_id: "insert-1"}, coll.findOne({_id: "insert-1"})); +assert.eq({_id: "insert-2"}, coll.findOne({_id: "insert-2"})); +assert.eq(initialDoc, coll.findOne(initialDoc)); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js index 82a8e4455fb..d93c0e818be 100644 --- a/jstests/core/txns/statement_ids_accepted.js +++ b/jstests/core/txns/statement_ids_accepted.js @@ -2,120 +2,197 @@ // commands that are allowed in transactions. // @tags: [uses_transactions, uses_prepare_transaction] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "statement_ids_accepted"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "statement_ids_accepted"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - let txnNumber = 0; +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +let txnNumber = 0; - jsTestLog("Check that abortTransaction accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - find: collName, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - // abortTransaction can only be run on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(1), - autocommit: false - })); +jsTestLog("Check that abortTransaction accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + find: collName, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); +// abortTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(1), + autocommit: false +})); - jsTestLog("Check that aggregate accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - aggregate: collName, - cursor: {}, - pipeline: [{$match: {}}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); +jsTestLog("Check that aggregate accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + aggregate: collName, + cursor: {}, + pipeline: [{$match: {}}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); - // The applyOps command is intentionally left out. +// The applyOps command is intentionally left out. 
- jsTestLog("Check that commitTransaction accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - find: collName, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - // commitTransaction can only be run on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - commitTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(1), - autocommit: false - })); +jsTestLog("Check that commitTransaction accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + find: collName, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); +// commitTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + commitTransaction: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(1), + autocommit: false +})); - jsTestLog("Check that delete accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - delete: collName, - deletes: [{q: {}, limit: 1}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); +jsTestLog("Check that delete accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + delete: collName, + deletes: [{q: {}, limit: 1}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); - jsTestLog("Check that distinct accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - distinct: collName, - key: "x", - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); +jsTestLog("Check that distinct accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + distinct: collName, + key: "x", + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); - jsTestLog("Check that find and getmore accept a statement ID"); - // Put in some data to find so getMore has a cursor to use. - assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}})); - let res = assert.commandWorked(sessionDb.runCommand({ - find: collName, - batchSize: 1, - filter: {}, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); +jsTestLog("Check that find and getmore accept a statement ID"); +// Put in some data to find so getMore has a cursor to use. 
+assert.writeOK(testColl.insert([{_id: 0}, {_id: 1}], {writeConcern: {w: "majority"}})); +let res = assert.commandWorked(sessionDb.runCommand({ + find: collName, + batchSize: 1, + filter: {}, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); + +assert.commandWorked(sessionDb.runCommand({ + getMore: res.cursor.id, + collection: collName, + batchSize: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(1), + autocommit: false +})); + +jsTestLog("Check that findandmodify accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + findandmodify: collName, + remove: true, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); + +jsTestLog("Check that findAndModify accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + findAndModify: collName, + remove: true, + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); + +// Abort the transaction to release locks. +// abortTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(0), + autocommit: false +})); +jsTestLog("Check that insert accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + insert: collName, + documents: [{_id: "doc1"}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); + +// Abort the transaction to release locks. +// abortTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(1), + autocommit: false +})); + +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +if (!isMongos) { + // Skip commands that do not exist on mongos. + + jsTestLog("Check that geoSearch accepts a statement ID"); + assert.writeOK(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}), + {writeConcern: {w: "majority"}}); + assert.writeOK(testColl.insert({geoh: {lat: 0, long: 0}, b: 0}), + {writeConcern: {w: "majority"}}); assert.commandWorked(sessionDb.runCommand({ - getMore: res.cursor.id, - collection: collName, - batchSize: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(1), - autocommit: false + createIndexes: collName, + indexes: [ + {name: "geo", key: {geo: "2dsphere"}}, + {name: "geoh", key: {geoh: "geoHaystack", b: 1}, bucketSize: 1} + ], + writeConcern: {w: "majority"} })); + // Ensure the snapshot is available following the index creation. 
+ assert.soonNoExcept(function() { + testColl.find({}, {readConcern: {level: "snapshot"}}); + return true; + }); - jsTestLog("Check that findandmodify accepts a statement ID"); + jsTestLog("Check that geoSearch accepts a statement ID"); assert.commandWorked(sessionDb.runCommand({ - findandmodify: collName, - remove: true, + geoSearch: collName, + search: {b: 0}, + near: [0, 0], + maxDistance: 1, readConcern: {level: "snapshot"}, txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(0), @@ -123,136 +200,60 @@ autocommit: false })); - jsTestLog("Check that findAndModify accepts a statement ID"); + jsTestLog("Check that prepareTransaction accepts a statement ID"); assert.commandWorked(sessionDb.runCommand({ - findAndModify: collName, - remove: true, + insert: collName, + documents: [{_id: "doc2"}], readConcern: {level: "snapshot"}, txnNumber: NumberLong(txnNumber), stmtId: NumberInt(0), startTransaction: true, autocommit: false })); - - // Abort the transaction to release locks. - // abortTransaction can only be run on the admin database. + // prepareTransaction can only be run on the admin database. assert.commandWorked(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - autocommit: false - })); - - jsTestLog("Check that insert accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "doc1"}], - readConcern: {level: "snapshot"}, + prepareTransaction: 1, txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(0), - startTransaction: true, + stmtId: NumberInt(1), autocommit: false })); - - // Abort the transaction to release locks. - // abortTransaction can only be run on the admin database. assert.commandWorked(sessionDb.adminCommand({ abortTransaction: 1, txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(1), + stmtId: NumberInt(2), autocommit: false })); - - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - if (!isMongos) { - // Skip commands that do not exist on mongos. - - jsTestLog("Check that geoSearch accepts a statement ID"); - assert.writeOK(testColl.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 0}), - {writeConcern: {w: "majority"}}); - assert.writeOK(testColl.insert({geoh: {lat: 0, long: 0}, b: 0}), - {writeConcern: {w: "majority"}}); - assert.commandWorked(sessionDb.runCommand({ - createIndexes: collName, - indexes: [ - {name: "geo", key: {geo: "2dsphere"}}, - {name: "geoh", key: {geoh: "geoHaystack", b: 1}, bucketSize: 1} - ], - writeConcern: {w: "majority"} - })); - // Ensure the snapshot is available following the index creation. - assert.soonNoExcept(function() { - testColl.find({}, {readConcern: {level: "snapshot"}}); - return true; - }); - - jsTestLog("Check that geoSearch accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - geoSearch: collName, - search: {b: 0}, - near: [0, 0], - maxDistance: 1, - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - - jsTestLog("Check that prepareTransaction accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - insert: collName, - documents: [{_id: "doc2"}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(0), - startTransaction: true, - autocommit: false - })); - // prepareTransaction can only be run on the admin database. 
- assert.commandWorked(sessionDb.adminCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(txnNumber), - stmtId: NumberInt(1), - autocommit: false - })); - assert.commandWorked(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(2), - autocommit: false - })); - assert.commandFailedWithCode(sessionDb.runCommand({ - prepareTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(0), - autocommit: false - }), - ErrorCodes.Unauthorized); - } - - // refreshLogicalSessionCacheNow is intentionally omitted. - - jsTestLog("Check that update accepts a statement ID"); - assert.commandWorked(sessionDb.runCommand({ - update: collName, - updates: [{q: {_id: "doc1"}, u: {$inc: {a: 1}}}], - readConcern: {level: "snapshot"}, - txnNumber: NumberLong(txnNumber), + assert.commandFailedWithCode(sessionDb.runCommand({ + prepareTransaction: 1, + txnNumber: NumberLong(txnNumber++), stmtId: NumberInt(0), - startTransaction: true, autocommit: false - })); + }), + ErrorCodes.Unauthorized); +} - // Abort the last transaction because it appears the system stalls during shutdown if - // a transaction is open. - // abortTransaction can only be run on the admin database. - assert.commandWorked(sessionDb.adminCommand({ - abortTransaction: 1, - txnNumber: NumberLong(txnNumber++), - stmtId: NumberInt(1), - autocommit: false - })); +// refreshLogicalSessionCacheNow is intentionally omitted. + +jsTestLog("Check that update accepts a statement ID"); +assert.commandWorked(sessionDb.runCommand({ + update: collName, + updates: [{q: {_id: "doc1"}, u: {$inc: {a: 1}}}], + readConcern: {level: "snapshot"}, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false +})); + +// Abort the last transaction because it appears the system stalls during shutdown if +// a transaction is open. +// abortTransaction can only be run on the admin database. +assert.commandWorked(sessionDb.adminCommand({ + abortTransaction: 1, + txnNumber: NumberLong(txnNumber++), + stmtId: NumberInt(1), + autocommit: false +})); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js index e26f88b85c3..6910dd88b68 100644 --- a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js +++ b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js @@ -5,235 +5,236 @@ * @tags: [uses_transactions, uses_prepare_transaction] */ (function() { - 'use strict'; +'use strict'; +load("jstests/libs/check_log.js"); +load('jstests/core/txns/libs/prepare_helpers.js'); +load('jstests/libs/parallel_shell_helpers.js'); + +TestData.dbName = 'test'; +const baseCollName = 'timestamped_reads_wait_for_prepare_oplog_visibility'; +const testDB = db.getSiblingDB(TestData.dbName); +TestData.failureTimeout = 1 * 1000; // 1 second. +TestData.successTimeout = 5 * 60 * 1000; // 5 minutes. +TestData.txnDoc = { + _id: 1, + x: 1 +}; +TestData.otherDoc = { + _id: 2, + y: 7 +}; +TestData.txnDocFilter = { + _id: TestData.txnDoc._id +}; +TestData.otherDocFilter = { + _id: TestData.otherDoc._id +}; + +/** + * A function that accepts a 'readFunc' and a collection name. 'readFunc' accepts a collection + * name and returns an object with an 'oplogVisibility' test field and a 'prepareConflict' test + * field. 
This function is run in a separate thread and tests that oplog visibility blocks + * certain reads and that prepare conflicts block other types of reads. + */ +const readThreadFunc = function(readFunc, _collName) { load("jstests/libs/check_log.js"); - load('jstests/core/txns/libs/prepare_helpers.js'); - load('jstests/libs/parallel_shell_helpers.js'); - - TestData.dbName = 'test'; - const baseCollName = 'timestamped_reads_wait_for_prepare_oplog_visibility'; - const testDB = db.getSiblingDB(TestData.dbName); - TestData.failureTimeout = 1 * 1000; // 1 second. - TestData.successTimeout = 5 * 60 * 1000; // 5 minutes. - TestData.txnDoc = {_id: 1, x: 1}; - TestData.otherDoc = {_id: 2, y: 7}; - TestData.txnDocFilter = {_id: TestData.txnDoc._id}; - TestData.otherDocFilter = {_id: TestData.otherDoc._id}; - - /** - * A function that accepts a 'readFunc' and a collection name. 'readFunc' accepts a collection - * name and returns an object with an 'oplogVisibility' test field and a 'prepareConflict' test - * field. This function is run in a separate thread and tests that oplog visibility blocks - * certain reads and that prepare conflicts block other types of reads. - */ - const readThreadFunc = function(readFunc, _collName) { - load("jstests/libs/check_log.js"); - - // Do not start reads until we are blocked in 'prepareTransaction'. - checkLog.contains(db.getMongo(), "hangAfterReservingPrepareTimestamp fail point enabled"); - - // Create a 'readFuncObj' from the 'readFunc'. - const readFuncObj = readFunc(_collName); - readFuncObj.oplogVisibility(); - - // Let the transaction finish preparing and wait for 'prepareTransaction' to complete. - assert.commandWorked(db.adminCommand( - {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'off'})); - checkLog.contains(db.getMongo(), "command: prepareTransaction"); - - readFuncObj.prepareConflict(); - }; - function runTest(prefix, readFunc) { - // Reset the log history between tests. - assert.commandWorked(db.adminCommand({clearLog: 'global'})); + // Do not start reads until we are blocked in 'prepareTransaction'. + checkLog.contains(db.getMongo(), "hangAfterReservingPrepareTimestamp fail point enabled"); + + // Create a 'readFuncObj' from the 'readFunc'. + const readFuncObj = readFunc(_collName); + readFuncObj.oplogVisibility(); + + // Let the transaction finish preparing and wait for 'prepareTransaction' to complete. + assert.commandWorked( + db.adminCommand({configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'off'})); + checkLog.contains(db.getMongo(), "command: prepareTransaction"); + + readFuncObj.prepareConflict(); +}; + +function runTest(prefix, readFunc) { + // Reset the log history between tests. + assert.commandWorked(db.adminCommand({clearLog: 'global'})); + + jsTestLog('Testing oplog visibility for ' + prefix); + const collName = baseCollName + '_' + prefix; + const testColl = testDB.getCollection(collName); + + testColl.drop({writeConcern: {w: "majority"}}); + assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: 'majority'}})); + + assert.commandWorked(testDB.adminCommand( + {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'alwaysOn'})); + + // Insert a document for the transaction. + assert.commandWorked(testColl.insert(TestData.txnDoc)); + // Insert a document untouched by the transaction. + assert.commandWorked(testColl.insert(TestData.otherDoc, {writeconcern: {w: "majority"}})); + + // Start a transaction with a single update on the 'txnDoc'. 
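    // (The single update to 'txnDoc' is what later produces the prepare conflicts this test looks
    // for: once the transaction is prepared, timestamped reads that touch 'txnDoc' block until the
    // transaction commits or aborts, while reads of the untouched 'otherDoc' keep succeeding.)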
+ const session = db.getMongo().startSession({causalConsistency: false}); + const sessionDB = session.getDatabase(TestData.dbName); + session.startTransaction({readConcern: {level: 'snapshot'}}); + assert.commandWorked(sessionDB[collName].update(TestData.txnDoc, {$inc: {x: 1}})); + + // We set the log level up to know when 'prepareTransaction' completes. + db.setLogLevel(1); - jsTestLog('Testing oplog visibility for ' + prefix); - const collName = baseCollName + '_' + prefix; - const testColl = testDB.getCollection(collName); + // Clear the log history to ensure we only see the most recent 'prepareTransaction' + // failpoint log message. + assert.commandWorked(db.adminCommand({clearLog: 'global'})); + const joinReadThread = startParallelShell(funWithArgs(readThreadFunc, readFunc, collName)); - testColl.drop({writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: 'majority'}})); + jsTestLog("Preparing the transaction for " + prefix); + const prepareTimestamp = PrepareHelpers.prepareTransaction(session); - assert.commandWorked(testDB.adminCommand( - {configureFailPoint: 'hangAfterReservingPrepareTimestamp', mode: 'alwaysOn'})); + db.setLogLevel(0); + joinReadThread({checkExitSuccess: true}); - // Insert a document for the transaction. - assert.commandWorked(testColl.insert(TestData.txnDoc)); - // Insert a document untouched by the transaction. - assert.commandWorked(testColl.insert(TestData.otherDoc, {writeconcern: {w: "majority"}})); + PrepareHelpers.commitTransaction(session, prepareTimestamp); +} + +const snapshotRead = function(_collName) { + const _db = db.getSiblingDB(TestData.dbName); + + const session = db.getMongo().startSession({causalConsistency: false}); + const sessionDB = session.getDatabase(TestData.dbName); + + const oplogVisibility = function() { + jsTestLog("Snapshot reads should not block on oplog visibility."); + session.startTransaction({readConcern: {level: 'snapshot'}}); + let cursor = assert.commandWorked(sessionDB.runCommand( + {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout})); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor)); + assert.commandWorked(session.abortTransaction_forTesting()); - // Start a transaction with a single update on the 'txnDoc'. - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(TestData.dbName); session.startTransaction({readConcern: {level: 'snapshot'}}); - assert.commandWorked(sessionDB[collName].update(TestData.txnDoc, {$inc: {x: 1}})); - - // We set the log level up to know when 'prepareTransaction' completes. - db.setLogLevel(1); - - // Clear the log history to ensure we only see the most recent 'prepareTransaction' - // failpoint log message. 
- assert.commandWorked(db.adminCommand({clearLog: 'global'})); - const joinReadThread = startParallelShell(funWithArgs(readThreadFunc, readFunc, collName)); - - jsTestLog("Preparing the transaction for " + prefix); - const prepareTimestamp = PrepareHelpers.prepareTransaction(session); - - db.setLogLevel(0); - joinReadThread({checkExitSuccess: true}); - - PrepareHelpers.commitTransaction(session, prepareTimestamp); - } - - const snapshotRead = function(_collName) { - const _db = db.getSiblingDB(TestData.dbName); - - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(TestData.dbName); - - const oplogVisibility = function() { - jsTestLog("Snapshot reads should not block on oplog visibility."); - session.startTransaction({readConcern: {level: 'snapshot'}}); - let cursor = assert.commandWorked(sessionDB.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor)); - assert.commandWorked(session.abortTransaction_forTesting()); - - session.startTransaction({readConcern: {level: 'snapshot'}}); - cursor = assert.commandWorked(sessionDB.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); - assert.commandWorked(session.abortTransaction_forTesting()); - }; - - const prepareConflict = function() { - jsTestLog("Snapshot reads should block on prepared transactions for " + - "conflicting documents."); - session.startTransaction({readConcern: {level: 'snapshot'}}); - let cursor = assert.commandFailedWithCode(sessionDB.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - maxTimeMS: TestData.failureTimeout - }), - ErrorCodes.MaxTimeMSExpired); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTestLog("Snapshot reads should succeed on non-conflicting documents while a " + - "transaction is in prepare."); - session.startTransaction({readConcern: {level: 'snapshot'}}); - cursor = assert.commandWorked(sessionDB.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); - assert.commandWorked(session.abortTransaction_forTesting()); - }; - - return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict}; + cursor = assert.commandWorked(sessionDB.runCommand({ + find: _collName, + filter: TestData.otherDocFilter, + maxTimeMS: TestData.successTimeout + })); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); + assert.commandWorked(session.abortTransaction_forTesting()); }; - const afterClusterTime = function(_collName) { - const _db = db.getSiblingDB(TestData.dbName); - - // Advance the cluster time with an arbitrary other insert. 
- let res = assert.commandWorked( - _db.runCommand({insert: _collName, documents: [{advanceClusterTime: 1}]})); - assert(res.hasOwnProperty("$clusterTime"), tojson(res)); - assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res)); - const clusterTime = res.$clusterTime.clusterTime; - jsTestLog("Using afterClusterTime: " + clusterTime); - - const oplogVisibility = function() { - jsTestLog("afterClusterTime reads should block on oplog visibility."); - assert.commandFailedWithCode(_db.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - readConcern: {afterClusterTime: clusterTime}, - maxTimeMS: TestData.failureTimeout - }), - ErrorCodes.MaxTimeMSExpired); - assert.commandFailedWithCode(_db.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - readConcern: {afterClusterTime: clusterTime}, - maxTimeMS: TestData.failureTimeout - }), - ErrorCodes.MaxTimeMSExpired); - }; - - const prepareConflict = function() { - jsTestLog("afterClusterTime reads should block on prepared transactions for " + - "conflicting documents."); - assert.commandFailedWithCode(_db.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - readConcern: {afterClusterTime: clusterTime}, - maxTimeMS: TestData.failureTimeout - }), - ErrorCodes.MaxTimeMSExpired); - - jsTestLog("afterClusterTime reads should succeed on non-conflicting documents " + - "while transaction is in prepare."); - let cursor = assert.commandWorked(_db.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - readConcern: {afterClusterTime: clusterTime}, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); - }; - - return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict}; + const prepareConflict = function() { + jsTestLog("Snapshot reads should block on prepared transactions for " + + "conflicting documents."); + session.startTransaction({readConcern: {level: 'snapshot'}}); + let cursor = assert.commandFailedWithCode(sessionDB.runCommand({ + find: _collName, + filter: TestData.txnDocFilter, + maxTimeMS: TestData.failureTimeout + }), + ErrorCodes.MaxTimeMSExpired); + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + + jsTestLog("Snapshot reads should succeed on non-conflicting documents while a " + + "transaction is in prepare."); + session.startTransaction({readConcern: {level: 'snapshot'}}); + cursor = assert.commandWorked(sessionDB.runCommand({ + find: _collName, + filter: TestData.otherDocFilter, + maxTimeMS: TestData.successTimeout + })); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); + assert.commandWorked(session.abortTransaction_forTesting()); }; - const normalRead = function(_collName) { - const _db = db.getSiblingDB(TestData.dbName); - - const oplogVisibility = function() { - jsTestLog("Ordinary reads should not block on oplog visibility."); - let cursor = assert.commandWorked(_db.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor)); - cursor = assert.commandWorked(_db.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); - }; - - const prepareConflict = function() { - jsTestLog("Ordinary reads should not block on prepared 
transactions."); - let cursor = assert.commandWorked(_db.runCommand({ - find: _collName, - filter: TestData.txnDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor)); - cursor = assert.commandWorked(_db.runCommand({ - find: _collName, - filter: TestData.otherDocFilter, - maxTimeMS: TestData.successTimeout - })); - assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); - }; - - return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict}; + return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict}; +}; + +const afterClusterTime = function(_collName) { + const _db = db.getSiblingDB(TestData.dbName); + + // Advance the cluster time with an arbitrary other insert. + let res = assert.commandWorked( + _db.runCommand({insert: _collName, documents: [{advanceClusterTime: 1}]})); + assert(res.hasOwnProperty("$clusterTime"), tojson(res)); + assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res)); + const clusterTime = res.$clusterTime.clusterTime; + jsTestLog("Using afterClusterTime: " + clusterTime); + + const oplogVisibility = function() { + jsTestLog("afterClusterTime reads should block on oplog visibility."); + assert.commandFailedWithCode(_db.runCommand({ + find: _collName, + filter: TestData.txnDocFilter, + readConcern: {afterClusterTime: clusterTime}, + maxTimeMS: TestData.failureTimeout + }), + ErrorCodes.MaxTimeMSExpired); + assert.commandFailedWithCode(_db.runCommand({ + find: _collName, + filter: TestData.otherDocFilter, + readConcern: {afterClusterTime: clusterTime}, + maxTimeMS: TestData.failureTimeout + }), + ErrorCodes.MaxTimeMSExpired); }; - runTest('normal_reads', normalRead); - runTest('snapshot_reads', snapshotRead); - runTest('afterClusterTime', afterClusterTime); + const prepareConflict = function() { + jsTestLog("afterClusterTime reads should block on prepared transactions for " + + "conflicting documents."); + assert.commandFailedWithCode(_db.runCommand({ + find: _collName, + filter: TestData.txnDocFilter, + readConcern: {afterClusterTime: clusterTime}, + maxTimeMS: TestData.failureTimeout + }), + ErrorCodes.MaxTimeMSExpired); + + jsTestLog("afterClusterTime reads should succeed on non-conflicting documents " + + "while transaction is in prepare."); + let cursor = assert.commandWorked(_db.runCommand({ + find: _collName, + filter: TestData.otherDocFilter, + readConcern: {afterClusterTime: clusterTime}, + maxTimeMS: TestData.successTimeout + })); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); + }; + + return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict}; +}; + +const normalRead = function(_collName) { + const _db = db.getSiblingDB(TestData.dbName); + + const oplogVisibility = function() { + jsTestLog("Ordinary reads should not block on oplog visibility."); + let cursor = assert.commandWorked(_db.runCommand( + {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout})); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor)); + cursor = assert.commandWorked(_db.runCommand({ + find: _collName, + filter: TestData.otherDocFilter, + maxTimeMS: TestData.successTimeout + })); + assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor)); + }; + + const prepareConflict = function() { + jsTestLog("Ordinary reads should not block on prepared transactions."); + let cursor = 
assert.commandWorked(_db.runCommand(
+ {find: _collName, filter: TestData.txnDocFilter, maxTimeMS: TestData.successTimeout}));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.txnDoc], tojson(cursor));
+ cursor = assert.commandWorked(_db.runCommand({
+ find: _collName,
+ filter: TestData.otherDocFilter,
+ maxTimeMS: TestData.successTimeout
+ }));
+ assert.sameMembers(cursor.cursor.firstBatch, [TestData.otherDoc], tojson(cursor));
+ };
+
+ return {oplogVisibility: oplogVisibility, prepareConflict: prepareConflict};
+};
+
+runTest('normal_reads', normalRead);
+runTest('snapshot_reads', snapshotRead);
+runTest('afterClusterTime', afterClusterTime);
})();
diff --git a/jstests/core/txns/transaction_error_handling.js b/jstests/core/txns/transaction_error_handling.js
index 26ccd742934..74852bd58f8 100644
--- a/jstests/core/txns/transaction_error_handling.js
+++ b/jstests/core/txns/transaction_error_handling.js
@@ -1,122 +1,124 @@
// Test basic transaction error handling.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "transaction_error_handling";
- const testDB = db.getSiblingDB(dbName);
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- jsTestLog("Test that we cannot abort or commit a nonexistent transaction.");
- // Cannot abort or commit a nonexistent transaction.
- try {
- assert.commandWorked(session.commitTransaction_forTesting());
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to commit on this session.");
- }
-
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "There is no active transaction to abort on this session.");
- }
-
- // Try to start a transaction when the state is 'active'.
- jsTestLog("Test that we cannot start a transaction with one already started or in progress.");
- session.startTransaction();
- try {
- session.startTransaction();
- } catch (e) {
- assert.eq(e.message, "Transaction already in progress on this session.");
- }
-
- // Try starting a transaction after inserting something.
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- // Try to start a transaction when the state is 'active'.
- try {
- session.startTransaction();
- } catch (e) {
- assert.eq(e.message, "Transaction already in progress on this session.");
- }
-
- // At this point, the transaction is still 'active'. We will commit this transaction and test
- // that calling commitTransaction again should work while calling abortTransaction should not.
- assert.commandWorked(session.commitTransaction_forTesting());
+"use strict";

- jsTestLog("Test that we can commit a transaction more than once.");
- // The transaction state is 'committed'. We can call commitTransaction again in this state.
- assert.commandWorked(session.commitTransaction_forTesting());
+const dbName = "test";
+const collName = "transaction_error_handling";
+const testDB = db.getSiblingDB(dbName);
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));

- jsTestLog("Test that we cannot abort a transaction that has already been committed");
- // We cannot call abortTransaction on a transaction that has already been committed.
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "Cannot call abortTransaction after calling commitTransaction.");
- }
-
- // Start a new transaction that will be aborted. Test that we cannot call commit or
- // abortTransaction on a transaction that is in the 'aborted' state.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb.getCollection(collName);
+
+jsTestLog("Test that we cannot abort or commit a nonexistent transaction.");
+// Cannot abort or commit a nonexistent transaction.
+try {
+ assert.commandWorked(session.commitTransaction_forTesting());
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to commit on this session.");
+}
+
+try {
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+} catch (e) {
+ assert.eq(e.message, "There is no active transaction to abort on this session.");
+}
+
+// Try to start a transaction when the state is 'active'.
+jsTestLog("Test that we cannot start a transaction with one already started or in progress.");
+session.startTransaction();
+try {
 session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- assert.commandWorked(session.abortTransaction_forTesting());
-
- jsTestLog("Test that we cannot commit a transaction that has already been aborted.");
- // We cannot call commitTransaction on a transaction that has already been aborted.
- try {
- assert.commandWorked(session.commitTransaction_forTesting());
- } catch (e) {
- assert.eq(e.message, "Cannot call commitTransaction after calling abortTransaction.");
- }
-
- jsTestLog("Test that we cannot abort a transaction that has already been aborted.");
- // We also cannot call abortTransaction on a transaction that has already been aborted.
- try {
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } catch (e) {
- assert.eq(e.message, "Cannot call abortTransaction twice.");
- }
-
- jsTestLog(
- "Test that a normal operation after committing a transaction changes the state to inactive.");
+} catch (e) {
+ assert.eq(e.message, "Transaction already in progress on this session.");
+}
+
+// Try starting a transaction after inserting something.
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+// Try to start a transaction when the state is 'active'.
+try {
 session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
- // The transaction state should be changed to 'committed'.
+} catch (e) {
+ assert.eq(e.message, "Transaction already in progress on this session.");
+}
+
+// At this point, the transaction is still 'active'. We will commit this transaction and test
+// that calling commitTransaction again should work while calling abortTransaction should not.
+assert.commandWorked(session.commitTransaction_forTesting()); + +jsTestLog("Test that we can commit a transaction more than once."); +// The transaction state is 'committed'. We can call commitTransaction again in this state. +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTestLog("Test that we cannot abort a transaction that has already been committed"); +// We cannot call abortTransaction on a transaction that has already been committed. +try { + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} catch (e) { + assert.eq(e.message, "Cannot call abortTransaction after calling commitTransaction."); +} + +// Start a new transaction that will be aborted. Test that we cannot call commit or +// abortTransaction on a transaction that is in the 'aborted' state. +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: "insert-2"})); +assert.commandWorked(session.abortTransaction_forTesting()); + +jsTestLog("Test that we cannot commit a transaction that has already been aborted."); +// We cannot call commitTransaction on a transaction that has already been aborted. +try { assert.commandWorked(session.commitTransaction_forTesting()); - // The transaction state should be changed to 'inactive'. - assert.commandWorked(sessionColl.insert({_id: "normal-insert"})); - try { - assert.commandWorked(session.commitTransaction_forTesting()); - } catch (e) { - assert.eq(e.message, "There is no active transaction to commit on this session."); - } - - jsTestLog( - "Test that a normal operation after aborting a transaction changes the state to inactive."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert({_id: "insert-4"})); - // The transaction state should be changed to 'aborted'. - assert.commandWorked(session.abortTransaction_forTesting()); - // The transaction state should be changed to 'inactive'. - assert.commandWorked(sessionColl.insert({_id: "normal-insert-2"})); - try { - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } catch (e) { - assert.eq(e.message, "There is no active transaction to abort on this session."); - } - - session.endSession(); +} catch (e) { + assert.eq(e.message, "Cannot call commitTransaction after calling abortTransaction."); +} + +jsTestLog("Test that we cannot abort a transaction that has already been aborted."); +// We also cannot call abortTransaction on a transaction that has already been aborted. +try { + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} catch (e) { + assert.eq(e.message, "Cannot call abortTransaction twice."); +} + +jsTestLog( + "Test that a normal operation after committing a transaction changes the state to inactive."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: "insert-3"})); +// The transaction state should be changed to 'committed'. +assert.commandWorked(session.commitTransaction_forTesting()); +// The transaction state should be changed to 'inactive'. 
+assert.commandWorked(sessionColl.insert({_id: "normal-insert"})); +try { + assert.commandWorked(session.commitTransaction_forTesting()); +} catch (e) { + assert.eq(e.message, "There is no active transaction to commit on this session."); +} + +jsTestLog( + "Test that a normal operation after aborting a transaction changes the state to inactive."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert({_id: "insert-4"})); +// The transaction state should be changed to 'aborted'. +assert.commandWorked(session.abortTransaction_forTesting()); +// The transaction state should be changed to 'inactive'. +assert.commandWorked(sessionColl.insert({_id: "normal-insert-2"})); +try { + assert.commandFailedWithCode(session.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); +} catch (e) { + assert.eq(e.message, "There is no active transaction to abort on this session."); +} + +session.endSession(); }()); diff --git a/jstests/core/txns/transaction_ops_against_capped_collection.js b/jstests/core/txns/transaction_ops_against_capped_collection.js index b7a0720a875..86c7c4f3383 100644 --- a/jstests/core/txns/transaction_ops_against_capped_collection.js +++ b/jstests/core/txns/transaction_ops_against_capped_collection.js @@ -6,99 +6,95 @@ * @tags: [requires_capped, uses_transactions] */ (function() { - "use strict"; - - const dbName = "test"; - const cappedCollName = "transaction_ops_against_capped_collection"; - const testDB = db.getSiblingDB(dbName); - const cappedTestColl = testDB.getCollection(cappedCollName); - const testDocument = {"a": 1}; - - cappedTestColl.drop({writeConcern: {w: "majority"}}); - - jsTest.log("Creating a capped collection '" + dbName + "." + cappedCollName + "'."); - assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 500})); - - jsTest.log("Adding a document to the capped collection so that the update op can be tested " + - "in the subsequent transaction attempts"); - assert.commandWorked(cappedTestColl.insert(testDocument)); - - jsTest.log("Setting up a transaction in which to execute transaction ops."); - const session = db.getMongo().startSession(); - const sessionDB = session.getDatabase(dbName); - const sessionCappedColl = sessionDB.getCollection(cappedCollName); - - jsTest.log( - "Starting individual transactions for writes against capped collections that should " + - " fail."); - - /* - * Write ops (should fail): - */ - - jsTest.log("About to try: insert"); - session.startTransaction(); - assert.commandFailedWithCode(sessionCappedColl.insert({"x": 55}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTest.log("About to try: update"); - session.startTransaction(); - assert.commandFailedWithCode(sessionCappedColl.update(testDocument, {"a": 1000}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTest.log("About to try: findAndModify (update version)"); - session.startTransaction(); - assert.commandFailedWithCode( - sessionDB.runCommand( - {findAndModify: cappedCollName, query: testDocument, update: {"$set": {"a": 1000}}}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTest.log("About to try: findAndModify (remove version)"); - session.startTransaction(); - assert.commandFailedWithCode( - 
sessionDB.runCommand({findAndModify: cappedCollName, query: testDocument, remove: true}), - ErrorCodes.OperationNotSupportedInTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - // Deletes do not work against capped collections so we will not test them in transactions. - - jsTest.log( - "Starting individual transactions for reads against capped collections that should " + - " succeed."); - - /* - * Read ops (should succeed): - */ - - jsTest.log("About to try: find"); - session.startTransaction(); - let findRes = assert.commandWorked(sessionDB.runCommand({"find": cappedCollName})); - assert.eq(1, findRes.cursor.firstBatch[0].a); - assert.commandWorked(session.abortTransaction_forTesting()); - - jsTest.log("About to try: distinct"); - session.startTransaction(); - let distinctRes = - assert.commandWorked(sessionDB.runCommand({"distinct": cappedCollName, "key": "a"})); - assert.eq(1, distinctRes.values); - assert.commandWorked(session.abortTransaction_forTesting()); - - jsTest.log("About to try: aggregate"); - session.startTransaction(); - let aggRes = assert.commandWorked(sessionDB.runCommand({ - aggregate: cappedCollName, - pipeline: [{$match: {"a": 1}}], - cursor: {}, - })); - assert.eq(1, aggRes.cursor.firstBatch[0].a); - assert.commandWorked(session.abortTransaction_forTesting()); - - session.endSession(); +"use strict"; + +const dbName = "test"; +const cappedCollName = "transaction_ops_against_capped_collection"; +const testDB = db.getSiblingDB(dbName); +const cappedTestColl = testDB.getCollection(cappedCollName); +const testDocument = { + "a": 1 +}; + +cappedTestColl.drop({writeConcern: {w: "majority"}}); + +jsTest.log("Creating a capped collection '" + dbName + "." + cappedCollName + "'."); +assert.commandWorked(testDB.createCollection(cappedCollName, {capped: true, size: 500})); + +jsTest.log("Adding a document to the capped collection so that the update op can be tested " + + "in the subsequent transaction attempts"); +assert.commandWorked(cappedTestColl.insert(testDocument)); + +jsTest.log("Setting up a transaction in which to execute transaction ops."); +const session = db.getMongo().startSession(); +const sessionDB = session.getDatabase(dbName); +const sessionCappedColl = sessionDB.getCollection(cappedCollName); + +jsTest.log("Starting individual transactions for writes against capped collections that should " + + " fail."); + +/* + * Write ops (should fail): + */ + +jsTest.log("About to try: insert"); +session.startTransaction(); +assert.commandFailedWithCode(sessionCappedColl.insert({"x": 55}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTest.log("About to try: update"); +session.startTransaction(); +assert.commandFailedWithCode(sessionCappedColl.update(testDocument, {"a": 1000}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTest.log("About to try: findAndModify (update version)"); +session.startTransaction(); +assert.commandFailedWithCode( + sessionDB.runCommand( + {findAndModify: cappedCollName, query: testDocument, update: {"$set": {"a": 1000}}}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTest.log("About to try: findAndModify (remove version)"); +session.startTransaction(); 
+assert.commandFailedWithCode( + sessionDB.runCommand({findAndModify: cappedCollName, query: testDocument, remove: true}), + ErrorCodes.OperationNotSupportedInTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +// Deletes do not work against capped collections so we will not test them in transactions. + +jsTest.log("Starting individual transactions for reads against capped collections that should " + + " succeed."); + +/* + * Read ops (should succeed): + */ + +jsTest.log("About to try: find"); +session.startTransaction(); +let findRes = assert.commandWorked(sessionDB.runCommand({"find": cappedCollName})); +assert.eq(1, findRes.cursor.firstBatch[0].a); +assert.commandWorked(session.abortTransaction_forTesting()); + +jsTest.log("About to try: distinct"); +session.startTransaction(); +let distinctRes = + assert.commandWorked(sessionDB.runCommand({"distinct": cappedCollName, "key": "a"})); +assert.eq(1, distinctRes.values); +assert.commandWorked(session.abortTransaction_forTesting()); + +jsTest.log("About to try: aggregate"); +session.startTransaction(); +let aggRes = assert.commandWorked(sessionDB.runCommand({ + aggregate: cappedCollName, + pipeline: [{$match: {"a": 1}}], + cursor: {}, +})); +assert.eq(1, aggRes.cursor.firstBatch[0].a); +assert.commandWorked(session.abortTransaction_forTesting()); + +session.endSession(); })(); diff --git a/jstests/core/txns/transactions_block_ddl.js b/jstests/core/txns/transactions_block_ddl.js index 70b085c6b71..5e34a4b84be 100644 --- a/jstests/core/txns/transactions_block_ddl.js +++ b/jstests/core/txns/transactions_block_ddl.js @@ -1,121 +1,129 @@ // Test that open transactions block DDL operations on the involved collections. // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - load("jstests/libs/parallelTester.js"); // for ScopedThread. +load("jstests/libs/parallelTester.js"); // for ScopedThread. - const dbName = "transactions_block_ddl"; - const collName = "transactions_block_ddl"; - const otherDBName = "transactions_block_ddl_other"; - const otherCollName = "transactions_block_ddl_other"; - const testDB = db.getSiblingDB(dbName); +const dbName = "transactions_block_ddl"; +const collName = "transactions_block_ddl"; +const otherDBName = "transactions_block_ddl_other"; +const otherCollName = "transactions_block_ddl_other"; +const testDB = db.getSiblingDB(dbName); - const session = testDB.getMongo().startSession({causalConsistency: false}); - const sessionDB = session.getDatabase(dbName); - const sessionColl = sessionDB[collName]; +const session = testDB.getMongo().startSession({causalConsistency: false}); +const sessionDB = session.getDatabase(dbName); +const sessionColl = sessionDB[collName]; - /** - * Tests that DDL operations block on transactions and fail when their maxTimeMS expires. - */ - function testTimeout(cmdDBName, ddlCmd) { - // Setup. - sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"})); +/** + * Tests that DDL operations block on transactions and fail when their maxTimeMS expires. + */ +function testTimeout(cmdDBName, ddlCmd) { + // Setup. 
+ sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));

- session.startTransaction();
- assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
- assert.commandFailedWithCode(
- testDB.getSiblingDB(cmdDBName).runCommand(Object.assign({}, ddlCmd, {maxTimeMS: 500})),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandWorked(session.commitTransaction_forTesting());
- }
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
+ assert.commandFailedWithCode(
+ testDB.getSiblingDB(cmdDBName).runCommand(Object.assign({}, ddlCmd, {maxTimeMS: 500})),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandWorked(session.commitTransaction_forTesting());
+}

- /**
- * Tests that DDL operations block on transactions but can succeed once the transaction commits.
- */
- function testSuccessOnTxnCommit(cmdDBName, ddlCmd, currentOpFilter) {
- // Setup.
- sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));
+/**
+ * Tests that DDL operations block on transactions but can succeed once the transaction commits.
+ */
+function testSuccessOnTxnCommit(cmdDBName, ddlCmd, currentOpFilter) {
+ // Setup.
+ sessionDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(sessionColl.createIndex({b: 1}, {name: "b_1"}));

- jsTestLog("About to start transaction");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
- jsTestLog("Transaction started, running ddl operation " + ddlCmd);
- let thread = new ScopedThread(function(cmdDBName, ddlCmd) {
- return db.getSiblingDB(cmdDBName).runCommand(ddlCmd);
- }, cmdDBName, ddlCmd);
- thread.start();
- // Wait for the DDL operation to have pending locks.
- assert.soon(
- function() {
- // Note that we cannot use the $currentOp agg stage because it acquires locks
- // (SERVER-35289).
+ jsTestLog("About to start transaction");
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({a: 5, b: 6}));
+ jsTestLog("Transaction started, running ddl operation " + ddlCmd);
+ let thread = new ScopedThread(function(cmdDBName, ddlCmd) {
+ return db.getSiblingDB(cmdDBName).runCommand(ddlCmd);
+ }, cmdDBName, ddlCmd);
+ thread.start();
+ // Wait for the DDL operation to have pending locks.
+ assert.soon(
+ function() {
+ // Note that we cannot use the $currentOp agg stage because it acquires locks
+ // (SERVER-35289).
+ return testDB.currentOp({$and: [currentOpFilter, {waitingForLock: true}]}) + .inprog.length === 1; + }, + function() { + return "Failed to find DDL command in currentOp output: " + + tojson(testDB.currentOp().inprog); + }); + jsTestLog("Committing transaction"); + assert.commandWorked(session.commitTransaction_forTesting()); + jsTestLog("Transaction committed, waiting for ddl operation to complete."); + thread.join(); + assert.commandWorked(thread.returnData()); +} - jsTestLog("Testing that 'drop' blocks on transactions"); - const dropCmd = {drop: collName, writeConcern: {w: "majority"}}; - testTimeout(dbName, dropCmd); - testSuccessOnTxnCommit(dbName, dropCmd, {"command.drop": collName}); +jsTestLog("Testing that 'drop' blocks on transactions"); +const dropCmd = { + drop: collName, + writeConcern: {w: "majority"} +}; +testTimeout(dbName, dropCmd); +testSuccessOnTxnCommit(dbName, dropCmd, {"command.drop": collName}); - jsTestLog("Testing that 'dropDatabase' blocks on transactions"); - const dropDatabaseCmd = {dropDatabase: 1, writeConcern: {w: "majority"}}; - testTimeout(dbName, dropDatabaseCmd); - testSuccessOnTxnCommit(dbName, dropDatabaseCmd, {"command.dropDatabase": 1}); +jsTestLog("Testing that 'dropDatabase' blocks on transactions"); +const dropDatabaseCmd = { + dropDatabase: 1, + writeConcern: {w: "majority"} +}; +testTimeout(dbName, dropDatabaseCmd); +testSuccessOnTxnCommit(dbName, dropDatabaseCmd, {"command.dropDatabase": 1}); - jsTestLog("Testing that 'renameCollection' within databases blocks on transactions"); - testDB.runCommand({drop: otherCollName, writeConcern: {w: "majority"}}); - const renameCollectionCmdSameDB = { - renameCollection: sessionColl.getFullName(), - to: dbName + "." + otherCollName, - writeConcern: {w: "majority"} - }; - testTimeout("admin", renameCollectionCmdSameDB); - testSuccessOnTxnCommit("admin", - renameCollectionCmdSameDB, - {"command.renameCollection": sessionColl.getFullName()}); +jsTestLog("Testing that 'renameCollection' within databases blocks on transactions"); +testDB.runCommand({drop: otherCollName, writeConcern: {w: "majority"}}); +const renameCollectionCmdSameDB = { + renameCollection: sessionColl.getFullName(), + to: dbName + "." + otherCollName, + writeConcern: {w: "majority"} +}; +testTimeout("admin", renameCollectionCmdSameDB); +testSuccessOnTxnCommit( + "admin", renameCollectionCmdSameDB, {"command.renameCollection": sessionColl.getFullName()}); - jsTestLog("Testing that 'renameCollection' across databases blocks on transactions"); - testDB.getSiblingDB(otherDBName) - .runCommand({drop: otherCollName, writeConcern: {w: "majority"}}); - const renameCollectionCmdDifferentDB = { - renameCollection: sessionColl.getFullName(), - to: otherDBName + "." + otherCollName, - writeConcern: {w: "majority"} - }; - testTimeout("admin", renameCollectionCmdDifferentDB); - testSuccessOnTxnCommit("admin", - renameCollectionCmdDifferentDB, - {"command.renameCollection": sessionColl.getFullName()}); +jsTestLog("Testing that 'renameCollection' across databases blocks on transactions"); +testDB.getSiblingDB(otherDBName).runCommand({drop: otherCollName, writeConcern: {w: "majority"}}); +const renameCollectionCmdDifferentDB = { + renameCollection: sessionColl.getFullName(), + to: otherDBName + "." 
+ otherCollName, + writeConcern: {w: "majority"} +}; +testTimeout("admin", renameCollectionCmdDifferentDB); +testSuccessOnTxnCommit("admin", + renameCollectionCmdDifferentDB, + {"command.renameCollection": sessionColl.getFullName()}); - jsTestLog("Testing that 'createIndexes' blocks on transactions"); - // The transaction will insert a document that has a field 'a'. - const createIndexesCmd = { - createIndexes: collName, - indexes: [{key: {a: 1}, name: "a_1"}], - writeConcern: {w: "majority"} - }; - testTimeout(dbName, createIndexesCmd); - testSuccessOnTxnCommit(dbName, createIndexesCmd, {"command.createIndexes": collName}); +jsTestLog("Testing that 'createIndexes' blocks on transactions"); +// The transaction will insert a document that has a field 'a'. +const createIndexesCmd = { + createIndexes: collName, + indexes: [{key: {a: 1}, name: "a_1"}], + writeConcern: {w: "majority"} +}; +testTimeout(dbName, createIndexesCmd); +testSuccessOnTxnCommit(dbName, createIndexesCmd, {"command.createIndexes": collName}); - jsTestLog("Testing that 'dropIndexes' blocks on transactions"); - // The setup creates an index on {b: 1} called 'b_1'. The transaction will insert a document - // that has a field 'b'. - const dropIndexesCmd = {dropIndexes: collName, index: "b_1", writeConcern: {w: "majority"}}; - testTimeout(dbName, dropIndexesCmd); - testSuccessOnTxnCommit(dbName, dropIndexesCmd, {"command.dropIndexes": collName}); - session.endSession(); +jsTestLog("Testing that 'dropIndexes' blocks on transactions"); +// The setup creates an index on {b: 1} called 'b_1'. The transaction will insert a document +// that has a field 'b'. +const dropIndexesCmd = { + dropIndexes: collName, + index: "b_1", + writeConcern: {w: "majority"} +}; +testTimeout(dbName, dropIndexesCmd); +testSuccessOnTxnCommit(dbName, dropIndexesCmd, {"command.dropIndexes": collName}); +session.endSession(); }()); diff --git a/jstests/core/txns/transactions_profiling.js b/jstests/core/txns/transactions_profiling.js index 548c800eeb4..55f63ab6cfc 100644 --- a/jstests/core/txns/transactions_profiling.js +++ b/jstests/core/txns/transactions_profiling.js @@ -1,248 +1,242 @@ // Test profiling for commands in multi-document transactions. // @tags: [uses_transactions] (function() { - "use strict"; - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. 
- - const dbName = "test"; - const collName = "transactions_profiling"; - const testDB = db.getSiblingDB(dbName); - testDB[collName].drop({writeConcern: {w: "majority"}}); - - testDB.setProfilingLevel(2); - - const sessionOptions = {causalConsistency: false}; - let session = testDB.getMongo().startSession(sessionOptions); - let sessionDB = session.getDatabase(dbName); - let sessionColl = sessionDB[collName]; - - assert.commandWorked(sessionColl.insert({_id: "findAndModify-doc"})); - assert.commandWorked(sessionColl.insert({_id: "delete-doc"})); - assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-1"})); - assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-2"})); - assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-3"})); - assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-4"})); - assert.commandWorked(sessionColl.insert({_id: "read-doc"})); - assert.commandWorked(sessionColl.insert({_id: "update-doc"})); - assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-1"})); - assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-2"})); - assert.commandWorked(testDB.runCommand({ - createIndexes: collName, - indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}], - writeConcern: {w: "majority"} - })); - - jsTestLog("Test commands that can use shell helpers."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - - jsTestLog("Test aggregate."); - assert.eq(1, sessionColl.aggregate([{$match: {_id: "read-doc"}}]).itcount()); - let profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.command.aggregate, sessionColl.getName(), tojson(profileObj)); - assert.eq(profileObj.nreturned, 1, tojson(profileObj)); - - jsTestLog("Test delete."); - assert.commandWorked(sessionColl.deleteOne({_id: "delete-doc"})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "remove", tojson(profileObj)); - assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); - - jsTestLog("Test multi delete."); - assert.commandWorked( - sessionColl.deleteMany({_id: {$in: ["multi-delete-doc-1", "multi-delete-doc-2"]}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "remove", tojson(profileObj)); - assert.eq(profileObj.ndeleted, 2, tojson(profileObj)); - - jsTestLog("Test batch delete."); - assert.commandWorked(sessionDB.runCommand({ - delete: collName, - deletes: [ - {q: {_id: "multi-delete-doc-3"}, limit: 1}, - {q: {_id: "multi-delete-doc-4"}, limit: 1} - ] - })); - // We see the profile entry from the second delete. 
- profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "remove", tojson(profileObj)); - assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); - - jsTestLog("Test distinct."); - assert.eq(["read-doc"], sessionColl.distinct("_id", {_id: "read-doc"})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.command.distinct, sessionColl.getName(), tojson(profileObj)); - - jsTestLog("Test find."); - assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "query", tojson(profileObj)); - assert.eq(profileObj.nreturned, 1, tojson(profileObj)); - - jsTestLog("Test findAndModify."); - assert.eq({_id: "findAndModify-doc", updated: true}, - sessionColl.findAndModify( - {query: {_id: "findAndModify-doc"}, update: {$set: {updated: true}}, new: true})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - - jsTestLog("Test geoSearch."); - assert.commandWorked( - sessionDB.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.geoSearch, sessionColl.getName(), tojson(profileObj)); - - jsTestLog("Test getMore."); - let res = assert.commandWorked( - sessionDB.runCommand({find: collName, filter: {_id: "read-doc"}, batchSize: 0})); - assert(res.hasOwnProperty("cursor"), tojson(res)); - assert(res.cursor.hasOwnProperty("id"), tojson(res)); - let cursorId = res.cursor.id; - res = assert.commandWorked(sessionDB.runCommand({getMore: cursorId, collection: collName})); - assert.eq([{_id: "read-doc"}], res.cursor.nextBatch, tojson(res)); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "getmore", tojson(profileObj)); - assert.eq(profileObj.nreturned, 1, tojson(profileObj)); - - jsTestLog("Test insert."); - assert.commandWorked(sessionColl.insert({_id: "insert-doc"})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "insert", tojson(profileObj)); - assert.eq(profileObj.ninserted, 1, tojson(profileObj)); - - jsTestLog("Test update."); - assert.commandWorked(sessionColl.update({_id: "update-doc"}, {$set: {updated: true}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - - jsTestLog("Test multi update."); - assert.commandWorked(sessionColl.updateMany( - {_id: {$in: ["multi-update-doc-1", "multi-update-doc-2"]}}, {$set: 
{updated: true}})); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.nMatched, 2, tojson(profileObj)); - assert.eq(profileObj.nModified, 2, tojson(profileObj)); - - jsTestLog("Test batch update."); - assert.commandWorked(sessionDB.runCommand({ - update: collName, - updates: [ - {q: {_id: "multi-update-doc-1"}, u: {$set: {batch_updated: true}}}, - {q: {_id: "multi-update-doc-2"}, u: {$set: {batch_updated: true}}} - ] - })); - // We see the profile entry from the second update. - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.nMatched, 1, tojson(profileObj)); - assert.eq(profileObj.nModified, 1, tojson(profileObj)); - - jsTestLog("Committing transaction."); - assert.commandWorked(session.commitTransaction_forTesting()); - - jsTestLog("Test delete with a write conflict."); - assert.commandWorked(sessionColl.insert({_id: "delete-doc"}, {writeConcern: {w: "majority"}})); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - - // Perform an operation in the transaction to establish the snapshot. - assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); - - // Update the document outside of the transaction. - assert.commandWorked(testDB[collName].update({_id: "delete-doc"}, {$set: {conflict: true}})); - - // Deleting the document in the transaction fails, but profiling is still successful. - assert.throws(function() { - sessionColl.deleteOne({_id: "delete-doc"}); - }); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "remove", tojson(profileObj)); - assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTestLog("Test findAndModify with a write conflict."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - - // Perform an operation in the transaction to establish the snapshot. - assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); - - // Update the document outside of the transaction. - assert.commandWorked( - testDB[collName].update({_id: "findAndModify-doc"}, {$set: {conflict: true}})); - - // Modifying the document in the transaction fails, but profiling is still successful. - assert.throws(function() { - sessionColl.findAndModify( - {query: {_id: "findAndModify-doc"}, update: {$set: {conflict: false}}}); - }); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.op, "command", tojson(profileObj)); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj)); - assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTestLog("Test insert with a write conflict."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - - // Perform an operation in the transaction to establish the snapshot. 
- assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); - - // Insert a document outside of the transaction. - assert.commandWorked(testDB[collName].insert({_id: "conflict-doc"})); - - // Inserting a document with the same _id in the transaction fails, but profiling is still - // successful. - assert.commandFailedWithCode(sessionColl.insert({_id: "conflict-doc"}), - ErrorCodes.WriteConflict); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "insert", tojson(profileObj)); - assert.eq(profileObj.ninserted, 0, tojson(profileObj)); - assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - jsTestLog("Test update with a write conflict."); - session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); - - // Perform an operation in the transaction to establish the snapshot. - assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); - - // Update the document outside of the transaction. - assert.commandWorked(testDB[collName].update({_id: "update-doc"}, {$set: {conflict: true}})); - - // Updating the document in the transaction fails, but profiling is still successful. - assert.commandFailedWithCode(sessionColl.update({_id: "update-doc"}, {$set: {conflict: false}}), - ErrorCodes.WriteConflict); - profileObj = getLatestProfilerEntry(testDB); - assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); - assert.eq(profileObj.op, "update", tojson(profileObj)); - assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - session.endSession(); +"use strict"; +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. 
+ +const dbName = "test"; +const collName = "transactions_profiling"; +const testDB = db.getSiblingDB(dbName); +testDB[collName].drop({writeConcern: {w: "majority"}}); + +testDB.setProfilingLevel(2); + +const sessionOptions = { + causalConsistency: false +}; +let session = testDB.getMongo().startSession(sessionOptions); +let sessionDB = session.getDatabase(dbName); +let sessionColl = sessionDB[collName]; + +assert.commandWorked(sessionColl.insert({_id: "findAndModify-doc"})); +assert.commandWorked(sessionColl.insert({_id: "delete-doc"})); +assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-1"})); +assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-2"})); +assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-3"})); +assert.commandWorked(sessionColl.insert({_id: "multi-delete-doc-4"})); +assert.commandWorked(sessionColl.insert({_id: "read-doc"})); +assert.commandWorked(sessionColl.insert({_id: "update-doc"})); +assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-1"})); +assert.commandWorked(sessionColl.insert({_id: "multi-update-doc-2"})); +assert.commandWorked(testDB.runCommand({ + createIndexes: collName, + indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}], + writeConcern: {w: "majority"} +})); + +jsTestLog("Test commands that can use shell helpers."); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); + +jsTestLog("Test aggregate."); +assert.eq(1, sessionColl.aggregate([{$match: {_id: "read-doc"}}]).itcount()); +let profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.command.aggregate, sessionColl.getName(), tojson(profileObj)); +assert.eq(profileObj.nreturned, 1, tojson(profileObj)); + +jsTestLog("Test delete."); +assert.commandWorked(sessionColl.deleteOne({_id: "delete-doc"})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "remove", tojson(profileObj)); +assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); + +jsTestLog("Test multi delete."); +assert.commandWorked( + sessionColl.deleteMany({_id: {$in: ["multi-delete-doc-1", "multi-delete-doc-2"]}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "remove", tojson(profileObj)); +assert.eq(profileObj.ndeleted, 2, tojson(profileObj)); + +jsTestLog("Test batch delete."); +assert.commandWorked(sessionDB.runCommand({ + delete: collName, + deletes: + [{q: {_id: "multi-delete-doc-3"}, limit: 1}, {q: {_id: "multi-delete-doc-4"}, limit: 1}] +})); +// We see the profile entry from the second delete. 
+profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "remove", tojson(profileObj)); +assert.eq(profileObj.ndeleted, 1, tojson(profileObj)); + +jsTestLog("Test distinct."); +assert.eq(["read-doc"], sessionColl.distinct("_id", {_id: "read-doc"})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.command.distinct, sessionColl.getName(), tojson(profileObj)); + +jsTestLog("Test find."); +assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "query", tojson(profileObj)); +assert.eq(profileObj.nreturned, 1, tojson(profileObj)); + +jsTestLog("Test findAndModify."); +assert.eq({_id: "findAndModify-doc", updated: true}, + sessionColl.findAndModify( + {query: {_id: "findAndModify-doc"}, update: {$set: {updated: true}}, new: true})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); + +jsTestLog("Test geoSearch."); +assert.commandWorked( + sessionDB.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.geoSearch, sessionColl.getName(), tojson(profileObj)); + +jsTestLog("Test getMore."); +let res = assert.commandWorked( + sessionDB.runCommand({find: collName, filter: {_id: "read-doc"}, batchSize: 0})); +assert(res.hasOwnProperty("cursor"), tojson(res)); +assert(res.cursor.hasOwnProperty("id"), tojson(res)); +let cursorId = res.cursor.id; +res = assert.commandWorked(sessionDB.runCommand({getMore: cursorId, collection: collName})); +assert.eq([{_id: "read-doc"}], res.cursor.nextBatch, tojson(res)); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "getmore", tojson(profileObj)); +assert.eq(profileObj.nreturned, 1, tojson(profileObj)); + +jsTestLog("Test insert."); +assert.commandWorked(sessionColl.insert({_id: "insert-doc"})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "insert", tojson(profileObj)); +assert.eq(profileObj.ninserted, 1, tojson(profileObj)); + +jsTestLog("Test update."); +assert.commandWorked(sessionColl.update({_id: "update-doc"}, {$set: {updated: true}})); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "update", tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); + +jsTestLog("Test multi update."); +assert.commandWorked(sessionColl.updateMany( + {_id: {$in: ["multi-update-doc-1", "multi-update-doc-2"]}}, {$set: {updated: true}})); +profileObj = 
getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "update", tojson(profileObj)); +assert.eq(profileObj.nMatched, 2, tojson(profileObj)); +assert.eq(profileObj.nModified, 2, tojson(profileObj)); + +jsTestLog("Test batch update."); +assert.commandWorked(sessionDB.runCommand({ + update: collName, + updates: [ + {q: {_id: "multi-update-doc-1"}, u: {$set: {batch_updated: true}}}, + {q: {_id: "multi-update-doc-2"}, u: {$set: {batch_updated: true}}} + ] +})); +// We see the profile entry from the second update. +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "update", tojson(profileObj)); +assert.eq(profileObj.nMatched, 1, tojson(profileObj)); +assert.eq(profileObj.nModified, 1, tojson(profileObj)); + +jsTestLog("Committing transaction."); +assert.commandWorked(session.commitTransaction_forTesting()); + +jsTestLog("Test delete with a write conflict."); +assert.commandWorked(sessionColl.insert({_id: "delete-doc"}, {writeConcern: {w: "majority"}})); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); + +// Perform an operation in the transaction to establish the snapshot. +assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); + +// Update the document outside of the transaction. +assert.commandWorked(testDB[collName].update({_id: "delete-doc"}, {$set: {conflict: true}})); + +// Deleting the document in the transaction fails, but profiling is still successful. +assert.throws(function() { + sessionColl.deleteOne({_id: "delete-doc"}); +}); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "remove", tojson(profileObj)); +assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTestLog("Test findAndModify with a write conflict."); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); + +// Perform an operation in the transaction to establish the snapshot. +assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); + +// Update the document outside of the transaction. +assert.commandWorked(testDB[collName].update({_id: "findAndModify-doc"}, {$set: {conflict: true}})); + +// Modifying the document in the transaction fails, but profiling is still successful. +assert.throws(function() { + sessionColl.findAndModify( + {query: {_id: "findAndModify-doc"}, update: {$set: {conflict: false}}}); +}); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.op, "command", tojson(profileObj)); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.command.findandmodify, sessionColl.getName(), tojson(profileObj)); +assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTestLog("Test insert with a write conflict."); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); + +// Perform an operation in the transaction to establish the snapshot. +assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); + +// Insert a document outside of the transaction. 
+assert.commandWorked(testDB[collName].insert({_id: "conflict-doc"})); + +// Inserting a document with the same _id in the transaction fails, but profiling is still +// successful. +assert.commandFailedWithCode(sessionColl.insert({_id: "conflict-doc"}), ErrorCodes.WriteConflict); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "insert", tojson(profileObj)); +assert.eq(profileObj.ninserted, 0, tojson(profileObj)); +assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTestLog("Test update with a write conflict."); +session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}}); + +// Perform an operation in the transaction to establish the snapshot. +assert.eq(1, sessionColl.find({_id: "read-doc"}).itcount()); + +// Update the document outside of the transaction. +assert.commandWorked(testDB[collName].update({_id: "update-doc"}, {$set: {conflict: true}})); + +// Updating the document in the transaction fails, but profiling is still successful. +assert.commandFailedWithCode(sessionColl.update({_id: "update-doc"}, {$set: {conflict: false}}), + ErrorCodes.WriteConflict); +profileObj = getLatestProfilerEntry(testDB); +assert.eq(profileObj.ns, sessionColl.getFullName(), tojson(profileObj)); +assert.eq(profileObj.op, "update", tojson(profileObj)); +assert.eq(profileObj.errCode, ErrorCodes.WriteConflict, tojson(profileObj)); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +session.endSession(); }()); diff --git a/jstests/core/txns/transactions_profiling_with_drops.js b/jstests/core/txns/transactions_profiling_with_drops.js index ee25f5cc442..03fea946b35 100644 --- a/jstests/core/txns/transactions_profiling_with_drops.js +++ b/jstests/core/txns/transactions_profiling_with_drops.js @@ -1,112 +1,112 @@ // Tests that locks acquisitions for profiling in a transaction have a 0-second timeout. // @tags: [uses_transactions] (function() { - "use strict"; - - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. - - const dbName = "test"; - const collName = "transactions_profiling_with_drops"; - const adminDB = db.getSiblingDB("admin"); - const testDB = db.getSiblingDB(dbName); - const session = db.getMongo().startSession({causalConsistency: false}); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}); - assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"})); - assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1})); - - jsTest.log("Test read profiling with operation holding database X lock."); - - jsTest.log("Start transaction."); - session.startTransaction(); - - jsTest.log("Run a slow read. Profiling in the transaction should succeed."); - assert.sameMembers( - [{_id: "doc"}], - sessionColl.find({$where: "sleep(1000); return true;"}).comment("read success").toArray()); - profilerHasSingleMatchingEntryOrThrow( - {profileDB: testDB, filter: {"command.comment": "read success"}}); - - // Lock 'test' database in X mode. 
- let lockShell = startParallelShell(function() { - assert.commandFailed(db.adminCommand({ - sleep: 1, - secs: 500, - lock: "w", - lockTarget: "test", - $comment: "transaction_profiling_with_drops lock sleep" - })); +"use strict"; + +load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. + +const dbName = "test"; +const collName = "transactions_profiling_with_drops"; +const adminDB = db.getSiblingDB("admin"); +const testDB = db.getSiblingDB(dbName); +const session = db.getMongo().startSession({causalConsistency: false}); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +sessionDb.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}); +assert.commandWorked(sessionColl.insert({_id: "doc"}, {w: "majority"})); +assert.commandWorked(sessionDb.runCommand({profile: 1, slowms: 1})); + +jsTest.log("Test read profiling with operation holding database X lock."); + +jsTest.log("Start transaction."); +session.startTransaction(); + +jsTest.log("Run a slow read. Profiling in the transaction should succeed."); +assert.sameMembers( + [{_id: "doc"}], + sessionColl.find({$where: "sleep(1000); return true;"}).comment("read success").toArray()); +profilerHasSingleMatchingEntryOrThrow( + {profileDB: testDB, filter: {"command.comment": "read success"}}); + +// Lock 'test' database in X mode. +let lockShell = startParallelShell(function() { + assert.commandFailed(db.adminCommand({ + sleep: 1, + secs: 500, + lock: "w", + lockTarget: "test", + $comment: "transaction_profiling_with_drops lock sleep" + })); +}); + +const waitForCommand = function(opFilter) { + let opId = -1; + assert.soon(function() { + const curopRes = testDB.currentOp(); + assert.commandWorked(curopRes); + const foundOp = curopRes["inprog"].filter(opFilter); + + if (foundOp.length == 1) { + opId = foundOp[0]["opid"]; + } + return (foundOp.length == 1); }); + return opId; +}; - const waitForCommand = function(opFilter) { - let opId = -1; - assert.soon(function() { - const curopRes = testDB.currentOp(); - assert.commandWorked(curopRes); - const foundOp = curopRes["inprog"].filter(opFilter); - - if (foundOp.length == 1) { - opId = foundOp[0]["opid"]; - } - return (foundOp.length == 1); - }); - return opId; - }; - - // Wait for sleep to appear in currentOp - let opId = waitForCommand( - op => (op["ns"] == "admin.$cmd" && - op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep")); - - jsTest.log("Run a slow read. Profiling in the transaction should fail."); - assert.sameMembers( - [{_id: "doc"}], - sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray()); - assert.commandWorked(session.commitTransaction_forTesting()); - - assert.commandWorked(testDB.killOp(opId)); - lockShell(); - - profilerHasZeroMatchingEntriesOrThrow( - {profileDB: testDB, filter: {"command.comment": "read failure"}}); - - jsTest.log("Test write profiling with operation holding database X lock."); - - jsTest.log("Start transaction."); - session.startTransaction(); - - jsTest.log("Run a slow write. Profiling in the transaction should succeed."); - assert.commandWorked(sessionColl.update( - {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "en"}})); - profilerHasSingleMatchingEntryOrThrow( - {profileDB: testDB, filter: {"command.collation": {locale: "en"}}}); - - // Lock 'test' database in X mode. 
- lockShell = startParallelShell(function() { - assert.commandFailed(db.getSiblingDB("test").adminCommand( - {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"})); - }); +// Wait for sleep to appear in currentOp +let opId = waitForCommand( + op => (op["ns"] == "admin.$cmd" && + op["command"]["$comment"] == "transaction_profiling_with_drops lock sleep")); + +jsTest.log("Run a slow read. Profiling in the transaction should fail."); +assert.sameMembers( + [{_id: "doc"}], + sessionColl.find({$where: "sleep(1000); return true;"}).comment("read failure").toArray()); +assert.commandWorked(session.commitTransaction_forTesting()); + +assert.commandWorked(testDB.killOp(opId)); +lockShell(); + +profilerHasZeroMatchingEntriesOrThrow( + {profileDB: testDB, filter: {"command.comment": "read failure"}}); + +jsTest.log("Test write profiling with operation holding database X lock."); + +jsTest.log("Start transaction."); +session.startTransaction(); + +jsTest.log("Run a slow write. Profiling in the transaction should succeed."); +assert.commandWorked(sessionColl.update( + {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "en"}})); +profilerHasSingleMatchingEntryOrThrow( + {profileDB: testDB, filter: {"command.collation": {locale: "en"}}}); + +// Lock 'test' database in X mode. +lockShell = startParallelShell(function() { + assert.commandFailed(db.getSiblingDB("test").adminCommand( + {sleep: 1, secs: 500, lock: "w", lockTarget: "test", $comment: "lock sleep"})); +}); - // Wait for sleep to appear in currentOp - opId = waitForCommand( - op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep")); +// Wait for sleep to appear in currentOp +opId = + waitForCommand(op => (op["ns"] == "admin.$cmd" && op["command"]["$comment"] == "lock sleep")); - jsTest.log("Run a slow write. Profiling in the transaction should still succeed " + - "since the transaction already has an IX DB lock."); - assert.commandWorked(sessionColl.update( - {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "fr"}})); - assert.commandWorked(session.commitTransaction_forTesting()); +jsTest.log("Run a slow write. Profiling in the transaction should still succeed " + + "since the transaction already has an IX DB lock."); +assert.commandWorked(sessionColl.update( + {$where: "sleep(1000); return true;"}, {$inc: {good: 1}}, {collation: {locale: "fr"}})); +assert.commandWorked(session.commitTransaction_forTesting()); - assert.commandWorked(testDB.killOp(opId)); - lockShell(); +assert.commandWorked(testDB.killOp(opId)); +lockShell(); - profilerHasSingleMatchingEntryOrThrow( - {profileDB: testDB, filter: {"command.collation": {locale: "fr"}}}); +profilerHasSingleMatchingEntryOrThrow( + {profileDB: testDB, filter: {"command.collation": {locale: "fr"}}}); - jsTest.log("Both writes should succeed"); - assert.docEq({_id: "doc", good: 2}, sessionColl.findOne()); +jsTest.log("Both writes should succeed"); +assert.docEq({_id: "doc", good: 2}, sessionColl.findOne()); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/transactions_write_conflicts.js b/jstests/core/txns/transactions_write_conflicts.js index dc4b742deeb..531477353fc 100644 --- a/jstests/core/txns/transactions_write_conflicts.js +++ b/jstests/core/txns/transactions_write_conflicts.js @@ -32,164 +32,232 @@ * @tags: [uses_transactions] */ (function() { - "use strict"; - - load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'. 
- - const dbName = "test"; - const collName = "transactions_write_conflicts"; - - const testDB = db.getSiblingDB(dbName); - const coll = testDB[collName]; - - // Clean up and create test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); - - /*********************************************************************************************** - * Single document write conflicts. - **********************************************************************************************/ - - jsTestLog("Test single document write conflicts."); - - print("insert-insert conflict."); - let t1Op = {insert: collName, documents: [{_id: 1, t1: 1}]}; - let t2Op = {insert: collName, documents: [{_id: 1, t2: 1}]}; - let expectedDocs1 = [{_id: 1, t1: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); - let expectedDocs2 = [{_id: 1, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); - - print("update-update conflict"); - let initOp = {insert: collName, documents: [{_id: 1}]}; // the document to update. - t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]}; - t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]}; - expectedDocs1 = [{_id: 1, t1: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("upsert-upsert conflict"); - t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}, upsert: true}]}; - t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}, upsert: true}]}; - expectedDocs1 = [{_id: 1, t1: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("delete-delete conflict"); - initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete. - t1Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}; - t2Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}; - expectedDocs1 = []; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = []; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("update-delete conflict"); - initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete/update. - t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}]}; - t2Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}; - expectedDocs1 = [{_id: 1, t1: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = []; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("delete-update conflict"); - initOp = {insert: collName, documents: [{_id: 1}]}; // the document to delete/update. 
- t1Op = {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]}; - t2Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}]}; - expectedDocs1 = []; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - /*********************************************************************************************** - * Multi-document and predicate based write conflicts. - **********************************************************************************************/ - - jsTestLog("Test multi-document and predicate based write conflicts."); - - print("batch insert-batch insert conflict"); - t1Op = {insert: collName, documents: [{_id: 1}, {_id: 2}, {_id: 3}]}; - t2Op = {insert: collName, documents: [{_id: 2}, {_id: 3}, {_id: 4}]}; - expectedDocs1 = [{_id: 1}, {_id: 2}, {_id: 3}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); - expectedDocs2 = [{_id: 2}, {_id: 3}, {_id: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); - - print("multiupdate-multiupdate conflict"); - initOp = { - insert: collName, - documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. - }; - // Predicate intersection: [{_id: 2}, {_id: 3}] - t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]}; - t2Op = {update: collName, updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]}; - expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("multiupdate-multidelete conflict"); - initOp = { - insert: collName, - documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. - }; - // Predicate intersection: [{_id: 2}, {_id: 3}] - t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}]}; - t2Op = {delete: collName, deletes: [{q: {_id: {$gte: 2}}, limit: 0}]}; - expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("multidelete-multiupdate conflict"); - initOp = { - insert: collName, - documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. 
- }; - // Predicate intersection: [{_id: 2}, {_id: 3}] - t1Op = {delete: collName, deletes: [{q: {_id: {$lte: 3}}, limit: 0}]}; - t2Op = {update: collName, updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}]}; - expectedDocs1 = [{_id: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("multidelete-multidelete conflict"); - initOp = { - insert: collName, - documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to delete. - }; - // Predicate intersection: [{_id: 2}, {_id: 3}] - t1Op = {delete: collName, deletes: [{q: {_id: {$lte: 3}}, limit: 0}]}; - t2Op = {delete: collName, deletes: [{q: {_id: {$gte: 2}}, limit: 0}]}; - expectedDocs1 = [{_id: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); +"use strict"; +load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'. + +const dbName = "test"; +const collName = "transactions_write_conflicts"; + +const testDB = db.getSiblingDB(dbName); +const coll = testDB[collName]; + +// Clean up and create test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); + +/*********************************************************************************************** + * Single document write conflicts. + **********************************************************************************************/ + +jsTestLog("Test single document write conflicts."); + +print("insert-insert conflict."); +let t1Op = {insert: collName, documents: [{_id: 1, t1: 1}]}; +let t2Op = {insert: collName, documents: [{_id: 1, t2: 1}]}; +let expectedDocs1 = [{_id: 1, t1: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); +let expectedDocs2 = [{_id: 1, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); + +print("update-update conflict"); +let initOp = {insert: collName, documents: [{_id: 1}]}; // the document to update. 
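For context, the writeConflictTest helper exercised throughout this file comes from jstests/core/txns/libs/write_conflicts.js. A minimal sketch of the pattern it presumably follows -- hypothetical code, not the actual helper -- is: two sessions run the two operations in concurrent transactions, the losing transaction's statement fails with WriteConflict (which also aborts it), the winner commits, and the collection contents are compared against the expected documents.

// Hypothetical sketch only; the real helper in write_conflicts.js may differ in detail.
function writeConflictTestSketch(coll, t1Op, t2Op, expectedDocs, initOp) {
    const dbName = coll.getDB().getName();
    assert.commandWorked(coll.remove({}, {writeConcern: {w: "majority"}}));
    if (initOp) {
        assert.commandWorked(coll.getDB().runCommand(initOp));
    }

    const session1 = db.getMongo().startSession({causalConsistency: false});
    const session2 = db.getMongo().startSession({causalConsistency: false});
    const sessionDb1 = session1.getDatabase(dbName);
    const sessionDb2 = session2.getDatabase(dbName);

    // The "T1 starts first and wins" ordering: T1 writes first, T2's conflicting write
    // fails with WriteConflict (implicitly aborting T2), and T1 commits.
    session1.startTransaction();
    session2.startTransaction();
    assert.commandWorked(sessionDb1.runCommand(t1Op));
    assert.commandFailedWithCode(sessionDb2.runCommand(t2Op), ErrorCodes.WriteConflict);
    assert.commandWorked(session1.commitTransaction_forTesting());
    assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
                                 ErrorCodes.NoSuchTransaction);

    assert.sameMembers(expectedDocs, coll.find().toArray());
    session1.endSession();
    session2.endSession();
}

The T2StartsSecondAndWins case would mirror this sketch with the roles of the two transactions reversed.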
+t1Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}] +}; +expectedDocs1 = [{_id: 1, t1: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("upsert-upsert conflict"); +t1Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t1: 1}}, upsert: true}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t2: 1}}, upsert: true}] +}; +expectedDocs1 = [{_id: 1, t1: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("delete-delete conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}] +}; // the document to delete. +t1Op = { + delete: collName, + deletes: [{q: {_id: 1}, limit: 1}] +}; +t2Op = { + delete: collName, + deletes: [{q: {_id: 1}, limit: 1}] +}; +expectedDocs1 = []; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = []; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("update-delete conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}] +}; // the document to delete/update. +t1Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t1: 1}}}] +}; +t2Op = { + delete: collName, + deletes: [{q: {_id: 1}, limit: 1}] +}; +expectedDocs1 = [{_id: 1, t1: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = []; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("delete-update conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}] +}; // the document to delete/update. +t1Op = { + delete: collName, + deletes: [{q: {_id: 1}, limit: 1}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {t2: 1}}}] +}; +expectedDocs1 = []; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +/*********************************************************************************************** + * Multi-document and predicate based write conflicts. 
+ **********************************************************************************************/ + +jsTestLog("Test multi-document and predicate based write conflicts."); + +print("batch insert-batch insert conflict"); +t1Op = { + insert: collName, + documents: [{_id: 1}, {_id: 2}, {_id: 3}] +}; +t2Op = { + insert: collName, + documents: [{_id: 2}, {_id: 3}, {_id: 4}] +}; +expectedDocs1 = [{_id: 1}, {_id: 2}, {_id: 3}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); +expectedDocs2 = [{_id: 2}, {_id: 3}, {_id: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); + +print("multiupdate-multiupdate conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. +}; +// Predicate intersection: [{_id: 2}, {_id: 3}] +t1Op = { + update: collName, + updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}] +}; +expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("multiupdate-multidelete conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. +}; +// Predicate intersection: [{_id: 2}, {_id: 3}] +t1Op = { + update: collName, + updates: [{q: {_id: {$lte: 3}}, u: {$set: {t1: 1}}, multi: true}] +}; +t2Op = { + delete: collName, + deletes: [{q: {_id: {$gte: 2}}, limit: 0}] +}; +expectedDocs1 = [{_id: 1, t1: 1}, {_id: 2, t1: 1}, {_id: 3, t1: 1}, {_id: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("multidelete-multiupdate conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to update/delete. +}; +// Predicate intersection: [{_id: 2}, {_id: 3}] +t1Op = { + delete: collName, + deletes: [{q: {_id: {$lte: 3}}, limit: 0}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: {$gte: 2}}, u: {$set: {t2: 1}}, multi: true}] +}; +expectedDocs1 = [{_id: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1}, {_id: 2, t2: 1}, {_id: 3, t2: 1}, {_id: 4, t2: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("multidelete-multidelete conflict"); +initOp = { + insert: collName, + documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}] // the documents to delete. 
+}; +// Predicate intersection: [{_id: 2}, {_id: 3}] +t1Op = { + delete: collName, + deletes: [{q: {_id: {$lte: 3}}, limit: 0}] +}; +t2Op = { + delete: collName, + deletes: [{q: {_id: {$gte: 2}}, limit: 0}] +}; +expectedDocs1 = [{_id: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); }()); diff --git a/jstests/core/txns/transactions_write_conflicts_unique_indexes.js b/jstests/core/txns/transactions_write_conflicts_unique_indexes.js index 4ce4a2d5eb1..53158d7dd88 100644 --- a/jstests/core/txns/transactions_write_conflicts_unique_indexes.js +++ b/jstests/core/txns/transactions_write_conflicts_unique_indexes.js @@ -5,117 +5,140 @@ */ (function() { - "use strict"; - - load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'. - - const dbName = "test"; - const collName = "transactions_write_conflicts_unique_indexes"; - - const testDB = db.getSiblingDB(dbName); - const coll = testDB[collName]; - - // Clean up and create test collection. - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); - - // Create a unique index on field 'x'. - assert.commandWorked(coll.createIndex({x: 1}, {unique: true})); - - /*********************************************************************************************** - * Single document conflicts. - **********************************************************************************************/ - - jsTestLog("Test single document write conflicts."); - - print("insert-insert conflict."); - - let t1Op = {insert: collName, documents: [{_id: 1, x: 1}]}; - let t2Op = {insert: collName, documents: [{_id: 2, x: 1}]}; - let expectedDocs1 = [{_id: 1, x: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); - let expectedDocs2 = [{_id: 2, x: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); - - print("update-update conflict"); - let initOp = { - insert: collName, - documents: [{_id: 1, x: 1}, {_id: 2, x: 2}] - }; // the document to update. 
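The conflicts in this file are driven by the unique index on 'x' created above: the two transactions touch different documents, but their post-images collide on the indexed value. Outside a transaction the same collision surfaces as a duplicate key write error rather than a WriteConflict; an illustrative, hypothetical check:

// Illustrative only; reuses the test collection and its unique index on {x: 1}.
const uniqColl = db.getSiblingDB("test").transactions_write_conflicts_unique_indexes;
assert.commandWorked(uniqColl.insert({_id: 10, x: 99}));
// Different _id, same unique key value: rejected with a duplicate key error.
assert.writeErrorWithCode(uniqColl.insert({_id: 11, x: 99}), ErrorCodes.DuplicateKey);
assert.commandWorked(uniqColl.remove({_id: 10}));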
- t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: 3}}}]}; - t2Op = {update: collName, updates: [{q: {_id: 2}, u: {$set: {x: 3}}}]}; - expectedDocs1 = [{_id: 1, x: 3}, {_id: 2, x: 2}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [{_id: 1, x: 1}, {_id: 2, x: 3}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - - print("upsert-upsert conflict"); - t1Op = {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: 1}}, upsert: true}]}; - t2Op = {update: collName, updates: [{q: {_id: 2}, u: {$set: {x: 1}}, upsert: true}]}; - expectedDocs1 = [{_id: 1, x: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); - expectedDocs2 = [{_id: 2, x: 1}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); - - /*********************************************************************************************** - * Multi-document and predicate based conflicts. - **********************************************************************************************/ - - jsTestLog("Test multi-document and predicate based write conflicts."); - - print("batch insert-batch insert conflict"); - t1Op = {insert: collName, documents: [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]}; - t2Op = {insert: collName, documents: [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}]}; - expectedDocs1 = [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); - expectedDocs2 = [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); - - print("multiupdate-multiupdate conflict"); - // Update disjoint sets of documents such that the post-image of each set would create a unique - // index violation. - initOp = { - insert: collName, - documents: [ - // Set 1 - {_id: 1, x: 1}, - {_id: 2, x: 2}, - {_id: 3, x: 3}, - // Set 2 - {_id: 4, x: 10}, - {_id: 5, x: 11}, - {_id: 6, x: 12} - ] // the documents to update. - }; - t1Op = {update: collName, updates: [{q: {_id: {$lte: 3}}, u: {$inc: {x: 4}}, multi: true}]}; - t2Op = {update: collName, updates: [{q: {_id: {$gte: 4}}, u: {$inc: {x: -4}}, multi: true}]}; - expectedDocs1 = [ - {_id: 1, x: 5}, - {_id: 2, x: 6}, - {_id: 3, x: 7}, - {_id: 4, x: 10}, - {_id: 5, x: 11}, - {_id: 6, x: 12} - ]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); - expectedDocs2 = [ +"use strict"; + +load("jstests/core/txns/libs/write_conflicts.js"); // for 'WriteConflictHelpers'. + +const dbName = "test"; +const collName = "transactions_write_conflicts_unique_indexes"; + +const testDB = db.getSiblingDB(dbName); +const coll = testDB[collName]; + +// Clean up and create test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({create: coll.getName(), writeConcern: {w: "majority"}}); + +// Create a unique index on field 'x'. +assert.commandWorked(coll.createIndex({x: 1}, {unique: true})); + +/*********************************************************************************************** + * Single document conflicts. 
+ **********************************************************************************************/ + +jsTestLog("Test single document write conflicts."); + +print("insert-insert conflict."); + +let t1Op = {insert: collName, documents: [{_id: 1, x: 1}]}; +let t2Op = {insert: collName, documents: [{_id: 2, x: 1}]}; +let expectedDocs1 = [{_id: 1, x: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); +let expectedDocs2 = [{_id: 2, x: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); + +print("update-update conflict"); +let initOp = { + insert: collName, + documents: [{_id: 1, x: 1}, {_id: 2, x: 2}] +}; // the document to update. +t1Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {x: 3}}}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: 2}, u: {$set: {x: 3}}}] +}; +expectedDocs1 = [{_id: 1, x: 3}, {_id: 2, x: 2}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [{_id: 1, x: 1}, {_id: 2, x: 3}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); + +print("upsert-upsert conflict"); +t1Op = { + update: collName, + updates: [{q: {_id: 1}, u: {$set: {x: 1}}, upsert: true}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: 2}, u: {$set: {x: 1}}, upsert: true}] +}; +expectedDocs1 = [{_id: 1, x: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); +expectedDocs2 = [{_id: 2, x: 1}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); + +/*********************************************************************************************** + * Multi-document and predicate based conflicts. + **********************************************************************************************/ + +jsTestLog("Test multi-document and predicate based write conflicts."); + +print("batch insert-batch insert conflict"); +t1Op = { + insert: collName, + documents: [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}] +}; +t2Op = { + insert: collName, + documents: [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}] +}; +expectedDocs1 = [{_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins); +expectedDocs2 = [{_id: 4, x: 2}, {_id: 5, x: 3}, {_id: 6, x: 4}]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins); + +print("multiupdate-multiupdate conflict"); +// Update disjoint sets of documents such that the post-image of each set would create a unique +// index violation. +initOp = { + insert: collName, + documents: [ + // Set 1 {_id: 1, x: 1}, {_id: 2, x: 2}, {_id: 3, x: 3}, - {_id: 4, x: 6}, - {_id: 5, x: 7}, - {_id: 6, x: 8} - ]; - WriteConflictHelpers.writeConflictTest( - coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); - + // Set 2 + {_id: 4, x: 10}, + {_id: 5, x: 11}, + {_id: 6, x: 12} + ] // the documents to update. 
+}; +t1Op = { + update: collName, + updates: [{q: {_id: {$lte: 3}}, u: {$inc: {x: 4}}, multi: true}] +}; +t2Op = { + update: collName, + updates: [{q: {_id: {$gte: 4}}, u: {$inc: {x: -4}}, multi: true}] +}; +expectedDocs1 = [ + {_id: 1, x: 5}, + {_id: 2, x: 6}, + {_id: 3, x: 7}, + {_id: 4, x: 10}, + {_id: 5, x: 11}, + {_id: 6, x: 12} +]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs1, WriteConflictHelpers.T1StartsFirstAndWins, initOp); +expectedDocs2 = [ + {_id: 1, x: 1}, + {_id: 2, x: 2}, + {_id: 3, x: 3}, + {_id: 4, x: 6}, + {_id: 5, x: 7}, + {_id: 6, x: 8} +]; +WriteConflictHelpers.writeConflictTest( + coll, t1Op, t2Op, expectedDocs2, WriteConflictHelpers.T2StartsSecondAndWins, initOp); }()); diff --git a/jstests/core/txns/upconvert_read_concern.js b/jstests/core/txns/upconvert_read_concern.js index 15d166dde08..2f49280e128 100644 --- a/jstests/core/txns/upconvert_read_concern.js +++ b/jstests/core/txns/upconvert_read_concern.js @@ -2,97 +2,94 @@ // 'snapshot'. // @tags: [uses_transactions] (function() { - "use strict"; +"use strict"; - const dbName = "test"; - const collName = "upconvert_read_concern"; - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; +const dbName = "test"; +const collName = "upconvert_read_concern"; +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; - testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked( - testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); +assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); - function testUpconvertReadConcern(readConcern) { - jsTest.log("Test that the following readConcern is upconverted: " + tojson(readConcern)); - assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); +function testUpconvertReadConcern(readConcern) { + jsTest.log("Test that the following readConcern is upconverted: " + tojson(readConcern)); + assert.commandWorked(testColl.remove({}, {writeConcern: {w: "majority"}})); - // Start a new transaction with the given readConcern. - session.startTransaction(); - let command = {find: collName}; - if (readConcern) { - Object.extend(command, {readConcern: readConcern}); - } - assert.commandWorked(sessionDb.runCommand(command)); + // Start a new transaction with the given readConcern. + session.startTransaction(); + let command = {find: collName}; + if (readConcern) { + Object.extend(command, {readConcern: readConcern}); + } + assert.commandWorked(sessionDb.runCommand(command)); - // Insert a document outside of the transaction. - assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); + // Insert a document outside of the transaction. + assert.commandWorked(testColl.insert({_id: 0}, {writeConcern: {w: "majority"}})); - // Test that the transaction does not see the new document (it has snapshot isolation). 
- let res = assert.commandWorked(sessionDb.runCommand({find: collName})); - assert.eq(res.cursor.firstBatch.length, 0, tojson(res)); + // Test that the transaction does not see the new document (it has snapshot isolation). + let res = assert.commandWorked(sessionDb.runCommand({find: collName})); + assert.eq(res.cursor.firstBatch.length, 0, tojson(res)); - // Commit the transaction. - assert.commandWorked(session.commitTransaction_forTesting()); - } - - testUpconvertReadConcern(null); - testUpconvertReadConcern({}); - testUpconvertReadConcern({level: "local"}); - testUpconvertReadConcern({level: "majority"}); - testUpconvertReadConcern({level: "snapshot"}); - - function testCannotUpconvertReadConcern(readConcern) { - jsTest.log("Test that the following readConcern cannot be upconverted: " + readConcern); - - // Start a new transaction with the given readConcern. - session.startTransaction(); - assert.commandFailedWithCode( - sessionDb.runCommand({find: collName, readConcern: readConcern}), - ErrorCodes.InvalidOptions); - - // No more operations are allowed in the transaction. - assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), - ErrorCodes.NoSuchTransaction); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } + // Commit the transaction. + assert.commandWorked(session.commitTransaction_forTesting()); +} - testCannotUpconvertReadConcern({level: "available"}); - testCannotUpconvertReadConcern({level: "linearizable"}); +testUpconvertReadConcern(null); +testUpconvertReadConcern({}); +testUpconvertReadConcern({level: "local"}); +testUpconvertReadConcern({level: "majority"}); +testUpconvertReadConcern({level: "snapshot"}); - jsTest.log("Test starting a transaction with an invalid readConcern"); +function testCannotUpconvertReadConcern(readConcern) { + jsTest.log("Test that the following readConcern cannot be upconverted: " + readConcern); // Start a new transaction with the given readConcern. session.startTransaction(); - assert.commandFailedWithCode( - sessionDb.runCommand({find: collName, readConcern: {level: "bad"}}), - ErrorCodes.FailedToParse); + assert.commandFailedWithCode(sessionDb.runCommand({find: collName, readConcern: readConcern}), + ErrorCodes.InvalidOptions); // No more operations are allowed in the transaction. assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NoSuchTransaction); assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); +} - jsTest.log("Test specifying readConcern on the second statement in a transaction"); +testCannotUpconvertReadConcern({level: "available"}); +testCannotUpconvertReadConcern({level: "linearizable"}); - // Start a new transaction with snapshot readConcern. - session.startTransaction(); - assert.commandWorked(sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}})); +jsTest.log("Test starting a transaction with an invalid readConcern"); - // The second statement cannot specify a readConcern. - assert.commandFailedWithCode( - sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}), - ErrorCodes.InvalidOptions); +// Start a new transaction with the given readConcern. +session.startTransaction(); +assert.commandFailedWithCode(sessionDb.runCommand({find: collName, readConcern: {level: "bad"}}), + ErrorCodes.FailedToParse); - // The transaction is still active and can be committed. 
- assert.commandWorked(session.commitTransaction_forTesting()); +// No more operations are allowed in the transaction. +assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NoSuchTransaction); +assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +jsTest.log("Test specifying readConcern on the second statement in a transaction"); + +// Start a new transaction with snapshot readConcern. +session.startTransaction(); +assert.commandWorked(sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}})); + +// The second statement cannot specify a readConcern. +assert.commandFailedWithCode( + sessionDb.runCommand({find: collName, readConcern: {level: "snapshot"}}), + ErrorCodes.InvalidOptions); + +// The transaction is still active and can be committed. +assert.commandWorked(session.commitTransaction_forTesting()); - session.endSession(); +session.endSession(); }()); diff --git a/jstests/core/txns/view_reads_in_transaction.js b/jstests/core/txns/view_reads_in_transaction.js index 9e7ab14cd95..5a1a08761b8 100644 --- a/jstests/core/txns/view_reads_in_transaction.js +++ b/jstests/core/txns/view_reads_in_transaction.js @@ -1,60 +1,62 @@ // Tests that reads on views are supported in transactions. // @tags: [uses_transactions, uses_snapshot_read_concern] (function() { - "use strict"; - - const session = db.getMongo().startSession({causalConsistency: false}); - const testDB = session.getDatabase("test"); - const coll = testDB.getCollection("view_reads_in_transaction_data_coll"); - const view = testDB.getCollection("view_reads_in_transaction_actual_view"); - - coll.drop({writeConcern: {w: "majority"}}); - view.drop({writeConcern: {w: "majority"}}); - - // Populate the backing collection. - const testDoc = {_id: "kyle"}; - assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}})); - - // Create an identity view on the data-bearing collection. - assert.commandWorked(view.runCommand( - "create", {viewOn: coll.getName(), pipeline: [], writeConcern: {w: "majority"}})); - - const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; - if (isMongos) { - // Refresh the router's and shard's database versions so the distinct run below can succeed. - // This is necessary because shards always abort their local transaction on stale version - // errors and mongos is not allowed to retry on these errors in a transaction if the stale - // shard has completed at least one earlier statement. - assert.eq(view.distinct("_id"), ["kyle"]); - } - - // Run a dummy find to start the transaction. - jsTestLog("Starting transaction."); - session.startTransaction({readConcern: {level: "snapshot"}}); - let cursor = coll.find(); - cursor.next(); - - // Insert a document outside of the transaction. Subsequent reads should not see this document. - jsTestLog("Inserting document outside of transaction."); - assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({ - _id: "not_visible_in_transaction", - })); - - // Perform reads on views, which will be transformed into aggregations on the backing - // collection. 
- jsTestLog("Performing reads on the view inside the transaction."); - cursor = view.find(); - assert.docEq(testDoc, cursor.next()); - assert(!cursor.hasNext()); - - cursor = view.aggregate({$match: {}}); - assert.docEq(testDoc, cursor.next()); - assert(!cursor.hasNext()); - - assert.eq(view.find({_id: {$exists: 1}}).itcount(), 1); - +"use strict"; + +const session = db.getMongo().startSession({causalConsistency: false}); +const testDB = session.getDatabase("test"); +const coll = testDB.getCollection("view_reads_in_transaction_data_coll"); +const view = testDB.getCollection("view_reads_in_transaction_actual_view"); + +coll.drop({writeConcern: {w: "majority"}}); +view.drop({writeConcern: {w: "majority"}}); + +// Populate the backing collection. +const testDoc = { + _id: "kyle" +}; +assert.commandWorked(coll.insert(testDoc, {writeConcern: {w: "majority"}})); + +// Create an identity view on the data-bearing collection. +assert.commandWorked(view.runCommand( + "create", {viewOn: coll.getName(), pipeline: [], writeConcern: {w: "majority"}})); + +const isMongos = assert.commandWorked(db.runCommand("ismaster")).msg === "isdbgrid"; +if (isMongos) { + // Refresh the router's and shard's database versions so the distinct run below can succeed. + // This is necessary because shards always abort their local transaction on stale version + // errors and mongos is not allowed to retry on these errors in a transaction if the stale + // shard has completed at least one earlier statement. assert.eq(view.distinct("_id"), ["kyle"]); +} + +// Run a dummy find to start the transaction. +jsTestLog("Starting transaction."); +session.startTransaction({readConcern: {level: "snapshot"}}); +let cursor = coll.find(); +cursor.next(); + +// Insert a document outside of the transaction. Subsequent reads should not see this document. +jsTestLog("Inserting document outside of transaction."); +assert.commandWorked(db.getSiblingDB(testDB.getName()).getCollection(coll.getName()).insert({ + _id: "not_visible_in_transaction", +})); + +// Perform reads on views, which will be transformed into aggregations on the backing +// collection. +jsTestLog("Performing reads on the view inside the transaction."); +cursor = view.find(); +assert.docEq(testDoc, cursor.next()); +assert(!cursor.hasNext()); + +cursor = view.aggregate({$match: {}}); +assert.docEq(testDoc, cursor.next()); +assert(!cursor.hasNext()); + +assert.eq(view.find({_id: {$exists: 1}}).itcount(), 1); + +assert.eq(view.distinct("_id"), ["kyle"]); - assert.commandWorked(session.commitTransaction_forTesting()); - jsTestLog("Transaction committed."); +assert.commandWorked(session.commitTransaction_forTesting()); +jsTestLog("Transaction committed."); }()); diff --git a/jstests/core/txns/write_conflicts_with_non_txns.js b/jstests/core/txns/write_conflicts_with_non_txns.js index 451e7a6ae29..e8c3f9fcd47 100644 --- a/jstests/core/txns/write_conflicts_with_non_txns.js +++ b/jstests/core/txns/write_conflicts_with_non_txns.js @@ -19,129 +19,134 @@ (function() { - "use strict"; - - load('jstests/libs/parallelTester.js'); // for ScopedThread. - - const dbName = "test"; - const collName = "write_conflicts_with_non_txns"; - - const testDB = db.getSiblingDB(dbName); - const testColl = testDB[collName]; - - // Clean up and create test collection. 
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); - assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); - - const sessionOptions = {causalConsistency: false}; - const session = db.getMongo().startSession(sessionOptions); - const sessionDb = session.getDatabase(dbName); - const sessionColl = sessionDb[collName]; - - // Two conflicting documents to be inserted by a multi-document transaction and a - // non-transactional write, respectively. - const txnDoc = {_id: 1}; - const nonTxnDoc = {_id: 1, nonTxn: true}; - - // Performs a single document insert on the test collection. Returns the command result object. - function singleDocWrite(dbName, collName, doc) { - const testColl = db.getSiblingDB(dbName)[collName]; - return testColl.runCommand({insert: collName, documents: [doc]}); - } - - // Returns true if a single document insert has started running on the server. - function writeStarted() { - return testDB.currentOp().inprog.some(op => { - return op.active && (op.ns === testColl.getFullName()) && (op.op === "insert") && - (op.writeConflicts > 0); - }); - } - - /** - * A non-transactional (single document) write should keep retrying when attempting to insert a - * document that conflicts with a previous write done by a running transaction, and should be - * allowed to continue after the transaction commits. If 'maxTimeMS' is specified, a single - * document write should timeout after the given time limit if there is a write conflict. - */ - - jsTestLog("Start a multi-document transaction with a document insert."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(txnDoc)); - - jsTestLog("Do a conflicting single document insert outside of transaction with maxTimeMS."); - assert.commandFailedWithCode( - testColl.runCommand({insert: collName, documents: [nonTxnDoc], maxTimeMS: 100}), - ErrorCodes.MaxTimeMSExpired); - - jsTestLog("Doing conflicting single document write in separate thread."); - let thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc); - thread.start(); - - // Wait for the single doc write to start. - assert.soon(writeStarted); - - // Commit the transaction, which should allow the single document write to finish. Since the - // single doc write should get serialized after the transaction, we expect it to fail with a - // duplicate key error. - jsTestLog("Commit the multi-document transaction."); - assert.commandWorked(session.commitTransaction_forTesting()); - thread.join(); - assert.commandFailedWithCode(thread.returnData(), ErrorCodes.DuplicateKey); - - // Check the final documents. - assert.sameMembers([txnDoc], testColl.find().toArray()); - - // Clean up the test collection. - assert.commandWorked(testColl.remove({})); - - /** - * A non-transactional (single document) write should keep retrying when attempting to insert a - * document that conflicts with a previous write done by a running transaction, and should be - * allowed to continue and complete successfully after the transaction aborts. - */ - - jsTestLog("Start a multi-document transaction with a document insert."); - session.startTransaction(); - assert.commandWorked(sessionColl.insert(txnDoc)); - - jsTestLog("Doing conflicting single document write in separate thread."); - thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc); - thread.start(); - - // Wait for the single doc write to start. 
- assert.soon(writeStarted); - - // Abort the transaction, which should allow the single document write to finish and insert its - // document successfully. - jsTestLog("Abort the multi-document transaction."); - assert.commandWorked(session.abortTransaction_forTesting()); - thread.join(); - assert.commandWorked(thread.returnData()); - - // Check the final documents. - assert.sameMembers([nonTxnDoc], testColl.find().toArray()); - - // Clean up the test collection. - assert.commandWorked(testColl.remove({})); - - /** - * A transaction that tries to write to a document that was updated by a non-transaction after - * it started should fail with a WriteConflict. - */ - - jsTestLog("Start a multi-document transaction."); - session.startTransaction(); - assert.commandWorked(sessionColl.runCommand({find: collName})); - - jsTestLog("Do a single document insert outside of the transaction."); - assert.commandWorked(testColl.insert(nonTxnDoc)); - - jsTestLog("Insert a conflicting document inside the multi-document transaction."); - assert.commandFailedWithCode(sessionColl.insert(txnDoc), ErrorCodes.WriteConflict); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - - // Check the final documents. - assert.sameMembers([nonTxnDoc], testColl.find().toArray()); +"use strict"; + +load('jstests/libs/parallelTester.js'); // for ScopedThread. + +const dbName = "test"; +const collName = "write_conflicts_with_non_txns"; + +const testDB = db.getSiblingDB(dbName); +const testColl = testDB[collName]; + +// Clean up and create test collection. +testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); +assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); + +const sessionOptions = { + causalConsistency: false +}; +const session = db.getMongo().startSession(sessionOptions); +const sessionDb = session.getDatabase(dbName); +const sessionColl = sessionDb[collName]; + +// Two conflicting documents to be inserted by a multi-document transaction and a +// non-transactional write, respectively. +const txnDoc = { + _id: 1 +}; +const nonTxnDoc = { + _id: 1, + nonTxn: true +}; + +// Performs a single document insert on the test collection. Returns the command result object. +function singleDocWrite(dbName, collName, doc) { + const testColl = db.getSiblingDB(dbName)[collName]; + return testColl.runCommand({insert: collName, documents: [doc]}); +} + +// Returns true if a single document insert has started running on the server. +function writeStarted() { + return testDB.currentOp().inprog.some(op => { + return op.active && (op.ns === testColl.getFullName()) && (op.op === "insert") && + (op.writeConflicts > 0); + }); +} +/** + * A non-transactional (single document) write should keep retrying when attempting to insert a + * document that conflicts with a previous write done by a running transaction, and should be + * allowed to continue after the transaction commits. If 'maxTimeMS' is specified, a single + * document write should timeout after the given time limit if there is a write conflict. 
+ */ + +jsTestLog("Start a multi-document transaction with a document insert."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(txnDoc)); + +jsTestLog("Do a conflicting single document insert outside of transaction with maxTimeMS."); +assert.commandFailedWithCode( + testColl.runCommand({insert: collName, documents: [nonTxnDoc], maxTimeMS: 100}), + ErrorCodes.MaxTimeMSExpired); + +jsTestLog("Doing conflicting single document write in separate thread."); +let thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc); +thread.start(); + +// Wait for the single doc write to start. +assert.soon(writeStarted); + +// Commit the transaction, which should allow the single document write to finish. Since the +// single doc write should get serialized after the transaction, we expect it to fail with a +// duplicate key error. +jsTestLog("Commit the multi-document transaction."); +assert.commandWorked(session.commitTransaction_forTesting()); +thread.join(); +assert.commandFailedWithCode(thread.returnData(), ErrorCodes.DuplicateKey); + +// Check the final documents. +assert.sameMembers([txnDoc], testColl.find().toArray()); + +// Clean up the test collection. +assert.commandWorked(testColl.remove({})); + +/** + * A non-transactional (single document) write should keep retrying when attempting to insert a + * document that conflicts with a previous write done by a running transaction, and should be + * allowed to continue and complete successfully after the transaction aborts. + */ + +jsTestLog("Start a multi-document transaction with a document insert."); +session.startTransaction(); +assert.commandWorked(sessionColl.insert(txnDoc)); + +jsTestLog("Doing conflicting single document write in separate thread."); +thread = new ScopedThread(singleDocWrite, dbName, collName, nonTxnDoc); +thread.start(); + +// Wait for the single doc write to start. +assert.soon(writeStarted); + +// Abort the transaction, which should allow the single document write to finish and insert its +// document successfully. +jsTestLog("Abort the multi-document transaction."); +assert.commandWorked(session.abortTransaction_forTesting()); +thread.join(); +assert.commandWorked(thread.returnData()); + +// Check the final documents. +assert.sameMembers([nonTxnDoc], testColl.find().toArray()); + +// Clean up the test collection. +assert.commandWorked(testColl.remove({})); + +/** + * A transaction that tries to write to a document that was updated by a non-transaction after + * it started should fail with a WriteConflict. + */ + +jsTestLog("Start a multi-document transaction."); +session.startTransaction(); +assert.commandWorked(sessionColl.runCommand({find: collName})); + +jsTestLog("Do a single document insert outside of the transaction."); +assert.commandWorked(testColl.insert(nonTxnDoc)); + +jsTestLog("Insert a conflicting document inside the multi-document transaction."); +assert.commandFailedWithCode(sessionColl.insert(txnDoc), ErrorCodes.WriteConflict); +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); + +// Check the final documents. 
+assert.sameMembers([nonTxnDoc], testColl.find().toArray()); }()); diff --git a/jstests/core/type4.js b/jstests/core/type4.js index 82197d4f1e2..7f3adf6645c 100644 --- a/jstests/core/type4.js +++ b/jstests/core/type4.js @@ -1,42 +1,42 @@ (function() { - "use strict"; +"use strict"; - // Tests for SERVER-20080 - // - // Verify that various types cannot be invoked as constructors +// Tests for SERVER-20080 +// +// Verify that various types cannot be invoked as constructors - var t = db.jstests_type4; - t.drop(); - t.insert({}); - t.insert({}); - t.insert({}); +var t = db.jstests_type4; +t.drop(); +t.insert({}); +t.insert({}); +t.insert({}); - var oldReadMode = db.getMongo().readMode(); +var oldReadMode = db.getMongo().readMode(); - assert.throws(function() { - (new _rand())(); - }, [], "invoke constructor on natively injected function"); +assert.throws(function() { + (new _rand())(); +}, [], "invoke constructor on natively injected function"); - assert.throws(function() { - var doc = db.test.findOne(); - new doc(); - }, [], "invoke constructor on BSON"); +assert.throws(function() { + var doc = db.test.findOne(); + new doc(); +}, [], "invoke constructor on BSON"); - assert.throws(function() { - db.getMongo().forceReadMode("commands"); - var cursor = t.find(); - cursor.next(); +assert.throws(function() { + db.getMongo().forceReadMode("commands"); + var cursor = t.find(); + cursor.next(); - new cursor._cursor._cursorHandle(); - }, [], "invoke constructor on CursorHandle"); + new cursor._cursor._cursorHandle(); +}, [], "invoke constructor on CursorHandle"); - assert.throws(function() { - db.getMongo().forceReadMode("legacy"); - var cursor = t.find(); - cursor.next(); +assert.throws(function() { + db.getMongo().forceReadMode("legacy"); + var cursor = t.find(); + cursor.next(); - new cursor._cursor(); - }, [], "invoke constructor on Cursor"); + new cursor._cursor(); +}, [], "invoke constructor on Cursor"); - db.getMongo().forceReadMode(oldReadMode); +db.getMongo().forceReadMode(oldReadMode); })(); diff --git a/jstests/core/type5.js b/jstests/core/type5.js index d4dfc42d9f6..b0f84f4885b 100644 --- a/jstests/core/type5.js +++ b/jstests/core/type5.js @@ -1,22 +1,21 @@ (function() { - "use strict"; +"use strict"; - // This checks SERVER-20375 - Constrain JS method thisv - // - // Check to make sure we can't invoke methods on incorrect types, or on - // prototypes of objects that aren't intended to have methods invoked on - // them. - - assert.throws(function() { - HexData(0, "aaaa").hex.apply({}); - }, [], "invoke method on object of incorrect type"); - assert.throws(function() { - var x = HexData(0, "aaaa"); - x.hex.apply(10); - }, [], "invoke method on incorrect type"); - assert.throws(function() { - var x = HexData(0, "aaaa"); - x.hex.apply(x.__proto__); - }, [], "invoke method on prototype of correct type"); +// This checks SERVER-20375 - Constrain JS method thisv +// +// Check to make sure we can't invoke methods on incorrect types, or on +// prototypes of objects that aren't intended to have methods invoked on +// them. 
+assert.throws(function() { + HexData(0, "aaaa").hex.apply({}); +}, [], "invoke method on object of incorrect type"); +assert.throws(function() { + var x = HexData(0, "aaaa"); + x.hex.apply(10); +}, [], "invoke method on incorrect type"); +assert.throws(function() { + var x = HexData(0, "aaaa"); + x.hex.apply(x.__proto__); +}, [], "invoke method on prototype of correct type"); })(); diff --git a/jstests/core/type6.js b/jstests/core/type6.js index 39c3e2567bb..8dbc1770cc3 100644 --- a/jstests/core/type6.js +++ b/jstests/core/type6.js @@ -1,17 +1,17 @@ (function() { - "use strict"; +"use strict"; - // SERVER-20319 Min/MaxKey check type of singleton - // - // make sure swapping min/max key's prototype doesn't blow things up +// SERVER-20319 Min/MaxKey check type of singleton +// +// make sure swapping min/max key's prototype doesn't blow things up - assert.throws(function() { - MinKey().__proto__.singleton = 1000; - MinKey(); - }, [], "make sure manipulating MinKey's proto is safe"); +assert.throws(function() { + MinKey().__proto__.singleton = 1000; + MinKey(); +}, [], "make sure manipulating MinKey's proto is safe"); - assert.throws(function() { - MaxKey().__proto__.singleton = 1000; - MaxKey(); - }, [], "make sure manipulating MaxKey's proto is safe"); +assert.throws(function() { + MaxKey().__proto__.singleton = 1000; + MaxKey(); +}, [], "make sure manipulating MaxKey's proto is safe"); })(); diff --git a/jstests/core/type7.js b/jstests/core/type7.js index 1d67922d491..a9e0d67c3b0 100644 --- a/jstests/core/type7.js +++ b/jstests/core/type7.js @@ -1,46 +1,46 @@ (function() { - "use strict"; +"use strict"; - // SERVER-20332 make JS NumberLong more robust - // - // Make sure swapping floatApprox, top and bottom don't break NumberLong +// SERVER-20332 make JS NumberLong more robust +// +// Make sure swapping floatApprox, top and bottom don't break NumberLong - // Picking 2^54 because it's representable as a double (as a power of - // two), but big enough that the NumberLong code doesn't know it (numbers - // over 2^53 can lose precision) - var number = NumberLong("18014398509481984"); +// Picking 2^54 because it's representable as a double (as a power of +// two), but big enough that the NumberLong code doesn't know it (numbers +// over 2^53 can lose precision) +var number = NumberLong("18014398509481984"); - { - // Make sure all elements in a new NumberLong are valid +{ + // Make sure all elements in a new NumberLong are valid - assert.eq(number.floatApprox, 18014398509481984); - assert.eq(number.top, 4194304); - assert.eq(number.bottom, 0); - assert.eq(number.valueOf(), 18014398509481984); - } + assert.eq(number.floatApprox, 18014398509481984); + assert.eq(number.top, 4194304); + assert.eq(number.bottom, 0); + assert.eq(number.valueOf(), 18014398509481984); +} - { - // Make sure that floatApprox, top and bottom cannot be set +{ + // Make sure that floatApprox, top and bottom cannot be set - assert.throws(function() { - number.floatApprox = "a"; - }, [], "floatApprox should not be setable."); + assert.throws(function() { + number.floatApprox = "a"; + }, [], "floatApprox should not be setable."); - assert.throws(function() { - number.top = "a"; - }, [], "top should not be setable."); + assert.throws(function() { + number.top = "a"; + }, [], "top should not be setable."); - assert.throws(function() { - number.bottom = "a"; - }, [], "bottom should not be setable."); - } + assert.throws(function() { + number.bottom = "a"; + }, [], "bottom should not be setable."); +} - { - // Make sure we fall 
back to floatApprox +{ + // Make sure we fall back to floatApprox - delete number.top; - delete number.bottom; + delete number.top; + delete number.bottom; - assert.eq(number.valueOf(), 18014398509481984); - } + assert.eq(number.valueOf(), 18014398509481984); +} })(); diff --git a/jstests/core/type8.js b/jstests/core/type8.js index ceb4993ecb1..e540cc901c7 100644 --- a/jstests/core/type8.js +++ b/jstests/core/type8.js @@ -1,18 +1,18 @@ (function() { - "use strict"; +"use strict"; - // SERVER-8246 Min/MaxKey should be comparable - // - // make sure that the MinKey MaxKey JS types are comparable +// SERVER-8246 Min/MaxKey should be comparable +// +// make sure that the MinKey MaxKey JS types are comparable - function testType(t1, t2) { - db.minmaxcmp.save({_id: t1}); - var doc = db.minmaxcmp.findOne({_id: t1}); - assert.eq(doc._id, t1, "Value for " + t1 + " did not round-trip to DB correctly"); - assert.neq(doc._id, t2, "Value for " + t1 + " should not equal " + t2); - assert(doc._id instanceof t1, "Value for " + t1 + "should be instance of" + t1); - assert(!(doc._id instanceof t2), "Value for " + t1 + "shouldn't be instance of" + t2); - } - testType(MinKey, MaxKey); - testType(MaxKey, MinKey); +function testType(t1, t2) { + db.minmaxcmp.save({_id: t1}); + var doc = db.minmaxcmp.findOne({_id: t1}); + assert.eq(doc._id, t1, "Value for " + t1 + " did not round-trip to DB correctly"); + assert.neq(doc._id, t2, "Value for " + t1 + " should not equal " + t2); + assert(doc._id instanceof t1, "Value for " + t1 + "should be instance of" + t1); + assert(!(doc._id instanceof t2), "Value for " + t1 + "shouldn't be instance of" + t2); +} +testType(MinKey, MaxKey); +testType(MaxKey, MinKey); })(); diff --git a/jstests/core/type_array.js b/jstests/core/type_array.js index 49ebf26764e..0bc3dc0f6f3 100644 --- a/jstests/core/type_array.js +++ b/jstests/core/type_array.js @@ -4,70 +4,69 @@ * Tests for the array-related behavior of the $type query operator. */ (function() { - "use strict"; +"use strict"; - let coll = db.jstest_type_array; - coll.drop(); +let coll = db.jstest_type_array; +coll.drop(); - /** - * Iterates 'cursor' and returns a sorted array of the '_id' fields for the returned documents. - */ - function extractSortedIdsFromCursor(cursor) { - let ids = []; - while (cursor.hasNext()) { - ids.push(cursor.next()._id); - } - return ids.sort(); +/** + * Iterates 'cursor' and returns a sorted array of the '_id' fields for the returned documents. 
+ */ +function extractSortedIdsFromCursor(cursor) { + let ids = []; + while (cursor.hasNext()) { + ids.push(cursor.next()._id); } + return ids.sort(); +} - function runTests() { - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]})); - assert.writeOK(coll.insert({_id: 2, a: [1, "foo", 3]})); - assert.writeOK(coll.insert({_id: 3, a: []})); - assert.writeOK(coll.insert({_id: 4, a: [[]]})); - assert.writeOK(coll.insert({_id: 5, a: [[[]]]})); - assert.writeOK(coll.insert({_id: 6, a: 1})); - assert.writeOK(coll.insert({_id: 7, a: "foo"})); +function runTests() { + assert.writeOK(coll.remove({})); + assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]})); + assert.writeOK(coll.insert({_id: 2, a: [1, "foo", 3]})); + assert.writeOK(coll.insert({_id: 3, a: []})); + assert.writeOK(coll.insert({_id: 4, a: [[]]})); + assert.writeOK(coll.insert({_id: 5, a: [[[]]]})); + assert.writeOK(coll.insert({_id: 6, a: 1})); + assert.writeOK(coll.insert({_id: 7, a: "foo"})); - assert.eq([1, 2, 6], extractSortedIdsFromCursor(coll.find({a: {$type: "number"}}))); - assert.eq([2, 7], extractSortedIdsFromCursor(coll.find({a: {$type: "string"}}))); - assert.eq([1, 2, 3, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: "array"}}))); - assert.eq([4, 5], extractSortedIdsFromCursor(coll.find({"a.0": {$type: "array"}}))); - assert.eq([5], extractSortedIdsFromCursor(coll.find({"a.0.0": {$type: "array"}}))); + assert.eq([1, 2, 6], extractSortedIdsFromCursor(coll.find({a: {$type: "number"}}))); + assert.eq([2, 7], extractSortedIdsFromCursor(coll.find({a: {$type: "string"}}))); + assert.eq([1, 2, 3, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: "array"}}))); + assert.eq([4, 5], extractSortedIdsFromCursor(coll.find({"a.0": {$type: "array"}}))); + assert.eq([5], extractSortedIdsFromCursor(coll.find({"a.0.0": {$type: "array"}}))); - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: 1})); - assert.writeOK(coll.insert({_id: 1, a: NumberInt(1)})); - assert.writeOK(coll.insert({_id: 2, a: NumberLong(1)})); - assert.writeOK(coll.insert({_id: 3, a: "str"})); - assert.writeOK(coll.insert({_id: 4, a: []})); - assert.writeOK(coll.insert({_id: 5, a: [NumberInt(1), "str"]})); - assert.writeOK(coll.insert({_id: 6})); + assert.writeOK(coll.remove({})); + assert.writeOK(coll.insert({_id: 0, a: 1})); + assert.writeOK(coll.insert({_id: 1, a: NumberInt(1)})); + assert.writeOK(coll.insert({_id: 2, a: NumberLong(1)})); + assert.writeOK(coll.insert({_id: 3, a: "str"})); + assert.writeOK(coll.insert({_id: 4, a: []})); + assert.writeOK(coll.insert({_id: 5, a: [NumberInt(1), "str"]})); + assert.writeOK(coll.insert({_id: 6})); - // Test that $type fails when given array that contains an element that is neither a string - // nor a number. - assert.throws(() => coll.find({a: {$type: ["string", null]}}).itcount()); - assert.throws(() => coll.find({a: {$type: [{}, "string"]}}).itcount()); + // Test that $type fails when given array that contains an element that is neither a string + // nor a number. + assert.throws(() => coll.find({a: {$type: ["string", null]}}).itcount()); + assert.throws(() => coll.find({a: {$type: [{}, "string"]}}).itcount()); - // Test that $type with an array of types can accept both string aliases and numerical type - // codes. Also verifies matching behavior for arrays and for missing values. 
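// For context (an illustrative sketch outside the patch; the `type_alias_demo` collection
// name is hypothetical): the mixed arrays passed to $type below rely on the BSON alias/code
// pairs "string" = 2, "array" = 4 and "long" = 18, so {$type: ["long", 2]} should match the
// same documents as {$type: ["long", "string"]}.
var demoColl = db.type_alias_demo;
demoColl.drop();
assert.writeOK(demoColl.insert({_id: 0, a: NumberLong(1)}));
assert.writeOK(demoColl.insert({_id: 1, a: "str"}));
assert.eq(demoColl.find({a: {$type: ["long", 2]}}).itcount(),
          demoColl.find({a: {$type: ["long", "string"]}}).itcount());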
- assert.eq([2, 3, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", 2]}}))); + // Test that $type with an array of types can accept both string aliases and numerical type + // codes. Also verifies matching behavior for arrays and for missing values. + assert.eq([2, 3, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", 2]}}))); - // Test $type with an array of types, where one of those types is itself "array". - assert.eq([2, 4, 5], - extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array"]}}))); + // Test $type with an array of types, where one of those types is itself "array". + assert.eq([2, 4, 5], extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array"]}}))); - // Test that duplicate types are allowed in the array. - assert.eq([2, 4, 5], - extractSortedIdsFromCursor( - coll.find({a: {$type: ["long", "array", "long", "array"]}}))); - assert.eq([2, 4, 5], - extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", 18, 4]}}))); - } + // Test that duplicate types are allowed in the array. + assert.eq( + [2, 4, 5], + extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", "long", "array"]}}))); + assert.eq([2, 4, 5], + extractSortedIdsFromCursor(coll.find({a: {$type: ["long", "array", 18, 4]}}))); +} - // Verify $type queries both with and without an index. - runTests(); - assert.writeOK(coll.createIndex({a: 1})); - runTests(); +// Verify $type queries both with and without an index. +runTests(); +assert.writeOK(coll.createIndex({a: 1})); +runTests(); }()); diff --git a/jstests/core/uniqueness.js b/jstests/core/uniqueness.js index e25c8a48d70..54a5a71c276 100644 --- a/jstests/core/uniqueness.js +++ b/jstests/core/uniqueness.js @@ -9,76 +9,78 @@ // ] (function() { - "use strict"; +"use strict"; - var res; +var res; - let t = db.jstests_uniqueness; +let t = db.jstests_uniqueness; - t.drop(); +t.drop(); - // test uniqueness of _id +// test uniqueness of _id - res = t.save({_id: 3}); - assert.writeOK(res); +res = t.save({_id: 3}); +assert.writeOK(res); - // this should yield an error - res = t.insert({_id: 3}); - assert.writeError(res); - assert.eq(1, t.count()); +// this should yield an error +res = t.insert({_id: 3}); +assert.writeError(res); +assert.eq(1, t.count()); - res = t.insert({_id: 4, x: 99}); - assert.writeOK(res); +res = t.insert({_id: 4, x: 99}); +assert.writeOK(res); - // this should yield an error - res = t.update({_id: 4}, {_id: 3, x: 99}); - assert.writeError(res); - assert(t.findOne({_id: 4})); +// this should yield an error +res = t.update({_id: 4}, {_id: 3, x: 99}); +assert.writeError(res); +assert(t.findOne({_id: 4})); - // Check for an error message when we index and there are dups - db.jstests_uniqueness2.drop(); - db.jstests_uniqueness2.insert({a: 3}); - db.jstests_uniqueness2.insert({a: 3}); - assert.eq(2, db.jstests_uniqueness2.count()); - res = db.jstests_uniqueness2.ensureIndex({a: 1}, true); - assert.commandFailed(res); - assert(res.errmsg.match(/E11000/)); +// Check for an error message when we index and there are dups +db.jstests_uniqueness2.drop(); +db.jstests_uniqueness2.insert({a: 3}); +db.jstests_uniqueness2.insert({a: 3}); +assert.eq(2, db.jstests_uniqueness2.count()); +res = db.jstests_uniqueness2.ensureIndex({a: 1}, true); +assert.commandFailed(res); +assert(res.errmsg.match(/E11000/)); - // Check for an error message when we index in the background and there are dups - db.jstests_uniqueness2.drop(); - db.jstests_uniqueness2.insert({a: 3}); - db.jstests_uniqueness2.insert({a: 3}); - 
assert.eq(2, db.jstests_uniqueness2.count()); - res = db.jstests_uniqueness2.ensureIndex({a: 1}, {unique: true, background: true}); - assert.commandFailed(res); - assert(res.errmsg.match(/E11000/)); +// Check for an error message when we index in the background and there are dups +db.jstests_uniqueness2.drop(); +db.jstests_uniqueness2.insert({a: 3}); +db.jstests_uniqueness2.insert({a: 3}); +assert.eq(2, db.jstests_uniqueness2.count()); +res = db.jstests_uniqueness2.ensureIndex({a: 1}, {unique: true, background: true}); +assert.commandFailed(res); +assert(res.errmsg.match(/E11000/)); - // Verify that duplicate key errors follow a fixed format, including field information. - const coll = db.checkDupErrorMessage; - const key = {_id: 1}; - const expectedMessage = - 'E11000 duplicate key error collection: ' + coll + ' index: _id_ dup key: { _id: 1.0 }'; - coll.drop(); - assert.commandWorked(coll.insert(key)); - res = coll.insert(key); - assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey); - assert.eq(res.nInserted, 0, tojson(res)); - const writeError = res.getWriteError(); - assert.eq(writeError.errmsg, - expectedMessage, - "The duplicate key error message must exactly match." + tojson(res)); +// Verify that duplicate key errors follow a fixed format, including field information. +const coll = db.checkDupErrorMessage; +const key = { + _id: 1 +}; +const expectedMessage = + 'E11000 duplicate key error collection: ' + coll + ' index: _id_ dup key: { _id: 1.0 }'; +coll.drop(); +assert.commandWorked(coll.insert(key)); +res = coll.insert(key); +assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey); +assert.eq(res.nInserted, 0, tojson(res)); +const writeError = res.getWriteError(); +assert.eq(writeError.errmsg, + expectedMessage, + "The duplicate key error message must exactly match." + tojson(res)); - /* Check that if we update and remove _id, it gets added back by the DB */ +/* Check that if we update and remove _id, it gets added back by the DB */ - /* - test when object grows */ - t.drop(); - t.save({_id: 'Z'}); - t.update({}, {k: 2}); - assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id"); +/* - test when object grows */ +t.drop(); +t.save({_id: 'Z'}); +t.update({}, {k: 2}); +assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id"); - /* - test when doesn't grow */ - t.drop(); - t.save({_id: 'Z', k: 3}); - t.update({}, {k: 2}); - assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)"); +/* - test when doesn't grow */ +t.drop(); +t.save({_id: 'Z', k: 3}); +t.update({}, {k: 2}); +assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)"); })(); diff --git a/jstests/core/update_affects_indexes.js b/jstests/core/update_affects_indexes.js index a396fc29079..956efadbf0e 100644 --- a/jstests/core/update_affects_indexes.js +++ b/jstests/core/update_affects_indexes.js @@ -1,94 +1,94 @@ // This is a regression test for SERVER-32048. It checks that index keys are correctly updated when // an update modifier implicitly creates a new array element. 
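// For context (an illustrative sketch outside the patch; the collection name is hypothetical):
// "implicitly creates a new array element" means an update that targets an index past the end
// of an array pads the array, e.g. {$set: {"a.1.c": 0}} on {a: [{b: 0}]} produces
// {a: [{b: 0}, {c: 0}]}. The padded element has no "b", so an index on {"a.b": 1} must gain a
// null key for the document, which is what the test below verifies via hint()/min()/returnKey().
var padDemo = db.implicit_array_element_demo;
padDemo.drop();
assert.writeOK(padDemo.insert({_id: 0, a: [{b: 0}]}));
assert.writeOK(padDemo.update({_id: 0}, {$set: {"a.1.c": 0}}));
assert.eq(padDemo.findOne({_id: 0}).a, [{b: 0}, {c: 0}]);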
(function() { - "use strict"; +"use strict"; - let coll = db.update_affects_indexes; - coll.drop(); - let indexKeyPattern = {"a.b": 1}; - assert.commandWorked(coll.createIndex(indexKeyPattern)); +let coll = db.update_affects_indexes; +coll.drop(); +let indexKeyPattern = {"a.b": 1}; +assert.commandWorked(coll.createIndex(indexKeyPattern)); - // Tests that the document 'docId' has all the index keys in 'expectedKeys' and none of the - // index keys in 'unexpectedKeys'. - function assertExpectedIndexKeys(docId, expectedKeys, unexpectedKeys) { - for (let key of expectedKeys) { - let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray(); - assert.eq(1, res.length, tojson(res)); - assert.eq(key, res[0]); - } +// Tests that the document 'docId' has all the index keys in 'expectedKeys' and none of the +// index keys in 'unexpectedKeys'. +function assertExpectedIndexKeys(docId, expectedKeys, unexpectedKeys) { + for (let key of expectedKeys) { + let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray(); + assert.eq(1, res.length, tojson(res)); + assert.eq(key, res[0]); + } - for (let key of unexpectedKeys) { - let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray(); - if (res.length > 0) { - assert.eq(1, res.length, tojson(res)); - assert.neq(0, bsonWoCompare(key, res[0]), tojson(res[0])); - } + for (let key of unexpectedKeys) { + let res = coll.find(docId).hint(indexKeyPattern).min(key).returnKey().toArray(); + if (res.length > 0) { + assert.eq(1, res.length, tojson(res)); + assert.neq(0, bsonWoCompare(key, res[0]), tojson(res[0])); } } +} - // $set implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 0}, {$set: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}, {"a.b": null}], []); +// $set implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 0}, {$set: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 0}, [{"a.b": 0}, {"a.b": null}], []); - // $set implicitly creates array element beyond end of array. - assert.writeOK(coll.insert({_id: 1, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 1}, {$set: {"a.3.c": 0}})); - assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}, {"a.b": null}], []); +// $set implicitly creates array element beyond end of array. +assert.writeOK(coll.insert({_id: 1, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 1}, {$set: {"a.3.c": 0}})); +assertExpectedIndexKeys({_id: 1}, [{"a.b": 0}, {"a.b": null}], []); - // $set implicitly creates array element in empty array (no index key changes needed). - assert.writeOK(coll.insert({_id: 2, a: []})); - assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []); - assert.writeOK(coll.update({_id: 2}, {$set: {"a.0.c": 0}})); - assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []); +// $set implicitly creates array element in empty array (no index key changes needed). 
+assert.writeOK(coll.insert({_id: 2, a: []})); +assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []); +assert.writeOK(coll.update({_id: 2}, {$set: {"a.0.c": 0}})); +assertExpectedIndexKeys({_id: 2}, [{"a.b": null}], []); - // $inc implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 3, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 3}, {$inc: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}, {"a.b": null}], []); +// $inc implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 3, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 3}, {$inc: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 3}, [{"a.b": 0}, {"a.b": null}], []); - // $mul implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 4, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 4}, {$mul: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}, {"a.b": null}], []); +// $mul implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 4, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 4}, {$mul: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 4}, [{"a.b": 0}, {"a.b": null}], []); - // $addToSet implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 5, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}, {"a.b": null}], []); +// $addToSet implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 5, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 5}, {$addToSet: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 5}, [{"a.b": 0}, {"a.b": null}], []); - // $bit implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 6, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}})); - assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}, {"a.b": null}], []); +// $bit implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 6, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 6}, {$bit: {"a.1.c": {and: NumberInt(1)}}})); +assertExpectedIndexKeys({_id: 6}, [{"a.b": 0}, {"a.b": null}], []); - // $min implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 7, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 7}, {$min: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}, {"a.b": null}], []); +// $min implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 7, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 7}, {$min: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 7}, [{"a.b": 0}, {"a.b": null}], []); - // $max implicitly creates array element at end of array. 
- assert.writeOK(coll.insert({_id: 8, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 8}, {$max: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}, {"a.b": null}], []); +// $max implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 8, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 8}, {$max: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 8}, [{"a.b": 0}, {"a.b": null}], []); - // $currentDate implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 9, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}})); - assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}, {"a.b": null}], []); +// $currentDate implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 9, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 9}, {$currentDate: {"a.1.c": true}})); +assertExpectedIndexKeys({_id: 9}, [{"a.b": 0}, {"a.b": null}], []); - // $push implicitly creates array element at end of array. - assert.writeOK(coll.insert({_id: 10, a: [{b: 0}]})); - assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}], [{"a.b": null}]); - assert.writeOK(coll.update({_id: 10}, {$push: {"a.1.c": 0}})); - assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}, {"a.b": null}], []); +// $push implicitly creates array element at end of array. +assert.writeOK(coll.insert({_id: 10, a: [{b: 0}]})); +assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}], [{"a.b": null}]); +assert.writeOK(coll.update({_id: 10}, {$push: {"a.1.c": 0}})); +assertExpectedIndexKeys({_id: 10}, [{"a.b": 0}, {"a.b": null}], []); }()); diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js index a59b135e75d..55d7614495d 100644 --- a/jstests/core/update_arrayFilters.js +++ b/jstests/core/update_arrayFilters.js @@ -4,714 +4,695 @@ // Tests for the arrayFilters option to update and findAndModify. (function() { - "use strict"; +"use strict"; - const collName = "update_arrayFilters"; - let coll = db[collName]; - coll.drop(); - assert.commandWorked(db.createCollection(collName)); - let res; - - // - // Tests for update. - // - - if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - }); - } else { - // Non-array arrayFilters fails to parse. - assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: {i: 0}}), - ErrorCodes.TypeMismatch); - - // Non-object array filter fails to parse. - assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}), - ErrorCodes.TypeMismatch); - - // Bad array filter fails to parse. - res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); - assert.neq(-1, - res.getWriteError().errmsg.indexOf("Expected a single top-level field name"), - "update failed for a reason other than failing to parse array filters"); - - // Multiple array filters with the same id fails to parse. 
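// For context (an illustrative sketch outside the patch; the collection name is hypothetical
// and, as the test itself notes, the option only parses in the "commands" write mode):
// arrayFilters binds an identifier used in a "$[<id>]" path to a predicate selecting which
// array elements an update modifier applies to. A minimal successful use looks like:
var afDemo = db.array_filters_demo;
afDemo.drop();
assert.writeOK(afDemo.insert({_id: 0, grades: [95, 80, 92]}));
assert.writeOK(afDemo.update(
    {_id: 0}, {$set: {"grades.$[g]": 100}}, {arrayFilters: [{g: {$gte: 90}}]}));
assert.eq(afDemo.findOne({_id: 0}).grades, [100, 80, 100]);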
- res = coll.update( - {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]}); - assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "Found multiple array filters with the same top-level field name"), - "update failed for a reason other than multiple array filters with the same top-level field name"); - - // Unused array filter fails to parse. - res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}, {j: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "The array filter for identifier 'j' was not used in the update { $set: { a.$[i]: 5.0 } }"), - "update failed for a reason other than unused array filter"); - - // Array filter without a top-level field name fails to parse. - res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$alwaysTrue: 1}]}); - assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "Cannot use an expression without a top-level field name in arrayFilters"), - "update failed for a reason other than missing a top-level field name in arrayFilter"); - - // Array filter with $text inside fails to parse. - res = coll.update( - {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$text: {$search: "foo"}}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - - // Array filter with $where inside fails to parse. - res = - coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$where: "this.a == 2"}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - - // Array filter with $geoNear inside fails to parse. - res = coll.update( - {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{loc: {$geoNear: [50, 50]}}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - - // Array filter with $expr inside fails to parse. - res = coll.update( - {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$expr: {$eq: ["$foo", "$bar"]}}]}); - assert.writeErrorWithCode(res, ErrorCodes.QueryFeatureNotAllowed); - - // Good value for arrayFilters succeeds. - assert.writeOK(coll.update( - {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]})); - assert.writeOK(coll.update( - {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]})); - } - - // - // Tests for findAndModify. - // +const collName = "update_arrayFilters"; +let coll = db[collName]; +coll.drop(); +assert.commandWorked(db.createCollection(collName)); +let res; - // Non-array arrayFilters fails to parse. - assert.throws(function() { - coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: {i: 0}}); - }); +// +// Tests for update. +// - // Non-object array filter fails to parse. +if (db.getMongo().writeMode() !== "commands") { assert.throws(function() { - coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: ["bad"]}); + coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); }); +} else { + // Non-array arrayFilters fails to parse. + assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: {i: 0}}), + ErrorCodes.TypeMismatch); - // arrayFilters option not allowed with remove=true. - assert.throws(function() { - coll.findAndModify({query: {_id: 0}, remove: true, arrayFilters: [{i: 0}]}); - }); + // Non-object array filter fails to parse. 
+ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}), + ErrorCodes.TypeMismatch); // Bad array filter fails to parse. - assert.throws(function() { - coll.findAndModify( - {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0, j: 0}]}); - }); + res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); + assert.neq(-1, + res.getWriteError().errmsg.indexOf("Expected a single top-level field name"), + "update failed for a reason other than failing to parse array filters"); // Multiple array filters with the same id fails to parse. - assert.throws(function() { - coll.findAndModify({ - query: {_id: 0}, - update: {$set: {"a.$[i]": 5, "a.$[j]": 6}}, - arrayFilters: [{i: 0}, {j: 0}, {i: 1}] - }); - }); + res = coll.update( + {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}, {i: 1}]}); + assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "Found multiple array filters with the same top-level field name"), + "update failed for a reason other than multiple array filters with the same top-level field name"); // Unused array filter fails to parse. - assert.throws(function() { - coll.findAndModify( - {query: {_id: 0}, update: {$set: {"a.$[i]": 5}, arrayFilters: [{i: 0}, {j: 0}]}}); - }); + res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}, {j: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "The array filter for identifier 'j' was not used in the update { $set: { a.$[i]: 5.0 } }"), + "update failed for a reason other than unused array filter"); + + // Array filter without a top-level field name fails to parse. + res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$alwaysTrue: 1}]}); + assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "Cannot use an expression without a top-level field name in arrayFilters"), + "update failed for a reason other than missing a top-level field name in arrayFilter"); + + // Array filter with $text inside fails to parse. + res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$text: {$search: "foo"}}]}); + assert.writeErrorWithCode(res, ErrorCodes.BadValue); + + // Array filter with $where inside fails to parse. + res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$where: "this.a == 2"}]}); + assert.writeErrorWithCode(res, ErrorCodes.BadValue); + + // Array filter with $geoNear inside fails to parse. + res = + coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{loc: {$geoNear: [50, 50]}}]}); + assert.writeErrorWithCode(res, ErrorCodes.BadValue); + + // Array filter with $expr inside fails to parse. + res = coll.update( + {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$expr: {$eq: ["$foo", "$bar"]}}]}); + assert.writeErrorWithCode(res, ErrorCodes.QueryFeatureNotAllowed); // Good value for arrayFilters succeeds. - assert.eq(null, coll.findAndModify({ + assert.writeOK(coll.update( + {_id: 0}, {$set: {"a.$[i]": 5, "a.$[j]": 6}}, {arrayFilters: [{i: 0}, {j: 0}]})); + assert.writeOK(coll.update( + {_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}]})); +} + +// +// Tests for findAndModify. +// + +// Non-array arrayFilters fails to parse. 
+assert.throws(function() { + coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: {i: 0}}); +}); + +// Non-object array filter fails to parse. +assert.throws(function() { + coll.findAndModify({query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: ["bad"]}); +}); + +// arrayFilters option not allowed with remove=true. +assert.throws(function() { + coll.findAndModify({query: {_id: 0}, remove: true, arrayFilters: [{i: 0}]}); +}); + +// Bad array filter fails to parse. +assert.throws(function() { + coll.findAndModify( + {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0, j: 0}]}); +}); + +// Multiple array filters with the same id fails to parse. +assert.throws(function() { + coll.findAndModify({ query: {_id: 0}, update: {$set: {"a.$[i]": 5, "a.$[j]": 6}}, - arrayFilters: [{i: 0}, {j: 0}] - })); - assert.eq(null, coll.findAndModify({ - query: {_id: 0}, - update: {$set: {"a.$[i]": 5}}, - arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}] - })); - - // - // Tests for the bulk API. - // - - if (db.getMongo().writeMode() !== "commands") { - let bulk = coll.initializeUnorderedBulkOp(); - bulk.find({}); - assert.throws(function() { - bulk.arrayFilters([{i: 0}]); - }); - } else { - // update(). - let bulk = coll.initializeUnorderedBulkOp(); - bulk.find({}).arrayFilters("bad").update({$set: {"a.$[i]": 5}}); - assert.throws(function() { - bulk.execute(); - }); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}}); - assert.writeOK(bulk.execute()); - - // updateOne(). - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {"a.$[i]": 5}}); - assert.throws(function() { - bulk.execute(); - }); - bulk = coll.initializeUnorderedBulkOp(); - bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}}); - assert.writeOK(bulk.execute()); - } - - // - // Tests for the CRUD API. - // - - // findOneAndUpdate(). + arrayFilters: [{i: 0}, {j: 0}, {i: 1}] + }); +}); + +// Unused array filter fails to parse. +assert.throws(function() { + coll.findAndModify( + {query: {_id: 0}, update: {$set: {"a.$[i]": 5}, arrayFilters: [{i: 0}, {j: 0}]}}); +}); + +// Good value for arrayFilters succeeds. +assert.eq(null, coll.findAndModify({ + query: {_id: 0}, + update: {$set: {"a.$[i]": 5, "a.$[j]": 6}}, + arrayFilters: [{i: 0}, {j: 0}] +})); +assert.eq(null, coll.findAndModify({ + query: {_id: 0}, + update: {$set: {"a.$[i]": 5}}, + arrayFilters: [{$or: [{i: 0}, {$and: [{}]}]}] +})); + +// +// Tests for the bulk API. +// + +if (db.getMongo().writeMode() !== "commands") { + let bulk = coll.initializeUnorderedBulkOp(); + bulk.find({}); assert.throws(function() { - coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); + bulk.arrayFilters([{i: 0}]); }); - assert.eq(null, - coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]})); +} else { + // update(). + let bulk = coll.initializeUnorderedBulkOp(); + bulk.find({}).arrayFilters("bad").update({$set: {"a.$[i]": 5}}); + assert.throws(function() { + bulk.execute(); + }); + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({}).arrayFilters([{i: 0}]).update({$set: {"a.$[i]": 5}}); + assert.writeOK(bulk.execute()); // updateOne(). 
- if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - }); - } else { - assert.throws(function() { - coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); - }); - res = coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - assert.eq(0, res.modifiedCount); - } - - // updateMany(). - if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - }); - } else { - assert.throws(function() { - coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); - }); - res = coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - assert.eq(0, res.modifiedCount); - } - - // updateOne with bulkWrite(). - if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.bulkWrite([{ - updateOne: - {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]} - }]); - }); - } else { - assert.throws(function() { - coll.bulkWrite([{ - updateOne: - {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"} - }]); - }); - res = coll.bulkWrite([{ - updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]} - }]); - assert.eq(0, res.matchedCount); - } - - // updateMany with bulkWrite(). - if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.bulkWrite([ - {updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}} - ]); - }); - } else { - assert.throws(function() { - coll.bulkWrite( - [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]); - }); - res = coll.bulkWrite( - [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]); - assert.eq(0, res.matchedCount); - } - - // - // Tests for explain(). - // + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({_id: 0}).arrayFilters("bad").updateOne({$set: {"a.$[i]": 5}}); + assert.throws(function() { + bulk.execute(); + }); + bulk = coll.initializeUnorderedBulkOp(); + bulk.find({_id: 0}).arrayFilters([{i: 0}]).updateOne({$set: {"a.$[i]": 5}}); + assert.writeOK(bulk.execute()); +} + +// +// Tests for the CRUD API. +// + +// findOneAndUpdate(). +assert.throws(function() { + coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); +}); +assert.eq(null, coll.findOneAndUpdate({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]})); + +// updateOne(). +if (db.getMongo().writeMode() !== "commands") { + assert.throws(function() { + coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); + }); +} else { + assert.throws(function() { + coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); + }); + res = coll.updateOne({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); + assert.eq(0, res.modifiedCount); +} - // update(). - if (db.getMongo().writeMode() !== "commands") { - assert.throws(function() { - coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); - }); - } else { - assert.throws(function() { - coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); - }); - assert.commandWorked( - coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]})); - } - - // findAndModify(). +// updateMany(). 
+if (db.getMongo().writeMode() !== "commands") { assert.throws(function() { - coll.explain().findAndModify( - {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}); + coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); }); - assert.commandWorked(coll.explain().findAndModify( - {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]})); +} else { + assert.throws(function() { + coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); + }); + res = coll.updateMany({}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); + assert.eq(0, res.modifiedCount); +} - // - // Tests for individual update modifiers. - // +// updateOne with bulkWrite(). +if (db.getMongo().writeMode() !== "commands") { + assert.throws(function() { + coll.bulkWrite([ + {updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}} + ]); + }); +} else { + assert.throws(function() { + coll.bulkWrite( + [{updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]); + }); + res = coll.bulkWrite( + [{updateOne: {filter: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]); + assert.eq(0, res.matchedCount); +} - // $set. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]}); - } - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]}); - - // $unset. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]}); - } - assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]}); - - // $inc. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]}); - } - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); - assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]}); +// updateMany with bulkWrite(). +if (db.getMongo().writeMode() !== "commands") { + assert.throws(function() { + coll.bulkWrite( + [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]); + }); +} else { + assert.throws(function() { + coll.bulkWrite( + [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}}]); + }); + res = coll.bulkWrite( + [{updateMany: {filter: {}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]}}]); + assert.eq(0, res.matchedCount); +} - // $mul. 
- coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]}); - } - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]})); - assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]}); +// +// Tests for explain(). +// - // $rename. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4]})); - if (db.getMongo().writeMode() === "commands") { - res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - assert.neq(-1, - res.getWriteError().errmsg.indexOf( - "The source field for $rename may not be dynamic: a.$[i]"), - "update failed for a reason other than using $[] syntax in $rename path"); - res = coll.update({id: 0}, {$rename: {"a": "b"}}, {arrayFilters: [{i: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "The array filter for identifier 'i' was not used in the update { $rename: { a: \"b\" } }"), - "updated failed for reason other than unused array filter"); - } - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0], b: [1]})); - res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - assert.neq(-1, - res.getWriteError().errmsg.indexOf( - "The source field for $rename may not be dynamic: a.$[]"), - "update failed for a reason other than using array updates with $rename"); - res = coll.update({_id: 0}, {$rename: {"a": "b.$[]"}}); +// update(). +if (db.getMongo().writeMode() !== "commands") { + assert.throws(function() { + coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]}); + }); +} else { + assert.throws(function() { + coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: "bad"}); + }); + assert.commandWorked( + coll.explain().update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0}]})); +} + +// findAndModify(). +assert.throws(function() { + coll.explain().findAndModify( + {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: "bad"}); +}); +assert.commandWorked(coll.explain().findAndModify( + {query: {_id: 0}, update: {$set: {"a.$[i]": 5}}, arrayFilters: [{i: 0}]})); + +// +// Tests for individual update modifiers. +// + +// $set. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2, 1]}); +} +assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[]": 3}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]}); + +// $unset. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, 1, null, 1]}); +} +assert.writeOK(coll.update({_id: 0}, {$unset: {"a.$[]": true}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [null, null, null, null]}); + +// $inc. 
+coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[i]": 1}}, {arrayFilters: [{i: 1}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2, 0, 2]}); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); +assert.writeOK(coll.update({_id: 0}, {$inc: {"a.$[]": 1}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1, 2, 1, 2]}); + +// $mul. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 2, 0, 2]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[i]": 3}}, {arrayFilters: [{i: 2}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 6, 0, 6]}); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [1, 2, 1, 2]})); +assert.writeOK(coll.update({_id: 0}, {$mul: {"a.$[]": 3}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 6, 3, 6]}); + +// $rename. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4]})); +if (db.getMongo().writeMode() === "commands") { + res = coll.update({_id: 0}, {$rename: {"a.$[i]": "b"}}, {arrayFilters: [{i: 0}]}); assert.writeErrorWithCode(res, ErrorCodes.BadValue); assert.neq(-1, res.getWriteError().errmsg.indexOf( - "The destination field for $rename may not be dynamic: b.$[]"), - "update failed for a reason other than using array updates with $rename"); - assert.writeOK(coll.update({_id: 0}, {$rename: {"a": "b"}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, b: [0]}); - - // $setOnInsert. - coll.drop(); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0, a: [0]}, - {$setOnInsert: {"a.$[i]": 1}}, - {arrayFilters: [{i: 0}], upsert: true})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]}); - } - coll.drop(); - assert.writeOK(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true})); + "The source field for $rename may not be dynamic: a.$[i]"), + "update failed for a reason other than using $[] syntax in $rename path"); + res = coll.update({id: 0}, {$rename: {"a": "b"}}, {arrayFilters: [{i: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "The array filter for identifier 'i' was not used in the update { $rename: { a: \"b\" } }"), + "updated failed for reason other than unused array filter"); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0], b: [1]})); +res = coll.update({_id: 0}, {$rename: {"a.$[]": "b"}}); +assert.writeErrorWithCode(res, ErrorCodes.BadValue); +assert.neq( + -1, + res.getWriteError().errmsg.indexOf("The source field for $rename may not be dynamic: a.$[]"), + "update failed for a reason other than using array updates with $rename"); +res = coll.update({_id: 0}, {$rename: {"a": "b.$[]"}}); +assert.writeErrorWithCode(res, ErrorCodes.BadValue); +assert.neq(-1, + res.getWriteError().errmsg.indexOf( + "The destination field for $rename may not be dynamic: b.$[]"), + "update failed for a reason other than using array updates with $rename"); +assert.writeOK(coll.update({_id: 0}, {$rename: {"a": "b"}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, b: [0]}); + +// $setOnInsert. +coll.drop(); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update( + {_id: 0, a: [0]}, {$setOnInsert: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}], upsert: true})); assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]}); - - // $min. 
- coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK( - coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 1}]}); - } - assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[].c": 0}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 0}]}); - - // $max. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK( - coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: -1}]}); - } - assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[].c": 0}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: 0}]}); - - // $currentDate. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK( - coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]})); - let doc = coll.findOne({_id: 0}); - assert(doc.a[0].constructor == Date, tojson(doc)); - assert.eq(doc.a[1], 1, printjson(doc)); - } - assert.writeOK(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}})); +} +coll.drop(); +assert.writeOK(coll.update({_id: 0, a: [0]}, {$setOnInsert: {"a.$[]": 1}}, {upsert: true})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [1]}); + +// $min. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: 1}]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 1}]}); +} +assert.writeOK(coll.update({_id: 0}, {$min: {"a.$[].c": 0}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 0}, {b: 0, c: -1}, {b: 1, c: 0}]}); + +// $max. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0, c: 1}, {b: 0, c: -1}, {b: 1, c: -1}]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[i].c": 0}}, {arrayFilters: [{"i.b": 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: -1}]}); +} +assert.writeOK(coll.update({_id: 0}, {$max: {"a.$[].c": 0}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 0, c: 1}, {b: 0, c: 0}, {b: 1, c: 0}]}); + +// $currentDate. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK( + coll.update({_id: 0}, {$currentDate: {"a.$[i]": true}}, {arrayFilters: [{i: 0}]})); let doc = coll.findOne({_id: 0}); assert(doc.a[0].constructor == Date, tojson(doc)); - assert(doc.a[1].constructor == Date, tojson(doc)); - - // $addToSet. 
- coll.drop(); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]})); - assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1]]}); - } - coll.drop(); + assert.eq(doc.a[1], 1, printjson(doc)); +} +assert.writeOK(coll.update({_id: 0}, {$currentDate: {"a.$[]": true}})); +let doc = coll.findOne({_id: 0}); +assert(doc.a[0].constructor == Date, tojson(doc)); +assert(doc.a[1].constructor == Date, tojson(doc)); + +// $addToSet. +coll.drop(); +if (db.getMongo().writeMode() === "commands") { assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]})); - assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1, 2]]}); - - // $pop. + assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1]]}); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0], [1]]})); +assert.writeOK(coll.update({_id: 0}, {$addToSet: {"a.$[]": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[0, 2], [1, 2]]}); + +// $pop. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]})); + assert.eq({_id: 0, a: [[0], [1, 2]]}, coll.findOne()); +} +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [[0]]})); +assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[]": 1}})); +assert.eq({_id: 0, a: [[]]}, coll.findOne()); + +// $pullAll. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]})); + assert.eq({_id: 0, a: [[1, 3], [1, 2, 3, 4]]}, coll.findOne()); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]})); +res = coll.update({_id: 0}, {$pullAll: {"a.$[]": [0, 2]}}); +assert.eq({_id: 0, a: [[1, 3], [1, 3, 4]]}, coll.findOne()); + +// $pull. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]})); + assert.eq({_id: 0, a: [[0, 1], [2]]}, coll.findOne()); +} +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); +assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[]": 1}})); +assert.eq({_id: 0, a: [[0], [2]]}, coll.findOne()); + +// $push. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]})); + assert.eq({_id: 0, a: [[0, 1, 4], [2, 3]]}, coll.findOne()); +} +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]})); +assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[]": 4}})); +assert.eq({_id: 0, a: [[0, 1, 4], [2, 3, 4]]}, coll.findOne()); + +// $bit. 
+coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK( + coll.update({_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]})); + assert.eq({_id: 0, a: [NumberInt(10), NumberInt(2)]}, coll.findOne()); +} +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]})); +assert.writeOK(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}})); +assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne()); + +// +// Multi update tests. +// + +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); +assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]})); +if (db.getMongo().writeMode() === "commands") { + assert.writeOK(coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]}); + assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]}); +} +assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]}); +assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]}); + +// +// Collation tests. +// + +if (db.getMongo().writeMode() === "commands") { + // arrayFilters respect operation collation. coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i]": 1}}, {arrayFilters: [{i: 0}]})); - assert.eq({_id: 0, a: [[0], [1, 2]]}, coll.findOne()); - } - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [[0]]})); - assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[]": 1}})); - assert.eq({_id: 0, a: [[]]}, coll.findOne()); - - // $pullAll. + assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]})); + assert.writeOK( + coll.update({_id: 0}, + {$set: {"a.$[i]": "bar"}}, + {arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]}); + + // arrayFilters respect the collection default collation. coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK( - coll.update({_id: 0}, {$pullAll: {"a.$[i]": [0, 2]}}, {arrayFilters: [{i: 0}]})); - assert.eq({_id: 0, a: [[1, 3], [1, 2, 3, 4]]}, coll.findOne()); - } + assert.commandWorked( + db.createCollection(collName, {collation: {locale: "en_US", strength: 2}})); + coll = db[collName]; + assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]})); + assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]}); +} + +// +// Examples. +// + +// Update all documents in array. +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); +assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]}); + +// Update all matching documents in array. 
+if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1, 2, 3], [1, 2, 3, 4]]})); - res = coll.update({_id: 0}, {$pullAll: {"a.$[]": [0, 2]}}); - assert.eq({_id: 0, a: [[1, 3], [1, 3, 4]]}, coll.findOne()); + assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); + assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]}); +} - // $pull. +// Update all matching scalars in array. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[i]": 1}}, {arrayFilters: [{i: 2}]})); - assert.eq({_id: 0, a: [[0, 1], [2]]}, coll.findOne()); - } - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [1, 2]]})); - assert.writeOK(coll.update({_id: 0}, {$pull: {"a.$[]": 1}})); - assert.eq({_id: 0, a: [[0], [2]]}, coll.findOne()); - - // $push. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[i]": 4}}, {arrayFilters: [{i: 0}]})); - assert.eq({_id: 0, a: [[0, 1, 4], [2, 3]]}, coll.findOne()); - } - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [2, 3]]})); - assert.writeOK(coll.update({_id: 0}, {$push: {"a.$[]": 4}})); - assert.eq({_id: 0, a: [[0, 1, 4], [2, 3, 4]]}, coll.findOne()); + assert.writeOK(coll.insert({_id: 0, a: [0, 1]})); + assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]}); +} - // $bit. +// Update all matching scalars in array of arrays. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.update( - {_id: 0}, {$bit: {"a.$[i]": {or: NumberInt(10)}}}, {arrayFilters: [{i: 0}]})); - assert.eq({_id: 0, a: [NumberInt(10), NumberInt(2)]}, coll.findOne()); - } - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [NumberInt(0), NumberInt(2)]})); - assert.writeOK(coll.update({_id: 0}, {$bit: {"a.$[]": {or: NumberInt(10)}}})); - assert.eq({_id: 0, a: [NumberInt(10), NumberInt(10)]}, coll.findOne()); - - // - // Multi update tests. - // + assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]})); + assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]}); +} +// Update all matching documents in nested array. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 0, 1]})); - assert.writeOK(coll.insert({_id: 1, a: [0, 2, 0, 2]})); - if (db.getMongo().writeMode() === "commands") { - assert.writeOK( - coll.update({}, {$set: {"a.$[i]": 3}}, {multi: true, arrayFilters: [{i: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 1, 3, 1]}); - assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 2, 3, 2]}); - } - assert.writeOK(coll.update({}, {$set: {"a.$[]": 3}}, {multi: true})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [3, 3, 3, 3]}); - assert.eq(coll.findOne({_id: 1}), {_id: 1, a: [3, 3, 3, 3]}); - - // - // Collation tests. 
- // - - if (db.getMongo().writeMode() === "commands") { - // arrayFilters respect operation collation. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]})); - assert.writeOK( - coll.update({_id: 0}, - {$set: {"a.$[i]": "bar"}}, - {arrayFilters: [{i: "foo"}], collation: {locale: "en_US", strength: 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]}); - - // arrayFilters respect the collection default collation. - coll.drop(); - assert.commandWorked( - db.createCollection(collName, {collation: {locale: "en_US", strength: 2}})); - coll = db[collName]; - assert.writeOK(coll.insert({_id: 0, a: ["foo", "FOO"]})); - assert.writeOK( - coll.update({_id: 0}, {$set: {"a.$[i]": "bar"}}, {arrayFilters: [{i: "foo"}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: ["bar", "bar"]}); - } - - // - // Examples. - // - - // Update all documents in array. + assert.writeOK( + coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]})); + assert.writeOK(coll.update( + {_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]})); + assert.eq(coll.findOne({_id: 0}), + {_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}); +} + +// Update all scalars in array matching a logical predicate. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].b": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 2}]}); - - // Update all matching documents in array. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); - assert.writeOK( - coll.update({_id: 0}, {$set: {"a.$[i].b": 2}}, {arrayFilters: [{"i.b": 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: 2}, {b: 1}]}); - } - - // Update all matching scalars in array. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1]})); - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{i: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1]}); - } - - // Update all matching scalars in array of arrays. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0, 1], [0, 1]]})); - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[].$[j]": 2}}, {arrayFilters: [{j: 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [[2, 1], [2, 1]]}); - } - - // Update all matching documents in nested array. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK( - coll.insert({_id: 0, a: [{b: 0, c: [{d: 0}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]})); - assert.writeOK(coll.update( - {_id: 0}, {$set: {"a.$[i].c.$[j].d": 2}}, {arrayFilters: [{"i.b": 0}, {"j.d": 0}]})); - assert.eq(coll.findOne({_id: 0}), - {_id: 0, a: [{b: 0, c: [{d: 2}, {d: 1}]}, {b: 1, c: [{d: 0}, {d: 1}]}]}); - } - - // Update all scalars in array matching a logical predicate. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]})); - assert.writeOK(coll.update( - {_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]}); - } - - // - // Error cases. - // - - // Provide an <id> with no array filter. 
+ assert.writeOK(coll.insert({_id: 0, a: [0, 1, 3]})); + assert.writeOK( + coll.update({_id: 0}, {$set: {"a.$[i]": 2}}, {arrayFilters: [{$or: [{i: 0}, {i: 3}]}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [2, 1, 2]}); +} + +// +// Error cases. +// + +// Provide an <id> with no array filter. +coll.drop(); +res = coll.update({_id: 0}, {$set: {"a.$[i]": 0}}); +assert.writeErrorWithCode(res, ErrorCodes.BadValue); +assert.neq( + -1, + res.getWriteError().errmsg.indexOf("No array filter found for identifier 'i' in path 'a.$[i]'"), + "update failed for a reason other than missing array filter"); + +// Use an <id> at the same position as a $, integer, or field name. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - res = coll.update({_id: 0}, {$set: {"a.$[i]": 0}}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); + + res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.$": 0}}, {arrayFilters: [{i: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "Updating the path 'a.$' would create a conflict at 'a'"), + "update failed for a reason other than conflicting array update and positional operator"); + + res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.0": 0}}, {arrayFilters: [{i: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf( + "Updating the path 'a.0' would create a conflict at 'a'"), + "update failed for a reason other than conflicting array update and integer field name"); + + res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.b": 0}}, {arrayFilters: [{i: 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); assert.neq(-1, res.getWriteError().errmsg.indexOf( - "No array filter found for identifier 'i' in path 'a.$[i]'"), - "update failed for a reason other than missing array filter"); - - // Use an <id> at the same position as a $, integer, or field name. - if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.$": 0}}, {arrayFilters: [{i: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "Updating the path 'a.$' would create a conflict at 'a'"), - "update failed for a reason other than conflicting array update and positional operator"); - - res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.0": 0}}, {arrayFilters: [{i: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf( - "Updating the path 'a.0' would create a conflict at 'a'"), - "update failed for a reason other than conflicting array update and integer field name"); - - res = coll.update({_id: 0}, {$set: {"a.$[i]": 0, "a.b": 0}}, {arrayFilters: [{i: 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); - assert.neq(-1, - res.getWriteError().errmsg.indexOf( - "Updating the path 'a.b' would create a conflict at 'a'"), - "update failed for a reason other than conflicting array update and field name"); - } - - // Include an implicit array traversal in a path in an update modifier. + "Updating the path 'a.b' would create a conflict at 'a'"), + "update failed for a reason other than conflicting array update and field name"); +} + +// Include an implicit array traversal in a path in an update modifier. 
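In contrast to the conflict cases above, filtered positional operators that target distinct top-level paths do not collide; a minimal sketch reusing the test's coll handle and shell helpers (illustrative data, not part of the patch):

    // No conflict: the two filtered positional operators touch different fields.
    coll.drop();
    assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [1, 2]}));
    assert.writeOK(coll.update(
        {_id: 0}, {$set: {"a.$[i]": 9, "b.$[j]": 9}}, {arrayFilters: [{i: 0}, {j: 2}]}));
    assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [9, 1], b: [1, 9]});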
+coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); +res = coll.update({_id: 0}, {$set: {"a.b": 1}}); +assert.writeErrorWithCode(res, ErrorCodes.PathNotViable); +assert.neq( + -1, + res.getWriteError().errmsg.indexOf("Cannot create field 'b' in element {a: [ { b: 0.0 } ]}"), + "update failed for a reason other than implicit array traversal"); + +// <id> contains special characters or does not begin with a lowercase letter. +if (db.getMongo().writeMode() === "commands") { coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); - res = coll.update({_id: 0}, {$set: {"a.b": 1}}); + + res = coll.update({_id: 0}, {$set: {"a.$[$i]": 1}}, {arrayFilters: [{"$i": 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.BadValue); + assert.neq(-1, + res.getWriteError().errmsg.indexOf("unknown top level operator: $i"), + "update failed for a reason other than bad array filter identifier"); + + res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.BadValue); + assert(res.getWriteError().errmsg.startsWith("Error parsing array filter") && + res.getWriteError().errmsg.endsWith( + "The top-level field name must be an alphanumeric " + + "string beginning with a lowercase letter, found 'I'"), + "update failed for a reason other than bad array filter identifier: " + + tojson(res.getWriteError())); + + assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]})); + res = coll.update({_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]}); assert.writeErrorWithCode(res, ErrorCodes.PathNotViable); + assert.neq( + -1, + res.getWriteError().errmsg.indexOf("Cannot create field '$[i' in element {a: [ 0.0 ]}"), + "update failed for a reason other than bad array filter identifier"); +} + +// +// Nested array update conflict detection. +// + +if (db.getMongo().writeMode() === "commands") { + // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not + // disjoint. + coll.drop(); + assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]})); + assert.writeOK(coll.update({_id: 0}, + {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}}, + {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]})); + assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]}); + + // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint. + coll.drop(); + assert.writeOK( + coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]})); + + res = coll.update({_id: 0}, + {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}}, + {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); assert.neq(-1, - res.getWriteError().errmsg.indexOf( - "Cannot create field 'b' in element {a: [ { b: 0.0 } ]}"), - "update failed for a reason other than implicit array traversal"); - - // <id> contains special characters or does not begin with a lowercase letter. 
- if (db.getMongo().writeMode() === "commands") { - coll.drop(); - - res = coll.update({_id: 0}, {$set: {"a.$[$i]": 1}}, {arrayFilters: [{"$i": 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - assert.neq(-1, - res.getWriteError().errmsg.indexOf("unknown top level operator: $i"), - "update failed for a reason other than bad array filter identifier"); - - res = coll.update({_id: 0}, {$set: {"a.$[I]": 1}}, {arrayFilters: [{"I": 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.BadValue); - assert(res.getWriteError().errmsg.startsWith("Error parsing array filter") && - res.getWriteError().errmsg.endsWith( - "The top-level field name must be an alphanumeric " + - "string beginning with a lowercase letter, found 'I'"), - "update failed for a reason other than bad array filter identifier: " + - tojson(res.getWriteError())); - - assert.writeOK(coll.insert({_id: 0, a: [0], b: [{j: 0}]})); - res = coll.update( - {_id: 0}, {$set: {"a.$[i.j]": 1, "b.$[i]": 1}}, {arrayFilters: [{"i.j": 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.PathNotViable); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf("Cannot create field '$[i' in element {a: [ 0.0 ]}"), - "update failed for a reason other than bad array filter identifier"); - } - - // - // Nested array update conflict detection. - // - - if (db.getMongo().writeMode() === "commands") { - // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].d" are not a conflict, even if i and j are not - // disjoint. - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0, d: 0}]}]})); - assert.writeOK(coll.update({_id: 0}, - {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].d": 1}}, - {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{x: 0, b: [{y: 0, c: 1, d: 1}]}]}); - - // "a.$[i].b.$[k].c" and "a.$[j].b.$[k].c" are a conflict iff i and j are not disjoint. - coll.drop(); - assert.writeOK( - coll.insert({_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}]}]})); - - res = coll.update({_id: 0}, - {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}}, - {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); - assert.neq( - -1, - res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"), - "update failed for a reason other than conflicting array updates"); - - assert.writeOK(coll.update({_id: 0}, - {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}}, - {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]})); - assert.eq(coll.findOne({_id: 0}), - {_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]}); - - // "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some - // element of a matching i and j. 
- coll.drop(); - assert.writeOK(coll.insert( - {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]})); - - res = coll.update({_id: 0}, - {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, - {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}, {"m.y": 0}]}); - assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); - assert.neq(-1, - res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"), - "update failed for a reason other than conflicting array updates"); - - assert.writeOK(coll.update({_id: 0}, {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, { - arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}] - })); - assert.eq( - coll.findOne({_id: 0}), - {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]}); - } + res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.$[k].c'"), + "update failed for a reason other than conflicting array updates"); + + assert.writeOK(coll.update({_id: 0}, + {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[k].c": 2}}, + {arrayFilters: [{"i.x": 0}, {"j.x": 1}, {"k.y": 0}]})); + assert.eq(coll.findOne({_id: 0}), + {_id: 0, a: [{x: 0, b: [{y: 0, c: 1}]}, {x: 1, b: [{y: 0, c: 2}]}]}); + // "a.$[i].b.$[k].c" and "a.$[j].b.$[m].c" are a conflict iff k and m intersect for some + // element of a matching i and j. + coll.drop(); + assert.writeOK(coll.insert( + {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 0}, {y: 1, c: 0}]}]})); + + res = coll.update({_id: 0}, + {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, + {arrayFilters: [{"i.x": 0}, {"j.x": 0}, {"k.y": 0}, {"m.y": 0}]}); + assert.writeErrorWithCode(res, ErrorCodes.ConflictingUpdateOperators); + assert.neq(-1, + res.getWriteError().errmsg.indexOf("Update created a conflict at 'a.0.b.0.c'"), + "update failed for a reason other than conflicting array updates"); + + assert.writeOK(coll.update({_id: 0}, + {$set: {"a.$[i].b.$[k].c": 1, "a.$[j].b.$[m].c": 2}}, + {arrayFilters: [{"i.x": 1}, {"j.x": 1}, {"k.y": 0}, {"m.y": 1}]})); + assert.eq(coll.findOne({_id: 0}), + {_id: 0, a: [{x: 0, b: [{y: 0, c: 0}]}, {x: 1, b: [{y: 0, c: 1}, {y: 1, c: 2}]}]}); +} })(); diff --git a/jstests/core/update_array_offset_positional.js b/jstests/core/update_array_offset_positional.js index 216399c86a1..8e433831c01 100644 --- a/jstests/core/update_array_offset_positional.js +++ b/jstests/core/update_array_offset_positional.js @@ -2,68 +2,68 @@ * Tests that array offset matches are not used to provide values for the positional operator. */ (function() { - "use strict"; +"use strict"; - let coll = db.jstest_update_array_offset_positional; - coll.drop(); +let coll = db.jstest_update_array_offset_positional; +coll.drop(); - // - // If there is no implicit array traversal, the positional operator cannot be used. - // +// +// If there is no implicit array traversal, the positional operator cannot be used. 
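For contrast with the offset-match cases this file rejects, the positional operator does resolve when the query itself performs an implicit array traversal; a small sketch using the same shell conventions (illustrative, not part of the patch):

    // The predicate {a: 1} traverses the array, so $ resolves to index 1.
    coll.drop();
    assert.writeOK(coll.insert({_id: 0, a: [0, 1]}));
    assert.writeOK(coll.update({_id: 0, a: 1}, {$set: {"a.$": 2}}));
    assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2]});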
+// - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0]})); - assert.writeError(coll.update({_id: 0, "a.0": 0}, {$set: {"a.$": 1}})); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0]})); +assert.writeError(coll.update({_id: 0, "a.0": 0}, {$set: {"a.$": 1}})); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); - assert.writeError(coll.update({_id: 0, "a.0.b": 0}, {$set: {"a.$.b": 1}})); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0}]})); +assert.writeError(coll.update({_id: 0, "a.0.b": 0}, {$set: {"a.$.b": 1}})); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [[0]]})); - assert.writeError(coll.update({_id: 0, "a.0.0": 0}, {$set: {"a.$.0": 1}})); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [[0]]})); +assert.writeError(coll.update({_id: 0, "a.0.0": 0}, {$set: {"a.$.0": 1}})); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: [0]}]})); - assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}})); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: [0]}]})); +assert.writeError(coll.update({_id: 0, "a.0.b.0": 0}, {$set: {"a.$.b.0": 1}})); - // - // Array offset matches are not used to provide values for the positional operator on the same - // path. - // +// +// Array offset matches are not used to provide values for the positional operator on the same +// path. +// - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]})); - assert.writeOK(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]})); +assert.writeOK(coll.update({_id: 0, "a.0.b": 1}, {$set: {"a.0.b.$": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]}); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]})); - assert.writeOK(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [{b: [0, 1]}]})); +assert.writeOK(coll.update({_id: 0, "a.b.1": 1}, {$set: {"a.$.b.1": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [{b: [0, 2]}]}); - // - // Array offset matches are not used to provide values for the positional operator on a - // different path. - // +// +// Array offset matches are not used to provide values for the positional operator on a +// different path. 
+// - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [0]})); - assert.writeOK(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [0]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [0]})); +assert.writeOK(coll.update({_id: 0, a: 1, "b.0": 0}, {$set: {"a.$": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [0]}); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]})); - assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: 0}]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: 0}]})); +assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c": 0}, {$set: {"a.$": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: 0}]}); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [[0]]})); - assert.writeOK(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [[0]]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [[0]]})); +assert.writeOK(coll.update({_id: 0, a: 1, "b.0.0": 0}, {$set: {"a.$": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [[0]]}); - coll.drop(); - assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]})); - assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}})); - assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: [0]}]}); +coll.drop(); +assert.writeOK(coll.insert({_id: 0, a: [0, 1], b: [{c: [0]}]})); +assert.writeOK(coll.update({_id: 0, a: 1, "b.0.c.0": 0}, {$set: {"a.$": 2}})); +assert.eq(coll.findOne({_id: 0}), {_id: 0, a: [0, 2], b: [{c: [0]}]}); }()); diff --git a/jstests/core/update_blank1.js b/jstests/core/update_blank1.js index 0a42114ed1c..cd8f7433ebe 100644 --- a/jstests/core/update_blank1.js +++ b/jstests/core/update_blank1.js @@ -8,7 +8,9 @@ t.drop(); orig = { "": 1, - _id: 2, "a": 3, "b": 4 + _id: 2, + "a": 3, + "b": 4 }; t.insert(orig); var res = t.update({}, {$set: {"c": 5}}); diff --git a/jstests/core/update_hint.js b/jstests/core/update_hint.js index 64841ab2803..9412f84d71f 100644 --- a/jstests/core/update_hint.js +++ b/jstests/core/update_hint.js @@ -8,74 +8,88 @@ */ (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.jstests_update_hint; - coll.drop(); - - assert.commandWorked(coll.insert({x: 1, y: 1})); - assert.commandWorked(coll.insert({x: 1, y: 1})); - - assert.commandWorked(coll.createIndex({x: 1})); - assert.commandWorked(coll.createIndex({y: -1})); - - function assertCommandUsesIndex(command, expectedHintKeyPattern) { - const out = assert.commandWorked(coll.runCommand({explain: command})); - const planStage = getPlanStage(out, "IXSCAN"); - assert.neq(null, planStage); - assert.eq(planStage.keyPattern, expectedHintKeyPattern, tojson(planStage)); - } - - const updateCmd = { - update: 'jstests_update_hint', - }; - - const updates = [{q: {x: 1}, u: {$set: {y: 1}}, hint: {x: 1}}]; - - updateCmd.updates = updates; - // Hint using a key pattern. - assertCommandUsesIndex(updateCmd, {x: 1}); - - // Hint using an index name. - updates[0].hint = 'y_-1'; - assertCommandUsesIndex(updateCmd, {y: -1}); - - // Passing a hint should not use the idhack fast-path. - updates[0].q = {_id: 1}; - assertCommandUsesIndex(updateCmd, {y: -1}); - - // Create a sparse index. 
- assert.commandWorked(coll.createIndex({s: 1}, {sparse: true})); - - // Hint should be respected, even on incomplete indexes. - updates[0].hint = {s: 1}; - assertCommandUsesIndex(updateCmd, {s: 1}); - - // Command should fail with incorrectly formatted hints. - updates[0].hint = 1; - assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse); - updates[0].hint = true; - assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse); - - // Command should fail with hints to non-existent indexes. - updates[0].hint = {badHint: 1}; - assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.BadValue); - - // Insert document that will be in the sparse index. - assert.commandWorked(coll.insert({x: 1, s: 0})); - - // Update hinting a sparse index updates only the document in the sparse index. - updates[0] = {q: {}, u: {$set: {s: 1}}, hint: {s: 1}}; - assert.commandWorked(coll.runCommand(updateCmd)); - assert.eq(1, coll.count({s: 1})); - - // Update hinting a sparse index with upsert option can result in an insert even if the correct - // behaviour would be to update an existing document. - assert.commandWorked(coll.insert({x: 2})); - updates[0] = {q: {x: 2}, u: {$set: {s: 1}}, hint: {s: 1}, upsert: true}; - assert.commandWorked(coll.runCommand(updateCmd)); - assert.eq(2, coll.count({x: 2})); - +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const coll = db.jstests_update_hint; +coll.drop(); + +assert.commandWorked(coll.insert({x: 1, y: 1})); +assert.commandWorked(coll.insert({x: 1, y: 1})); + +assert.commandWorked(coll.createIndex({x: 1})); +assert.commandWorked(coll.createIndex({y: -1})); + +function assertCommandUsesIndex(command, expectedHintKeyPattern) { + const out = assert.commandWorked(coll.runCommand({explain: command})); + const planStage = getPlanStage(out, "IXSCAN"); + assert.neq(null, planStage); + assert.eq(planStage.keyPattern, expectedHintKeyPattern, tojson(planStage)); +} + +const updateCmd = { + update: 'jstests_update_hint', +}; + +const updates = [{q: {x: 1}, u: {$set: {y: 1}}, hint: {x: 1}}]; + +updateCmd.updates = updates; +// Hint using a key pattern. +assertCommandUsesIndex(updateCmd, {x: 1}); + +// Hint using an index name. +updates[0].hint = 'y_-1'; +assertCommandUsesIndex(updateCmd, {y: -1}); + +// Passing a hint should not use the idhack fast-path. +updates[0].q = { + _id: 1 +}; +assertCommandUsesIndex(updateCmd, {y: -1}); + +// Create a sparse index. +assert.commandWorked(coll.createIndex({s: 1}, {sparse: true})); + +// Hint should be respected, even on incomplete indexes. +updates[0].hint = { + s: 1 +}; +assertCommandUsesIndex(updateCmd, {s: 1}); + +// Command should fail with incorrectly formatted hints. +updates[0].hint = 1; +assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse); +updates[0].hint = true; +assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.FailedToParse); + +// Command should fail with hints to non-existent indexes. +updates[0].hint = { + badHint: 1 +}; +assert.commandFailedWithCode(coll.runCommand(updateCmd), ErrorCodes.BadValue); + +// Insert document that will be in the sparse index. +assert.commandWorked(coll.insert({x: 1, s: 0})); + +// Update hinting a sparse index updates only the document in the sparse index. 
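The hint being exercised here is attached to each statement of the update command rather than to the command as a whole; roughly, the shape the helper builds looks like this (sketch only, values illustrative, not part of the patch):

    // Per-statement hint, given either as a key pattern or as an index name.
    db.runCommand({
        update: "jstests_update_hint",
        updates: [
            {q: {x: 1}, u: {$set: {y: 2}}, hint: {x: 1}},  // key pattern
            {q: {x: 1}, u: {$set: {y: 3}}, hint: "y_-1"}   // index name
        ]
    });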
+updates[0] = { + q: {}, + u: {$set: {s: 1}}, + hint: {s: 1} +}; +assert.commandWorked(coll.runCommand(updateCmd)); +assert.eq(1, coll.count({s: 1})); + +// Update hinting a sparse index with upsert option can result in an insert even if the correct +// behaviour would be to update an existing document. +assert.commandWorked(coll.insert({x: 2})); +updates[0] = { + q: {x: 2}, + u: {$set: {s: 1}}, + hint: {s: 1}, + upsert: true +}; +assert.commandWorked(coll.runCommand(updateCmd)); +assert.eq(2, coll.count({x: 2})); })(); diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js index 62e870147bf..3ec86705a1f 100644 --- a/jstests/core/update_min_max_examples.js +++ b/jstests/core/update_min_max_examples.js @@ -1,71 +1,74 @@ // Basic examples for $min/$max (function() { - "use strict"; +"use strict"; - let res; - const coll = db.update_min_max; - coll.drop(); +let res; +const coll = db.update_min_max; +coll.drop(); - // $min for number - coll.insert({_id: 1, a: 2}); - res = coll.update({_id: 1}, {$min: {a: 1}}); - assert.writeOK(res); - assert.eq(coll.findOne({_id: 1}).a, 1); +// $min for number +coll.insert({_id: 1, a: 2}); +res = coll.update({_id: 1}, {$min: {a: 1}}); +assert.writeOK(res); +assert.eq(coll.findOne({_id: 1}).a, 1); - // $max for number - coll.insert({_id: 2, a: 2}); - res = coll.update({_id: 2}, {$max: {a: 1}}); - assert.writeOK(res); - assert.eq(coll.findOne({_id: 2}).a, 2); +// $max for number +coll.insert({_id: 2, a: 2}); +res = coll.update({_id: 2}, {$max: {a: 1}}); +assert.writeOK(res); +assert.eq(coll.findOne({_id: 2}).a, 2); - // $min for Date - let date = new Date(); - coll.insert({_id: 3, a: date}); - // setMilliseconds() will roll over to change seconds if necessary. - date.setMilliseconds(date.getMilliseconds() + 2); - // Test that we have advanced the date and it's no longer the same as the one we inserted. - assert.eq(null, coll.findOne({_id: 3, a: date})); - const origDoc = coll.findOne({_id: 3}); - assert.commandWorked(coll.update({_id: 3}, {$min: {a: date}})); - assert.eq(coll.findOne({_id: 3}).a, origDoc.a); +// $min for Date +let date = new Date(); +coll.insert({_id: 3, a: date}); +// setMilliseconds() will roll over to change seconds if necessary. +date.setMilliseconds(date.getMilliseconds() + 2); +// Test that we have advanced the date and it's no longer the same as the one we inserted. +assert.eq(null, coll.findOne({_id: 3, a: date})); +const origDoc = coll.findOne({_id: 3}); +assert.commandWorked(coll.update({_id: 3}, {$min: {a: date}})); +assert.eq(coll.findOne({_id: 3}).a, origDoc.a); - // $max for Date - coll.insert({_id: 4, a: date}); - // setMilliseconds() will roll over to change seconds if necessary. - date.setMilliseconds(date.getMilliseconds() + 2); - // Test that we have advanced the date and it's no longer the same as the one we inserted. - assert.eq(null, coll.findOne({_id: 4, a: date})); - res = coll.update({_id: 4}, {$max: {a: date}}); - assert.writeOK(res); - assert.eq(coll.findOne({_id: 4}).a, date); +// $max for Date +coll.insert({_id: 4, a: date}); +// setMilliseconds() will roll over to change seconds if necessary. +date.setMilliseconds(date.getMilliseconds() + 2); +// Test that we have advanced the date and it's no longer the same as the one we inserted. 
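As a reminder of the semantics these examples rely on, $min only writes when the new value sorts below the current one and $max only when it sorts above; a minimal sketch reusing the test's coll handle (illustrative document, not part of the patch):

    // $min keeps the smaller value; $max keeps the larger one.
    coll.drop();
    assert.writeOK(coll.insert({_id: 0, low: 5, high: 5}));
    assert.writeOK(coll.update({_id: 0}, {$min: {low: 3}, $max: {high: 3}}));
    assert.eq(coll.findOne({_id: 0}), {_id: 0, low: 3, high: 5});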
+assert.eq(null, coll.findOne({_id: 4, a: date})); +res = coll.update({_id: 4}, {$max: {a: date}}); +assert.writeOK(res); +assert.eq(coll.findOne({_id: 4}).a, date); - // $max for small number - coll.insert({_id: 5, a: 1e-15}); - // Slightly bigger than 1e-15. - const biggerval = 0.000000000000001000000000000001; - res = coll.update({_id: 5}, {$max: {a: biggerval}}); - assert.writeOK(res); - assert.eq(coll.findOne({_id: 5}).a, biggerval); +// $max for small number +coll.insert({_id: 5, a: 1e-15}); +// Slightly bigger than 1e-15. +const biggerval = 0.000000000000001000000000000001; +res = coll.update({_id: 5}, {$max: {a: biggerval}}); +assert.writeOK(res); +assert.eq(coll.findOne({_id: 5}).a, biggerval); - // $min for a small number - coll.insert({_id: 6, a: biggerval}); - res = coll.update({_id: 6}, {$min: {a: 1e-15}}); - assert.writeOK(res); - assert.eq(coll.findOne({_id: 6}).a, 1e-15); +// $min for a small number +coll.insert({_id: 6, a: biggerval}); +res = coll.update({_id: 6}, {$min: {a: 1e-15}}); +assert.writeOK(res); +assert.eq(coll.findOne({_id: 6}).a, 1e-15); - // $max with positional operator - let insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]}; - coll.insert(insertdoc); - res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}}); - assert.writeOK(res); - insertdoc.y[1].a = 7; - assert.docEq(coll.findOne({_id: 7}), insertdoc); +// $max with positional operator +let insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]}; +coll.insert(insertdoc); +res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}}); +assert.writeOK(res); +insertdoc.y[1].a = 7; +assert.docEq(coll.findOne({_id: 7}), insertdoc); - // $min with positional operator - insertdoc = {_id: 8, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]}; - coll.insert(insertdoc); - res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}}); - assert.writeOK(res); - insertdoc.y[1].a = 5; - assert.docEq(coll.findOne({_id: 8}), insertdoc); +// $min with positional operator +insertdoc = { + _id: 8, + y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}] +}; +coll.insert(insertdoc); +res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}}); +assert.writeOK(res); +insertdoc.y[1].a = 5; +assert.docEq(coll.findOne({_id: 8}), insertdoc); }()); diff --git a/jstests/core/update_modifier_pop.js b/jstests/core/update_modifier_pop.js index c74d7f254bf..77c6bae702c 100644 --- a/jstests/core/update_modifier_pop.js +++ b/jstests/core/update_modifier_pop.js @@ -1,115 +1,112 @@ // @tags: [requires_non_retryable_writes] (function() { - "use strict"; - - let coll = db.update_modifier_pop; - coll.drop(); - - assert.writeOK(coll.insert({_id: 0})); - - // $pop with value of 0 fails to parse. - assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 0}}), ErrorCodes.FailedToParse); - - // $pop with value of -2 fails to parse. - assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": -2}}), ErrorCodes.FailedToParse); - - // $pop with value of 2.5 fails to parse. - assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 2.5}}), - ErrorCodes.FailedToParse); - - // $pop with value of 1.1 fails to parse. - assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 1.1}}), - ErrorCodes.FailedToParse); - - // $pop with a nested object fails to parse. - assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {a: {b: 1}}}), ErrorCodes.FailedToParse); - - // $pop is a no-op when the path does not exist. 
- let writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 0); - } - - // $pop is a no-op when the path partially exists. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {c: 1}})); - writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 0); - } - - // $pop fails when the path is blocked by a scalar element. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {b: 1}})); - assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}})); - - // $pop fails when the path is blocked by an array element. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2]}})); - assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}})); - - // $pop fails when the path exists but is not an array. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {b: {c: 1}}})); - assert.writeError(coll.update({_id: 0}, {$pop: {"a.b": 1}})); - - // $pop is a no-op when the path contains an empty array. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {b: []}})); - writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 0); - } - - // Successfully pop from the end of an array. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2, 3]}})); - writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 1); - } - assert.eq({_id: 0, a: {b: [1, 2]}}, coll.findOne()); - - // Successfully pop from the beginning of an array. - writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": -1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 1); - } - assert.eq({_id: 0, a: {b: [2]}}, coll.findOne()); - - // $pop with the positional ($) operator. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]})); - assert.writeOK(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}})); - assert.eq({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5]}]}, coll.findOne()); - - // $pop with arrayFilters. - if (db.getMongo().writeMode() === "commands") { - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]})); - assert.writeOK( - coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]})); - assert.eq({_id: 0, a: [{b: [2]}, {b: [4, 5]}, {b: [3]}]}, coll.findOne()); - } - - // $pop from a nested array. - assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [1, [2, 3, 4]]})); - assert.writeOK(coll.update({_id: 0}, {$pop: {"a.1": 1}})); - assert.eq({_id: 0, a: [1, [2, 3]]}, coll.findOne()); - - // $pop is a no-op when array element in path does not exist. +"use strict"; + +let coll = db.update_modifier_pop; +coll.drop(); + +assert.writeOK(coll.insert({_id: 0})); + +// $pop with value of 0 fails to parse. +assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 0}}), ErrorCodes.FailedToParse); + +// $pop with value of -2 fails to parse. 
+assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": -2}}), ErrorCodes.FailedToParse); + +// $pop with value of 2.5 fails to parse. +assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 2.5}}), ErrorCodes.FailedToParse); + +// $pop with value of 1.1 fails to parse. +assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {"a.b": 1.1}}), ErrorCodes.FailedToParse); + +// $pop with a nested object fails to parse. +assert.writeErrorWithCode(coll.update({_id: 0}, {$pop: {a: {b: 1}}}), ErrorCodes.FailedToParse); + +// $pop is a no-op when the path does not exist. +let writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 0); +} + +// $pop is a no-op when the path partially exists. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {c: 1}})); +writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 0); +} + +// $pop fails when the path is blocked by a scalar element. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {b: 1}})); +assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}})); + +// $pop fails when the path is blocked by an array element. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2]}})); +assert.writeError(coll.update({_id: 0}, {$pop: {"a.b.c": 1}})); + +// $pop fails when the path exists but is not an array. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {b: {c: 1}}})); +assert.writeError(coll.update({_id: 0}, {$pop: {"a.b": 1}})); + +// $pop is a no-op when the path contains an empty array. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {b: []}})); +writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 0); +} + +// Successfully pop from the end of an array. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: {b: [1, 2, 3]}})); +writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": 1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 1); +} +assert.eq({_id: 0, a: {b: [1, 2]}}, coll.findOne()); + +// Successfully pop from the beginning of an array. +writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.b": -1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 1); +} +assert.eq({_id: 0, a: {b: [2]}}, coll.findOne()); + +// $pop with the positional ($) operator. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5, 6]}]})); +assert.writeOK(coll.update({_id: 0, "a.b": 5}, {$pop: {"a.$.b": 1}})); +assert.eq({_id: 0, a: [{b: [1, 2, 3]}, {b: [4, 5]}]}, coll.findOne()); + +// $pop with arrayFilters. 
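For reference, the sign convention exercised by these tests is that $pop with 1 removes the last array element and -1 removes the first; a short sketch reusing the test's coll handle (illustrative data, not part of the patch):

    // 1 pops from the end, -1 pops from the front.
    coll.drop();
    assert.writeOK(coll.insert({_id: 0, scores: [10, 20, 30]}));
    assert.writeOK(coll.update({_id: 0}, {$pop: {scores: 1}}));   // scores: [10, 20]
    assert.writeOK(coll.update({_id: 0}, {$pop: {scores: -1}}));  // scores: [20]
    assert.eq(coll.findOne({_id: 0}), {_id: 0, scores: [20]});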
+if (db.getMongo().writeMode() === "commands") { assert.writeOK(coll.remove({})); - assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); - writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.2.b": 1}})); - assert.eq(writeRes.nMatched, 1); - if (db.getMongo().writeMode() === "commands") { - assert.eq(writeRes.nModified, 0); - } + assert.writeOK(coll.insert({_id: 0, a: [{b: [1, 2]}, {b: [4, 5]}, {b: [2, 3]}]})); + assert.writeOK(coll.update({_id: 0}, {$pop: {"a.$[i].b": -1}}, {arrayFilters: [{"i.b": 2}]})); + assert.eq({_id: 0, a: [{b: [2]}, {b: [4, 5]}, {b: [3]}]}, coll.findOne()); +} + +// $pop from a nested array. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [1, [2, 3, 4]]})); +assert.writeOK(coll.update({_id: 0}, {$pop: {"a.1": 1}})); +assert.eq({_id: 0, a: [1, [2, 3]]}, coll.findOne()); + +// $pop is a no-op when array element in path does not exist. +assert.writeOK(coll.remove({})); +assert.writeOK(coll.insert({_id: 0, a: [{b: 0}, {b: 1}]})); +writeRes = assert.writeOK(coll.update({_id: 0}, {$pop: {"a.2.b": 1}})); +assert.eq(writeRes.nMatched, 1); +if (db.getMongo().writeMode() === "commands") { + assert.eq(writeRes.nModified, 0); +} }()); diff --git a/jstests/core/update_multi5.js b/jstests/core/update_multi5.js index 871f10cbc07..8f797d8de2f 100644 --- a/jstests/core/update_multi5.js +++ b/jstests/core/update_multi5.js @@ -2,19 +2,18 @@ // tests that $addToSet works in a multi-update. (function() { - "use strict"; - var t = db.update_multi5; - t.drop(); +"use strict"; +var t = db.update_multi5; +t.drop(); - assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]})); - assert.writeOK(t.insert({path: 'r2', subscribers: [3, 4]})); +assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]})); +assert.writeOK(t.insert({path: 'r2', subscribers: [3, 4]})); - var res = - assert.writeOK(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true})); +var res = assert.writeOK(t.update({}, {$addToSet: {subscribers: 5}}, {upsert: false, multi: true})); - assert.eq(res.nMatched, 2, tojson(res)); +assert.eq(res.nMatched, 2, tojson(res)); - t.find().forEach(function(z) { - assert.eq(3, z.subscribers.length, tojson(z)); - }); +t.find().forEach(function(z) { + assert.eq(3, z.subscribers.length, tojson(z)); +}); })(); diff --git a/jstests/core/update_numeric_field_name.js b/jstests/core/update_numeric_field_name.js index 2d1a4899adc..33b72e69f2f 100644 --- a/jstests/core/update_numeric_field_name.js +++ b/jstests/core/update_numeric_field_name.js @@ -1,29 +1,29 @@ // Test that update operations correctly fail if they violate the "ambiguous field name in array" // constraint for indexes. This is designed to reproduce SERVER-37058. (function() { - "use strict"; +"use strict"; - const coll = db.update_numeric_field_name; - coll.drop(); +const coll = db.update_numeric_field_name; +coll.drop(); - assert.commandWorked(coll.insert({_id: 0, 'a': [{}]})); - assert.commandWorked(coll.createIndex({'a.0.c': 1})); +assert.commandWorked(coll.insert({_id: 0, 'a': [{}]})); +assert.commandWorked(coll.createIndex({'a.0.c': 1})); - // Attempt to insert a field name '0'. The first '0' refers to the first element of the array - // 'a'. - assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0': 1}}), 16746); +// Attempt to insert a field name '0'. The first '0' refers to the first element of the array +// 'a'. +assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0': 1}}), 16746); - // Verify that the indexes were not affected. 
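Related to the $addToSet multi-update above, several values can be added in one pass with the $each modifier, and only values not already present are appended; a brief sketch reusing that test's t handle (illustrative, not part of the patch):

    // $addToSet with $each adds only the missing values.
    t.drop();
    assert.writeOK(t.insert({path: 'r1', subscribers: [1, 2]}));
    assert.writeOK(t.update({}, {$addToSet: {subscribers: {$each: [2, 5]}}}, {multi: true}));
    assert.eq(t.findOne({path: 'r1'}).subscribers, [1, 2, 5]);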
- let res = assert.commandWorked(coll.validate(true)); - assert(res.valid, tojson(res)); +// Verify that the indexes were not affected. +let res = assert.commandWorked(coll.validate(true)); +assert(res.valid, tojson(res)); - assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0.b': 1}}), 16746); - res = assert.commandWorked(coll.validate(true)); - assert(res.valid, tojson(res)); +assert.commandFailedWithCode(coll.update({_id: 0}, {$set: {'a.0.0.b': 1}}), 16746); +res = assert.commandWorked(coll.validate(true)); +assert(res.valid, tojson(res)); - // An update which does not violate the ambiguous field name in array constraint should succeed. - assert.commandWorked(coll.update({_id: 0}, {$set: {'a.1.b.0.0': 1}})); +// An update which does not violate the ambiguous field name in array constraint should succeed. +assert.commandWorked(coll.update({_id: 0}, {$set: {'a.1.b.0.0': 1}})); - res = assert.commandWorked(coll.validate(true)); - assert(res.valid, tojson(res)); +res = assert.commandWorked(coll.validate(true)); +assert(res.valid, tojson(res)); })(); diff --git a/jstests/core/update_pipeline_shell_helpers.js b/jstests/core/update_pipeline_shell_helpers.js index d45830a06b5..d8bb7d7eb3d 100644 --- a/jstests/core/update_pipeline_shell_helpers.js +++ b/jstests/core/update_pipeline_shell_helpers.js @@ -7,84 +7,84 @@ * @tags: [requires_find_command, requires_non_retryable_writes, assumes_write_concern_unchanged] */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'. +load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'. - // Make sure that the test collection is empty before starting the test. - const testColl = db.update_pipeline_shell_helpers_test; - testColl.drop(); +// Make sure that the test collection is empty before starting the test. +const testColl = db.update_pipeline_shell_helpers_test; +testColl.drop(); - // Insert some test documents. - assert.commandWorked(testColl.insert({_id: 1, a: 1, b: 2})); - assert.commandWorked(testColl.insert({_id: 2, a: 2, b: 3})); +// Insert some test documents. +assert.commandWorked(testColl.insert({_id: 1, a: 1, b: 2})); +assert.commandWorked(testColl.insert({_id: 2, a: 2, b: 3})); - // Test that each of the update shell helpers permits pipeline-style updates. - assert.commandWorked(testColl.update({_id: 1}, [{$set: {update: true}}])); - assert.commandWorked(testColl.update({}, [{$set: {updateMulti: true}}], {multi: true})); - assert.commandWorked(testColl.updateOne({_id: 1}, [{$set: {updateOne: true}}])); - assert.commandWorked(testColl.updateMany({}, [{$set: {updateMany: true}}])); - assert.commandWorked(testColl.bulkWrite([ - {updateOne: {filter: {_id: 1}, update: [{$set: {bulkWriteUpdateOne: true}}]}}, - {updateMany: {filter: {}, update: [{$set: {bulkWriteUpdateMany: true}}]}} - ])); +// Test that each of the update shell helpers permits pipeline-style updates. +assert.commandWorked(testColl.update({_id: 1}, [{$set: {update: true}}])); +assert.commandWorked(testColl.update({}, [{$set: {updateMulti: true}}], {multi: true})); +assert.commandWorked(testColl.updateOne({_id: 1}, [{$set: {updateOne: true}}])); +assert.commandWorked(testColl.updateMany({}, [{$set: {updateMany: true}}])); +assert.commandWorked(testColl.bulkWrite([ + {updateOne: {filter: {_id: 1}, update: [{$set: {bulkWriteUpdateOne: true}}]}}, + {updateMany: {filter: {}, update: [{$set: {bulkWriteUpdateMany: true}}]}} +])); - // Test that each of the Bulk API update functions correctly handle pipeline syntax. 
- const unorderedBulkOp = testColl.initializeUnorderedBulkOp(); - const orderedBulkOp = testColl.initializeOrderedBulkOp(); +// Test that each of the Bulk API update functions correctly handle pipeline syntax. +const unorderedBulkOp = testColl.initializeUnorderedBulkOp(); +const orderedBulkOp = testColl.initializeOrderedBulkOp(); - unorderedBulkOp.find({_id: 1}).updateOne([{$set: {unorderedBulkOpUpdateOne: true}}]); - unorderedBulkOp.find({}).update([{$set: {unorderedBulkOpUpdateMulti: true}}]); - orderedBulkOp.find({_id: 1}).updateOne([{$set: {orderedBulkOpUpdateOne: true}}]); - orderedBulkOp.find({}).update([{$set: {orderedBulkOpUpdateMulti: true}}]); - assert.commandWorked(unorderedBulkOp.execute()); - assert.commandWorked(orderedBulkOp.execute()); +unorderedBulkOp.find({_id: 1}).updateOne([{$set: {unorderedBulkOpUpdateOne: true}}]); +unorderedBulkOp.find({}).update([{$set: {unorderedBulkOpUpdateMulti: true}}]); +orderedBulkOp.find({_id: 1}).updateOne([{$set: {orderedBulkOpUpdateOne: true}}]); +orderedBulkOp.find({}).update([{$set: {orderedBulkOpUpdateMulti: true}}]); +assert.commandWorked(unorderedBulkOp.execute()); +assert.commandWorked(orderedBulkOp.execute()); - // Verify that the results of the various update operations are as expected. - const observedResults = testColl.find().toArray(); - const expectedResults = [ - { - _id: 1, - a: 1, - b: 2, - update: true, - updateMulti: true, - updateOne: true, - updateMany: true, - bulkWriteUpdateOne: true, - bulkWriteUpdateMany: true, - unorderedBulkOpUpdateOne: true, - unorderedBulkOpUpdateMulti: true, - orderedBulkOpUpdateOne: true, - orderedBulkOpUpdateMulti: true - }, - { - _id: 2, - a: 2, - b: 3, - updateMulti: true, - updateMany: true, - bulkWriteUpdateMany: true, - unorderedBulkOpUpdateMulti: true, - orderedBulkOpUpdateMulti: true - } - ]; - assert(arrayEq(observedResults, expectedResults)); +// Verify that the results of the various update operations are as expected. +const observedResults = testColl.find().toArray(); +const expectedResults = [ + { + _id: 1, + a: 1, + b: 2, + update: true, + updateMulti: true, + updateOne: true, + updateMany: true, + bulkWriteUpdateOne: true, + bulkWriteUpdateMany: true, + unorderedBulkOpUpdateOne: true, + unorderedBulkOpUpdateMulti: true, + orderedBulkOpUpdateOne: true, + orderedBulkOpUpdateMulti: true + }, + { + _id: 2, + a: 2, + b: 3, + updateMulti: true, + updateMany: true, + bulkWriteUpdateMany: true, + unorderedBulkOpUpdateMulti: true, + orderedBulkOpUpdateMulti: true + } +]; +assert(arrayEq(observedResults, expectedResults)); - // Test that findAndModify and associated helpers correctly handle pipeline syntax. - const expectedFindAndModifyPostImage = Object.merge(expectedResults[0], {findAndModify: true}); - const expectedFindOneAndUpdatePostImage = - Object.merge(expectedFindAndModifyPostImage, {findOneAndUpdate: true}); - const findAndModifyPostImage = testColl.findAndModify( - {query: {_id: 1}, update: [{$set: {findAndModify: true}}], new: true}); - assert.docEq(findAndModifyPostImage, expectedFindAndModifyPostImage); - const findOneAndUpdatePostImage = testColl.findOneAndUpdate( - {_id: 1}, [{$set: {findOneAndUpdate: true}}], {returnNewDocument: true}); - assert.docEq(findOneAndUpdatePostImage, expectedFindOneAndUpdatePostImage); +// Test that findAndModify and associated helpers correctly handle pipeline syntax. 
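What distinguishes these pipeline-style updates from classic modifier updates is that their stages can read the document's own fields; a minimal sketch reusing testColl (illustrative field names, not part of the patch):

    // An aggregation-style $set stage computes a value from existing fields.
    testColl.updateOne({_id: 1}, [{$set: {sum: {$add: ["$a", "$b"]}}}]);
    // The matched document now carries sum = a + b, computed server-side.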
+const expectedFindAndModifyPostImage = Object.merge(expectedResults[0], {findAndModify: true}); +const expectedFindOneAndUpdatePostImage = + Object.merge(expectedFindAndModifyPostImage, {findOneAndUpdate: true}); +const findAndModifyPostImage = + testColl.findAndModify({query: {_id: 1}, update: [{$set: {findAndModify: true}}], new: true}); +assert.docEq(findAndModifyPostImage, expectedFindAndModifyPostImage); +const findOneAndUpdatePostImage = testColl.findOneAndUpdate( + {_id: 1}, [{$set: {findOneAndUpdate: true}}], {returnNewDocument: true}); +assert.docEq(findOneAndUpdatePostImage, expectedFindOneAndUpdatePostImage); - // Shell helpers for replacement updates should reject pipeline-style updates. - assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}])); - assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}])); - assert.throws(() => testColl.bulkWrite( - [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}])); +// Shell helpers for replacement updates should reject pipeline-style updates. +assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}])); +assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}])); +assert.throws(() => testColl.bulkWrite( + [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}])); })(); diff --git a/jstests/core/update_with_pipeline.js b/jstests/core/update_with_pipeline.js index 07d92c718df..963d72b6592 100644 --- a/jstests/core/update_with_pipeline.js +++ b/jstests/core/update_with_pipeline.js @@ -7,227 +7,214 @@ * @tags: [requires_find_command, requires_non_retryable_writes] */ (function() { - "use strict"; - - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. - - const collName = "update_with_pipeline"; - const coll = db[collName]; - - assert.commandWorked(coll.createIndex({x: 1})); - assert.commandWorked(coll.createIndex({"y.$**": 1})); - - /** - * Confirms that an update returns the expected set of documents. 'nModified' documents from - * 'resultDocList' must match. 'nModified' may be smaller then the number of elements in - * 'resultDocList'. This allows for the case where there are multiple documents that could be - * updated, but only one is actually updated due to a 'multi: false' argument. Constant values - * to the update command are passed in the 'constants' argument. - */ - function testUpdate({ - query, - initialDocumentList, - update, - resultDocList, - nModified, - options = {}, - constants = undefined - }) { - assert.eq(initialDocumentList.length, resultDocList.length); - assert.commandWorked(coll.remove({})); - assert.commandWorked(coll.insert(initialDocumentList)); - const upd = Object.assign({q: query, u: update}, options); - if (constants !== undefined) { - upd.c = constants; - } - const res = assert.commandWorked(db.runCommand({update: collName, updates: [upd]})); - assert.eq(nModified, res.nModified); - - let nMatched = 0; - for (let i = 0; i < resultDocList.length; ++i) { - if (0 === bsonWoCompare(coll.findOne(resultDocList[i]), resultDocList[i])) { - ++nMatched; - } - } - assert.eq( - nModified, nMatched, `actual=${coll.find().toArray()}, expected=${resultDocList}`); - } +"use strict"; - function testUpsertDoesInsert(query, update, resultDoc) { - assert.commandWorked(coll.remove({})); - assert.commandWorked(coll.update(query, update, {upsert: true})); - assert.eq(coll.findOne({}), resultDoc, coll.find({}).toArray()); - } +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. 
- // Update with existing document. - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - update: [{$set: {foo: 4}}], - resultDocList: [{_id: 1, x: 1, foo: 4}], - nModified: 1 - }); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1, y: 1}], - update: [{$project: {x: 1}}], - resultDocList: [{_id: 1, x: 1}], - nModified: 1 - }); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1, y: [{z: 1, foo: 1}]}], - update: [{$unset: ["x", "y.z"]}], - resultDocList: [{_id: 1, y: [{foo: 1}]}], - nModified: 1 - }); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1, t: {u: {v: 1}}}], - update: [{$replaceWith: "$t"}], - resultDocList: [{_id: 1, u: {v: 1}}], - nModified: 1 - }); +const collName = "update_with_pipeline"; +const coll = db[collName]; - // Multi-update. - testUpdate({ - query: {x: 1}, - initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 1}], - update: [{$set: {bar: 4}}], - resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 1, bar: 4}], - nModified: 2, - options: {multi: true} - }); +assert.commandWorked(coll.createIndex({x: 1})); +assert.commandWorked(coll.createIndex({"y.$**": 1})); - // This test will fail in a sharded cluster when the 2 initial documents live on different - // shards. - if (!FixtureHelpers.isMongos(db)) { - testUpdate({ - query: {_id: {$in: [1, 2]}}, - initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 2}], - update: [{$set: {bar: 4}}], - resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 2, bar: 4}], - nModified: 1, - options: {multi: false} - }); +/** + * Confirms that an update returns the expected set of documents. 'nModified' documents from + * 'resultDocList' must match. 'nModified' may be smaller then the number of elements in + * 'resultDocList'. This allows for the case where there are multiple documents that could be + * updated, but only one is actually updated due to a 'multi: false' argument. Constant values + * to the update command are passed in the 'constants' argument. + */ +function testUpdate({ + query, + initialDocumentList, + update, + resultDocList, + nModified, + options = {}, + constants = undefined +}) { + assert.eq(initialDocumentList.length, resultDocList.length); + assert.commandWorked(coll.remove({})); + assert.commandWorked(coll.insert(initialDocumentList)); + const upd = Object.assign({q: query, u: update}, options); + if (constants !== undefined) { + upd.c = constants; } + const res = assert.commandWorked(db.runCommand({update: collName, updates: [upd]})); + assert.eq(nModified, res.nModified); - // Upsert performs insert. - testUpsertDoesInsert({_id: 1, x: 1}, [{$set: {foo: 4}}], {_id: 1, x: 1, foo: 4}); - testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: 1}}], {_id: 1, x: 1}); - testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: "foo"}}], {_id: 1, x: "foo"}); - testUpsertDoesInsert({_id: 1, x: 1, y: 1}, [{$unset: ["x"]}], {_id: 1, y: 1}); - - // Update fails when invalid stage is specified. This is a sanity check rather than an - // exhaustive test of all stages. 
- assert.commandFailedWithCode(coll.update({x: 1}, [{$match: {x: 1}}]), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(coll.update({x: 1}, [{$sort: {x: 1}}]), ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(coll.update({x: 1}, [{$facet: {a: [{$match: {x: 1}}]}}]), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(coll.update({x: 1}, [{$indexStats: {}}]), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(coll.update({x: 1}, [{ - $bucket: { - groupBy: "$a", - boundaries: [0, 1], - default: "foo", - output: {count: {$sum: 1}} - } - }]), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode( - coll.update({x: 1}, - [{$lookup: {from: "foo", as: "as", localField: "a", foreignField: "b"}}]), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode(coll.update({x: 1}, [{ - $graphLookup: { - from: "foo", - startWith: "$a", - connectFromField: "a", - connectToField: "b", - as: "as" - } - }]), - ErrorCodes.InvalidOptions); - - // Update fails when supported agg stage is specified outside of pipeline. - assert.commandFailedWithCode(coll.update({_id: 1}, {$addFields: {x: 1}}), - ErrorCodes.FailedToParse); - - // The 'arrayFilters' option is not valid for pipeline updates. - assert.commandFailedWithCode( - coll.update({_id: 1}, [{$set: {x: 1}}], {arrayFilters: [{x: {$eq: 1}}]}), - ErrorCodes.FailedToParse); - - // Constants can be specified with pipeline-style updates. - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - useUpdateCommand: true, - constants: {foo: "bar"}, - update: [{$set: {foo: "$$foo"}}], - resultDocList: [{_id: 1, x: 1, foo: "bar"}], - nModified: 1 - }); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - useUpdateCommand: true, - constants: {foo: {a: {b: {c: "bar"}}}}, - update: [{$set: {foo: "$$foo"}}], - resultDocList: [{_id: 1, x: 1, foo: {a: {b: {c: "bar"}}}}], - nModified: 1 - }); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - useUpdateCommand: true, - constants: {foo: [1, 2, 3]}, - update: [{$set: {foo: {$arrayElemAt: ["$$foo", 2]}}}], - resultDocList: [{_id: 1, x: 1, foo: 3}], - nModified: 1 - }); - - const largeStr = "x".repeat(1000); - testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - useUpdateCommand: true, - constants: {largeStr: largeStr}, - update: [{$set: {foo: "$$largeStr"}}], - resultDocList: [{_id: 1, x: 1, foo: largeStr}], - nModified: 1 - }); - - // References to document fields are not resolved in constants. + let nMatched = 0; + for (let i = 0; i < resultDocList.length; ++i) { + if (0 === bsonWoCompare(coll.findOne(resultDocList[i]), resultDocList[i])) { + ++nMatched; + } + } + assert.eq(nModified, nMatched, `actual=${coll.find().toArray()}, expected=${resultDocList}`); +} + +function testUpsertDoesInsert(query, update, resultDoc) { + assert.commandWorked(coll.remove({})); + assert.commandWorked(coll.update(query, update, {upsert: true})); + assert.eq(coll.findOne({}), resultDoc, coll.find({}).toArray()); +} + +// Update with existing document. 
+testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + update: [{$set: {foo: 4}}], + resultDocList: [{_id: 1, x: 1, foo: 4}], + nModified: 1 +}); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1, y: 1}], + update: [{$project: {x: 1}}], + resultDocList: [{_id: 1, x: 1}], + nModified: 1 +}); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1, y: [{z: 1, foo: 1}]}], + update: [{$unset: ["x", "y.z"]}], + resultDocList: [{_id: 1, y: [{foo: 1}]}], + nModified: 1 +}); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1, t: {u: {v: 1}}}], + update: [{$replaceWith: "$t"}], + resultDocList: [{_id: 1, u: {v: 1}}], + nModified: 1 +}); + +// Multi-update. +testUpdate({ + query: {x: 1}, + initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 1}], + update: [{$set: {bar: 4}}], + resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 1, bar: 4}], + nModified: 2, + options: {multi: true} +}); + +// This test will fail in a sharded cluster when the 2 initial documents live on different +// shards. +if (!FixtureHelpers.isMongos(db)) { testUpdate({ - query: {_id: 1}, - initialDocumentList: [{_id: 1, x: 1}], - useUpdateCommand: true, - constants: {foo: "$x"}, - update: [{$set: {foo: "$$foo"}}], - resultDocList: [{_id: 1, x: 1, foo: "$x"}], - nModified: 1 + query: {_id: {$in: [1, 2]}}, + initialDocumentList: [{_id: 1, x: 1}, {_id: 2, x: 2}], + update: [{$set: {bar: 4}}], + resultDocList: [{_id: 1, x: 1, bar: 4}, {_id: 2, x: 2, bar: 4}], + nModified: 1, + options: {multi: false} }); - - // Cannot use expressions in constants. - assert.commandFailedWithCode(db.runCommand({ - update: collName, - updates: [{q: {_id: 1}, u: [{$set: {x: "$$foo"}}], c: {foo: {$add: [1, 2]}}}] - }), - ErrorCodes.DollarPrefixedFieldName); - - // Cannot use constants with regular updates. - assert.commandFailedWithCode( - db.runCommand( - {update: collName, updates: [{q: {_id: 1}, u: {x: "$$foo"}, c: {foo: "bar"}}]}), - 51198); - assert.commandFailedWithCode( - db.runCommand( - {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "$$foo"}}, c: {foo: "bar"}}]}), - 51198); - assert.commandFailedWithCode( - db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "1"}}, c: {}}]}), - 51198); +} + +// Upsert performs insert. +testUpsertDoesInsert({_id: 1, x: 1}, [{$set: {foo: 4}}], {_id: 1, x: 1, foo: 4}); +testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: 1}}], {_id: 1, x: 1}); +testUpsertDoesInsert({_id: 1, x: 1}, [{$project: {x: "foo"}}], {_id: 1, x: "foo"}); +testUpsertDoesInsert({_id: 1, x: 1, y: 1}, [{$unset: ["x"]}], {_id: 1, y: 1}); + +// Update fails when invalid stage is specified. This is a sanity check rather than an +// exhaustive test of all stages. 
+assert.commandFailedWithCode(coll.update({x: 1}, [{$match: {x: 1}}]), ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(coll.update({x: 1}, [{$sort: {x: 1}}]), ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(coll.update({x: 1}, [{$facet: {a: [{$match: {x: 1}}]}}]), + ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(coll.update({x: 1}, [{$indexStats: {}}]), ErrorCodes.InvalidOptions); +assert.commandFailedWithCode( + coll.update( + {x: 1}, [{ + $bucket: {groupBy: "$a", boundaries: [0, 1], default: "foo", output: {count: {$sum: 1}}} + }]), + ErrorCodes.InvalidOptions); +assert.commandFailedWithCode( + coll.update({x: 1}, [{$lookup: {from: "foo", as: "as", localField: "a", foreignField: "b"}}]), + ErrorCodes.InvalidOptions); +assert.commandFailedWithCode( + coll.update( + {x: 1}, [{ + $graphLookup: + {from: "foo", startWith: "$a", connectFromField: "a", connectToField: "b", as: "as"} + }]), + ErrorCodes.InvalidOptions); + +// Update fails when supported agg stage is specified outside of pipeline. +assert.commandFailedWithCode(coll.update({_id: 1}, {$addFields: {x: 1}}), ErrorCodes.FailedToParse); + +// The 'arrayFilters' option is not valid for pipeline updates. +assert.commandFailedWithCode( + coll.update({_id: 1}, [{$set: {x: 1}}], {arrayFilters: [{x: {$eq: 1}}]}), + ErrorCodes.FailedToParse); + +// Constants can be specified with pipeline-style updates. +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + useUpdateCommand: true, + constants: {foo: "bar"}, + update: [{$set: {foo: "$$foo"}}], + resultDocList: [{_id: 1, x: 1, foo: "bar"}], + nModified: 1 +}); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + useUpdateCommand: true, + constants: {foo: {a: {b: {c: "bar"}}}}, + update: [{$set: {foo: "$$foo"}}], + resultDocList: [{_id: 1, x: 1, foo: {a: {b: {c: "bar"}}}}], + nModified: 1 +}); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + useUpdateCommand: true, + constants: {foo: [1, 2, 3]}, + update: [{$set: {foo: {$arrayElemAt: ["$$foo", 2]}}}], + resultDocList: [{_id: 1, x: 1, foo: 3}], + nModified: 1 +}); + +const largeStr = "x".repeat(1000); +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + useUpdateCommand: true, + constants: {largeStr: largeStr}, + update: [{$set: {foo: "$$largeStr"}}], + resultDocList: [{_id: 1, x: 1, foo: largeStr}], + nModified: 1 +}); + +// References to document fields are not resolved in constants. +testUpdate({ + query: {_id: 1}, + initialDocumentList: [{_id: 1, x: 1}], + useUpdateCommand: true, + constants: {foo: "$x"}, + update: [{$set: {foo: "$$foo"}}], + resultDocList: [{_id: 1, x: 1, foo: "$x"}], + nModified: 1 +}); + +// Cannot use expressions in constants. +assert.commandFailedWithCode(db.runCommand({ + update: collName, + updates: [{q: {_id: 1}, u: [{$set: {x: "$$foo"}}], c: {foo: {$add: [1, 2]}}}] +}), + ErrorCodes.DollarPrefixedFieldName); + +// Cannot use constants with regular updates. 
+assert.commandFailedWithCode( + db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {x: "$$foo"}, c: {foo: "bar"}}]}), + 51198); +assert.commandFailedWithCode( + db.runCommand( + {update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "$$foo"}}, c: {foo: "bar"}}]}), + 51198); +assert.commandFailedWithCode( + db.runCommand({update: collName, updates: [{q: {_id: 1}, u: {$set: {x: "1"}}, c: {}}]}), 51198); })(); diff --git a/jstests/core/views/duplicate_ns.js b/jstests/core/views/duplicate_ns.js index 2ef02cd6bc1..f7693549164 100644 --- a/jstests/core/views/duplicate_ns.js +++ b/jstests/core/views/duplicate_ns.js @@ -6,25 +6,25 @@ // Test the creation of view with a duplicate name to a collection. (function() { - "use strict"; +"use strict"; - const dbName = "views_duplicate_ns"; - const viewsDb = db.getSiblingDB(dbName); - const collName = "myns"; - const viewId = dbName + "." + collName; +const dbName = "views_duplicate_ns"; +const viewsDb = db.getSiblingDB(dbName); +const collName = "myns"; +const viewId = dbName + "." + collName; - assert.commandWorked(viewsDb.dropDatabase()); - assert.writeOK(viewsDb.system.views.remove({_id: viewId})); - assert.commandWorked(viewsDb.runCommand({create: collName})); - assert.writeOK(viewsDb.system.views.insert({ - _id: viewId, - viewOn: "coll", - pipeline: [], - })); - assert.eq(2, - viewsDb.getCollectionInfos() - .filter(coll => { - return coll.name === collName; - }) - .length); +assert.commandWorked(viewsDb.dropDatabase()); +assert.writeOK(viewsDb.system.views.remove({_id: viewId})); +assert.commandWorked(viewsDb.runCommand({create: collName})); +assert.writeOK(viewsDb.system.views.insert({ + _id: viewId, + viewOn: "coll", + pipeline: [], +})); +assert.eq(2, + viewsDb.getCollectionInfos() + .filter(coll => { + return coll.name === collName; + }) + .length); }());
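For reference, the update_with_pipeline.js hunks above reduce to the following command shape. This is a minimal standalone sketch, not part of the patch; the collection name and the 'foo' constant are chosen purely for illustration, and it assumes a server that accepts pipeline-style updates:

    assert.commandWorked(db.update_with_pipeline.insert({_id: 1, x: 1}));
    const res = assert.commandWorked(db.runCommand({
        update: "update_with_pipeline",
        updates: [{
            q: {_id: 1},                  // query filter
            u: [{$set: {foo: "$$foo"}}],  // the update is an aggregation pipeline
            c: {foo: "bar"}               // constants, referenced in the pipeline as $$foo
        }]
    }));
    assert.eq(1, res.nModified);  // document becomes {_id: 1, x: 1, foo: "bar"}

As the assertions above expect, the 'c' field is only accepted when 'u' is a pipeline (error code 51198 otherwise), and constant values may not contain $-prefixed expressions or field references.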
\ No newline at end of file diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js index c7d758415a7..cdfd8240589 100644 --- a/jstests/core/views/invalid_system_views.js +++ b/jstests/core/views/invalid_system_views.js @@ -12,69 +12,66 @@ */ (function() { - "use strict"; - const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; - - function runTest(badViewDefinition) { - let viewsDB = db.getSiblingDB("invalid_system_views"); - assert.commandWorked(viewsDB.dropDatabase()); - - // Create a regular collection, then insert an invalid view into system.views. - assert.writeOK(viewsDB.collection.insert({x: 1})); - assert.commandWorked(viewsDB.runCommand({create: "collection2"})); - assert.commandWorked(viewsDB.runCommand({create: "collection3"})); - assert.commandWorked(viewsDB.collection.createIndex({x: 1})); - assert.writeOK(viewsDB.system.views.insert(badViewDefinition), - "failed to insert " + tojson(badViewDefinition)); - - // Test that a command involving views properly fails with a views-specific error code. - assert.commandFailedWithCode( - viewsDB.runCommand({listCollections: 1}), - ErrorCodes.InvalidViewDefinition, - "listCollections should have failed in the presence of an invalid view"); - - // Helper function to create a message to use if an assertion fails. - function makeErrorMessage(msg) { - return msg + - " should work on a valid, existing collection, despite the presence of bad views" + - " in system.views"; - } - - if (!isMongos) { - // Commands that run on existing regular collections should not be impacted by the - // presence of invalid views. However, applyOps doesn't work on mongos. - assert.commandWorked( - db.adminCommand( // - { - applyOps: - [{op: "c", ns: "invalid_system_views.$cmd", o: {drop: "collection3"}}] - }), - makeErrorMessage("applyOps")); - } - - assert.writeOK(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert")); - - assert.writeOK(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}), - makeErrorMessage("update")); - - assert.writeOK(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove")); +"use strict"; +const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; + +function runTest(badViewDefinition) { + let viewsDB = db.getSiblingDB("invalid_system_views"); + assert.commandWorked(viewsDB.dropDatabase()); + + // Create a regular collection, then insert an invalid view into system.views. + assert.writeOK(viewsDB.collection.insert({x: 1})); + assert.commandWorked(viewsDB.runCommand({create: "collection2"})); + assert.commandWorked(viewsDB.runCommand({create: "collection3"})); + assert.commandWorked(viewsDB.collection.createIndex({x: 1})); + assert.writeOK(viewsDB.system.views.insert(badViewDefinition), + "failed to insert " + tojson(badViewDefinition)); + + // Test that a command involving views properly fails with a views-specific error code. + assert.commandFailedWithCode( + viewsDB.runCommand({listCollections: 1}), + ErrorCodes.InvalidViewDefinition, + "listCollections should have failed in the presence of an invalid view"); + + // Helper function to create a message to use if an assertion fails. + function makeErrorMessage(msg) { + return msg + + " should work on a valid, existing collection, despite the presence of bad views" + + " in system.views"; + } + if (!isMongos) { + // Commands that run on existing regular collections should not be impacted by the + // presence of invalid views. However, applyOps doesn't work on mongos. 
assert.commandWorked( - viewsDB.runCommand({findAndModify: "collection", query: {x: 1}, update: {x: 2}}), - makeErrorMessage("findAndModify with update")); + db.adminCommand( // + {applyOps: [{op: "c", ns: "invalid_system_views.$cmd", o: {drop: "collection3"}}]}), + makeErrorMessage("applyOps")); + } - assert.commandWorked( - viewsDB.runCommand({findAndModify: "collection", query: {x: 2}, remove: true}), - makeErrorMessage("findAndModify with remove")); + assert.writeOK(viewsDB.collection.insert({y: "baz"}), makeErrorMessage("insert")); - const lookup = { - $lookup: {from: "collection2", localField: "_id", foreignField: "_id", as: "match"} - }; - assert.commandWorked( - viewsDB.runCommand({aggregate: "collection", pipeline: [lookup], cursor: {}}), - makeErrorMessage("aggregate with $lookup")); + assert.writeOK(viewsDB.collection.update({y: "baz"}, {$set: {y: "qux"}}), + makeErrorMessage("update")); + + assert.writeOK(viewsDB.collection.remove({y: "baz"}), makeErrorMessage("remove")); + + assert.commandWorked( + viewsDB.runCommand({findAndModify: "collection", query: {x: 1}, update: {x: 2}}), + makeErrorMessage("findAndModify with update")); + + assert.commandWorked( + viewsDB.runCommand({findAndModify: "collection", query: {x: 2}, remove: true}), + makeErrorMessage("findAndModify with remove")); + + const lookup = { + $lookup: {from: "collection2", localField: "_id", foreignField: "_id", as: "match"} + }; + assert.commandWorked( + viewsDB.runCommand({aggregate: "collection", pipeline: [lookup], cursor: {}}), + makeErrorMessage("aggregate with $lookup")); - const graphLookup = { + const graphLookup = { $graphLookup: { from: "collection2", startWith: "$_id", @@ -83,68 +80,61 @@ as: "match" } }; - assert.commandWorked( - viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup], cursor: {}}), - makeErrorMessage("aggregate with $graphLookup")); - - assert.commandWorked(viewsDB.runCommand({dropIndexes: "collection", index: "x_1"}), - makeErrorMessage("dropIndexes")); - - assert.commandWorked(viewsDB.collection.createIndex({x: 1}), - makeErrorMessage("createIndexes")); - - if (!isMongos) { - assert.commandWorked(viewsDB.collection.reIndex(), makeErrorMessage("reIndex")); - } + assert.commandWorked( + viewsDB.runCommand({aggregate: "collection", pipeline: [graphLookup], cursor: {}}), + makeErrorMessage("aggregate with $graphLookup")); - const storageEngine = jsTest.options().storageEngine; - if (isMongos || storageEngine === "ephemeralForTest" || storageEngine === "inMemory" || - storageEngine === "biggie") { - print("Not testing compact command on mongos or ephemeral storage engine"); - } else { - assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}), - makeErrorMessage("compact")); - } + assert.commandWorked(viewsDB.runCommand({dropIndexes: "collection", index: "x_1"}), + makeErrorMessage("dropIndexes")); - assert.commandWorked( - viewsDB.runCommand({collMod: "collection", validator: {x: {$type: "string"}}}), - makeErrorMessage("collMod")); - - const renameCommand = { - renameCollection: "invalid_system_views.collection", - to: "invalid_system_views.collection2", - dropTarget: true - }; - assert.commandWorked(viewsDB.adminCommand(renameCommand), - makeErrorMessage("renameCollection")); + assert.commandWorked(viewsDB.collection.createIndex({x: 1}), makeErrorMessage("createIndexes")); - assert.commandWorked(viewsDB.runCommand({drop: "collection2"}), makeErrorMessage("drop")); + if (!isMongos) { + assert.commandWorked(viewsDB.collection.reIndex(), 
makeErrorMessage("reIndex")); + } - // Drop the offending view so that the validate hook succeeds. - assert.writeOK(viewsDB.system.views.remove(badViewDefinition)); + const storageEngine = jsTest.options().storageEngine; + if (isMongos || storageEngine === "ephemeralForTest" || storageEngine === "inMemory" || + storageEngine === "biggie") { + print("Not testing compact command on mongos or ephemeral storage engine"); + } else { + assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}), + makeErrorMessage("compact")); } - runTest( - {_id: "invalid_system_views.badViewStringPipeline", viewOn: "collection", pipeline: "bad"}); - runTest({ - _id: "invalid_system_views.badViewEmptyObjectPipeline", - viewOn: "collection", - pipeline: {} - }); - runTest( - {_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "collection", pipeline: 7}); - runTest({ - _id: "invalid_system_views.badViewArrayWithIntegerPipeline", - viewOn: "collection", - pipeline: [1] - }); - runTest({ - _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline", - viewOn: "collection", - pipeline: [[]] - }); - runTest({_id: 7, viewOn: "collection", pipeline: []}); - runTest({_id: "invalid_system_views.embedded\0null", viewOn: "collection", pipeline: []}); - runTest({_id: "invalidNotFullyQualifiedNs", viewOn: "collection", pipeline: []}); - runTest({_id: "invalid_system_views.missingViewOnField", pipeline: []}); + assert.commandWorked( + viewsDB.runCommand({collMod: "collection", validator: {x: {$type: "string"}}}), + makeErrorMessage("collMod")); + + const renameCommand = { + renameCollection: "invalid_system_views.collection", + to: "invalid_system_views.collection2", + dropTarget: true + }; + assert.commandWorked(viewsDB.adminCommand(renameCommand), makeErrorMessage("renameCollection")); + + assert.commandWorked(viewsDB.runCommand({drop: "collection2"}), makeErrorMessage("drop")); + + // Drop the offending view so that the validate hook succeeds. + assert.writeOK(viewsDB.system.views.remove(badViewDefinition)); +} + +runTest({_id: "invalid_system_views.badViewStringPipeline", viewOn: "collection", pipeline: "bad"}); +runTest( + {_id: "invalid_system_views.badViewEmptyObjectPipeline", viewOn: "collection", pipeline: {}}); +runTest({_id: "invalid_system_views.badViewNumericalPipeline", viewOn: "collection", pipeline: 7}); +runTest({ + _id: "invalid_system_views.badViewArrayWithIntegerPipeline", + viewOn: "collection", + pipeline: [1] +}); +runTest({ + _id: "invalid_system_views.badViewArrayWithEmptyArrayPipeline", + viewOn: "collection", + pipeline: [[]] +}); +runTest({_id: 7, viewOn: "collection", pipeline: []}); +runTest({_id: "invalid_system_views.embedded\0null", viewOn: "collection", pipeline: []}); +runTest({_id: "invalidNotFullyQualifiedNs", viewOn: "collection", pipeline: []}); +runTest({_id: "invalid_system_views.missingViewOnField", pipeline: []}); }()); diff --git a/jstests/core/views/view_with_invalid_dbname.js b/jstests/core/views/view_with_invalid_dbname.js index a39ca49e934..8305c9970f4 100644 --- a/jstests/core/views/view_with_invalid_dbname.js +++ b/jstests/core/views/view_with_invalid_dbname.js @@ -6,24 +6,28 @@ // @tags: [ incompatible_with_embedded, SERVER-38379 ] (function() { - "use strict"; +"use strict"; - // Create a view whose dbname has an invalid embedded NULL character. That's not possible with - // the 'create' command, but it is possible by manually inserting into the 'system.views' - // collection. 
- const viewName = "dbNameWithEmbedded\0Character.collectionName"; - const collName = "viewOnForViewWithInvalidDBNameTest"; - const viewDef = {_id: viewName, viewOn: collName, pipeline: []}; - assert.commandWorked(db.system.views.insert(viewDef)); +// Create a view whose dbname has an invalid embedded NULL character. That's not possible with +// the 'create' command, but it is possible by manually inserting into the 'system.views' +// collection. +const viewName = "dbNameWithEmbedded\0Character.collectionName"; +const collName = "viewOnForViewWithInvalidDBNameTest"; +const viewDef = { + _id: viewName, + viewOn: collName, + pipeline: [] +}; +assert.commandWorked(db.system.views.insert(viewDef)); - // If the reinitialization of the durable view catalog tries to create a NamespaceString using - // the 'viewName' field, it will throw an exception in a place that is not exception safe, - // resulting in an invariant failure. This previously occurred because validation was only - // checking the collection part of the namespace, not the dbname part. With correct validation - // in place, reinitialization succeeds despite the invalid name. - assert.commandWorked(db.adminCommand({restartCatalog: 1})); +// If the reinitialization of the durable view catalog tries to create a NamespaceString using +// the 'viewName' field, it will throw an exception in a place that is not exception safe, +// resulting in an invariant failure. This previously occurred because validation was only +// checking the collection part of the namespace, not the dbname part. With correct validation +// in place, reinitialization succeeds despite the invalid name. +assert.commandWorked(db.adminCommand({restartCatalog: 1})); - // Don't let the bogus view stick around, or else it will cause an error in validation. - const res = db.system.views.deleteOne({_id: viewName}); - assert.eq(1, res.deletedCount); +// Don't let the bogus view stick around, or else it will cause an error in validation. +const res = db.system.views.deleteOne({_id: viewName}); +assert.eq(1, res.deletedCount); }()); diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js index 2b7f78f319f..fb78211307f 100644 --- a/jstests/core/views/views_aggregation.js +++ b/jstests/core/views/views_aggregation.js @@ -4,205 +4,206 @@ * requires_non_retryable_commands] */ (function() { - "use strict"; - - // For assertMergeFailsForAllModesWithCode. - load("jstests/aggregation/extras/merge_helpers.js"); - load("jstests/aggregation/extras/utils.js"); // For arrayEq, assertErrorCode, and - // orderedArrayEq. - - let viewsDB = db.getSiblingDB("views_aggregation"); - assert.commandWorked(viewsDB.dropDatabase()); - - // Helper functions. - let assertAggResultEq = function(collection, pipeline, expected, ordered) { - let coll = viewsDB.getCollection(collection); - let arr = coll.aggregate(pipeline).toArray(); - let success = (typeof(ordered) === "undefined" || !ordered) ? arrayEq(arr, expected) - : orderedArrayEq(arr, expected); - assert(success, tojson({got: arr, expected: expected})); - }; - let byPopulation = function(a, b) { - if (a.pop < b.pop) - return -1; - else if (a.pop > b.pop) - return 1; - else - return 0; - }; - - // Populate a collection with some test data. 
- let allDocuments = []; - allDocuments.push({_id: "New York", state: "NY", pop: 7}); - allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); - allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); - allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); - allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); - - let coll = viewsDB.coll; - let bulk = coll.initializeUnorderedBulkOp(); - allDocuments.forEach(function(doc) { - bulk.insert(doc); +"use strict"; + +// For assertMergeFailsForAllModesWithCode. +load("jstests/aggregation/extras/merge_helpers.js"); +load("jstests/aggregation/extras/utils.js"); // For arrayEq, assertErrorCode, and + // orderedArrayEq. + +let viewsDB = db.getSiblingDB("views_aggregation"); +assert.commandWorked(viewsDB.dropDatabase()); + +// Helper functions. +let assertAggResultEq = function(collection, pipeline, expected, ordered) { + let coll = viewsDB.getCollection(collection); + let arr = coll.aggregate(pipeline).toArray(); + let success = (typeof (ordered) === "undefined" || !ordered) ? arrayEq(arr, expected) + : orderedArrayEq(arr, expected); + assert(success, tojson({got: arr, expected: expected})); +}; +let byPopulation = function(a, b) { + if (a.pop < b.pop) + return -1; + else if (a.pop > b.pop) + return 1; + else + return 0; +}; + +// Populate a collection with some test data. +let allDocuments = []; +allDocuments.push({_id: "New York", state: "NY", pop: 7}); +allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); +allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); +allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); +allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); + +let coll = viewsDB.coll; +let bulk = coll.initializeUnorderedBulkOp(); +allDocuments.forEach(function(doc) { + bulk.insert(doc); +}); +assert.writeOK(bulk.execute()); + +// Create views on the data. +assert.commandWorked(viewsDB.runCommand({create: "emptyPipelineView", viewOn: "coll"})); +assert.commandWorked( + viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]})); +assert.commandWorked(viewsDB.runCommand( + {create: "noIdView", viewOn: "coll", pipeline: [{$project: {_id: 0, state: 1, pop: 1}}]})); +assert.commandWorked(viewsDB.runCommand({ + create: "popSortedView", + viewOn: "identityView", + pipeline: [{$match: {pop: {$gte: 0}}}, {$sort: {pop: 1}}] +})); + +// Find all documents with empty aggregations. +assertAggResultEq("emptyPipelineView", [], allDocuments); +assertAggResultEq("identityView", [], allDocuments); +assertAggResultEq("identityView", [{$match: {}}], allDocuments); + +// Filter documents on a view with $match. +assertAggResultEq( + "popSortedView", [{$match: {state: "NY"}}], [{_id: "New York", state: "NY", pop: 7}]); + +// An aggregation still works on a view that strips _id. +assertAggResultEq("noIdView", [{$match: {state: "NY"}}], [{state: "NY", pop: 7}]); + +// Aggregations work on views that sort. +const doOrderedSort = true; +assertAggResultEq("popSortedView", [], allDocuments.sort(byPopulation), doOrderedSort); +assertAggResultEq("popSortedView", [{$limit: 1}, {$project: {_id: 1}}], [{_id: "Palo Alto"}]); + +// Test that the $out stage errors when writing to a view namespace. +assertErrorCode(coll, [{$out: "emptyPipelineView"}], ErrorCodes.CommandNotSupportedOnView); + +// Test that the $merge stage errors when writing to a view namespace. 
+assertMergeFailsForAllModesWithCode({ + source: viewsDB.coll, + target: viewsDB.emptyPipelineView, + errorCodes: [ErrorCodes.CommandNotSupportedOnView] +}); + +// Test that the $merge stage errors when writing to a view namespace in a foreign database. +let foreignDB = db.getSiblingDB("views_aggregation_foreign"); +foreignDB.view.drop(); +assert.commandWorked(foreignDB.createView("view", "coll", [])); + +assertMergeFailsForAllModesWithCode({ + source: viewsDB.coll, + target: foreignDB.view, + errorCodes: [ErrorCodes.CommandNotSupportedOnView] +}); + +// Test that an aggregate on a view propagates the 'bypassDocumentValidation' option. +const validatedCollName = "collectionWithValidator"; +viewsDB[validatedCollName].drop(); +assert.commandWorked( + viewsDB.createCollection(validatedCollName, {validator: {illegalField: {$exists: false}}})); + +viewsDB.invalidDocs.drop(); +viewsDB.invalidDocsView.drop(); +assert.writeOK(viewsDB.invalidDocs.insert({illegalField: "present"})); +assert.commandWorked(viewsDB.createView("invalidDocsView", "invalidDocs", [])); + +assert.commandWorked( + viewsDB.runCommand({ + aggregate: "invalidDocsView", + pipeline: [{$out: validatedCollName}], + cursor: {}, + bypassDocumentValidation: true + }), + "Expected $out insertions to succeed since 'bypassDocumentValidation' was specified"); + +// Test that an aggregate on a view propagates the 'allowDiskUse' option. +const extSortLimit = 100 * 1024 * 1024; +const largeStrSize = 10 * 1024 * 1024; +const largeStr = new Array(largeStrSize).join('x'); +viewsDB.largeColl.drop(); +for (let i = 0; i <= extSortLimit / largeStrSize; ++i) { + assert.writeOK(viewsDB.largeColl.insert({x: i, largeStr: largeStr})); +} +assertErrorCode(viewsDB.largeColl, + [{$sort: {x: -1}}], + 16819, + "Expected in-memory sort to fail due to excessive memory usage"); +viewsDB.largeView.drop(); +assert.commandWorked(viewsDB.createView("largeView", "largeColl", [])); +assertErrorCode(viewsDB.largeView, + [{$sort: {x: -1}}], + 16819, + "Expected in-memory sort to fail due to excessive memory usage"); + +assert.commandWorked( + viewsDB.runCommand( + {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}), + "Expected aggregate to succeed since 'allowDiskUse' was specified"); + +// Test explain modes on a view. 
+let explainPlan = assert.commandWorked( + viewsDB.popSortedView.explain("queryPlanner").aggregate([{$limit: 1}, {$match: {pop: 3}}])); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); +assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); + +explainPlan = assert.commandWorked( + viewsDB.popSortedView.explain("executionStats").aggregate([{$limit: 1}, {$match: {pop: 3}}])); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5); +assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); + +explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("allPlansExecution") + .aggregate([{$limit: 1}, {$match: {pop: 3}}])); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5); +assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); + +// Passing a value of true for the explain option to the aggregation command, without using the +// shell explain helper, should continue to work. +explainPlan = assert.commandWorked( + viewsDB.popSortedView.aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true})); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); +assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); + +// Test allPlansExecution explain mode on the base collection. +explainPlan = assert.commandWorked( + viewsDB.coll.explain("allPlansExecution").aggregate([{$limit: 1}, {$match: {pop: 3}}])); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1); +assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); + +// The explain:true option should not work when paired with the explain shell helper. +assert.throws(function() { + viewsDB.popSortedView.explain("executionStats").aggregate([{$limit: 1}, {$match: {pop: 3}}], { + explain: true }); - assert.writeOK(bulk.execute()); - - // Create views on the data. - assert.commandWorked(viewsDB.runCommand({create: "emptyPipelineView", viewOn: "coll"})); - assert.commandWorked( - viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]})); - assert.commandWorked(viewsDB.runCommand( - {create: "noIdView", viewOn: "coll", pipeline: [{$project: {_id: 0, state: 1, pop: 1}}]})); - assert.commandWorked(viewsDB.runCommand({ - create: "popSortedView", - viewOn: "identityView", - pipeline: [{$match: {pop: {$gte: 0}}}, {$sort: {pop: 1}}] - })); - - // Find all documents with empty aggregations. - assertAggResultEq("emptyPipelineView", [], allDocuments); - assertAggResultEq("identityView", [], allDocuments); - assertAggResultEq("identityView", [{$match: {}}], allDocuments); - - // Filter documents on a view with $match. - assertAggResultEq( - "popSortedView", [{$match: {state: "NY"}}], [{_id: "New York", state: "NY", pop: 7}]); - - // An aggregation still works on a view that strips _id. 
- assertAggResultEq("noIdView", [{$match: {state: "NY"}}], [{state: "NY", pop: 7}]); - - // Aggregations work on views that sort. - const doOrderedSort = true; - assertAggResultEq("popSortedView", [], allDocuments.sort(byPopulation), doOrderedSort); - assertAggResultEq("popSortedView", [{$limit: 1}, {$project: {_id: 1}}], [{_id: "Palo Alto"}]); - - // Test that the $out stage errors when writing to a view namespace. - assertErrorCode(coll, [{$out: "emptyPipelineView"}], ErrorCodes.CommandNotSupportedOnView); - - // Test that the $merge stage errors when writing to a view namespace. - assertMergeFailsForAllModesWithCode({ - source: viewsDB.coll, - target: viewsDB.emptyPipelineView, - errorCodes: [ErrorCodes.CommandNotSupportedOnView] - }); - - // Test that the $merge stage errors when writing to a view namespace in a foreign database. - let foreignDB = db.getSiblingDB("views_aggregation_foreign"); - foreignDB.view.drop(); - assert.commandWorked(foreignDB.createView("view", "coll", [])); - - assertMergeFailsForAllModesWithCode({ - source: viewsDB.coll, - target: foreignDB.view, - errorCodes: [ErrorCodes.CommandNotSupportedOnView] - }); - - // Test that an aggregate on a view propagates the 'bypassDocumentValidation' option. - const validatedCollName = "collectionWithValidator"; - viewsDB[validatedCollName].drop(); - assert.commandWorked( - viewsDB.createCollection(validatedCollName, {validator: {illegalField: {$exists: false}}})); - - viewsDB.invalidDocs.drop(); - viewsDB.invalidDocsView.drop(); - assert.writeOK(viewsDB.invalidDocs.insert({illegalField: "present"})); - assert.commandWorked(viewsDB.createView("invalidDocsView", "invalidDocs", [])); - - assert.commandWorked( - viewsDB.runCommand({ - aggregate: "invalidDocsView", - pipeline: [{$out: validatedCollName}], - cursor: {}, - bypassDocumentValidation: true - }), - "Expected $out insertions to succeed since 'bypassDocumentValidation' was specified"); - - // Test that an aggregate on a view propagates the 'allowDiskUse' option. - const extSortLimit = 100 * 1024 * 1024; - const largeStrSize = 10 * 1024 * 1024; - const largeStr = new Array(largeStrSize).join('x'); - viewsDB.largeColl.drop(); - for (let i = 0; i <= extSortLimit / largeStrSize; ++i) { - assert.writeOK(viewsDB.largeColl.insert({x: i, largeStr: largeStr})); - } - assertErrorCode(viewsDB.largeColl, - [{$sort: {x: -1}}], - 16819, - "Expected in-memory sort to fail due to excessive memory usage"); - viewsDB.largeView.drop(); - assert.commandWorked(viewsDB.createView("largeView", "largeColl", [])); - assertErrorCode(viewsDB.largeView, - [{$sort: {x: -1}}], - 16819, - "Expected in-memory sort to fail due to excessive memory usage"); - - assert.commandWorked( - viewsDB.runCommand( - {aggregate: "largeView", pipeline: [{$sort: {x: -1}}], cursor: {}, allowDiskUse: true}), - "Expected aggregate to succeed since 'allowDiskUse' was specified"); - - // Test explain modes on a view. 
- let explainPlan = assert.commandWorked( - viewsDB.popSortedView.explain("queryPlanner").aggregate([{$limit: 1}, {$match: {pop: 3}}])); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); - assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - - explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("executionStats") - .aggregate([{$limit: 1}, {$match: {pop: 3}}])); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5); - assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - - explainPlan = assert.commandWorked(viewsDB.popSortedView.explain("allPlansExecution") - .aggregate([{$limit: 1}, {$match: {pop: 3}}])); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 5); - assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - - // Passing a value of true for the explain option to the aggregation command, without using the - // shell explain helper, should continue to work. - explainPlan = assert.commandWorked( - viewsDB.popSortedView.aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true})); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); - assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - - // Test allPlansExecution explain mode on the base collection. - explainPlan = assert.commandWorked( - viewsDB.coll.explain("allPlansExecution").aggregate([{$limit: 1}, {$match: {pop: 3}}])); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_aggregation.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 1); - assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - - // The explain:true option should not work when paired with the explain shell helper. - assert.throws(function() { - viewsDB.popSortedView.explain("executionStats") - .aggregate([{$limit: 1}, {$match: {pop: 3}}], {explain: true}); - }); - - // The remaining tests involve $lookup and $graphLookup. We cannot lookup into sharded - // collections, so skip these tests if running in a sharded configuration. - let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster")); - const isMongos = (isMasterResponse.msg === "isdbgrid"); - if (isMongos) { - jsTest.log("Tests are being run on a mongos; skipping all $lookup and $graphLookup tests."); - return; - } - - // Test that the $lookup stage resolves the view namespace referenced in the 'from' field. - assertAggResultEq( - coll.getName(), - [ - {$match: {_id: "New York"}}, - {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}}, - {$unwind: "$matched"}, - {$project: {_id: 1, matchedId: "$matched._id"}} - ], - [{_id: "New York", matchedId: "New York"}]); +}); + +// The remaining tests involve $lookup and $graphLookup. We cannot lookup into sharded +// collections, so skip these tests if running in a sharded configuration. 
+let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster")); +const isMongos = (isMasterResponse.msg === "isdbgrid"); +if (isMongos) { + jsTest.log("Tests are being run on a mongos; skipping all $lookup and $graphLookup tests."); + return; +} + +// Test that the $lookup stage resolves the view namespace referenced in the 'from' field. +assertAggResultEq( + coll.getName(), + [ + {$match: {_id: "New York"}}, + {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}}, + {$unwind: "$matched"}, + {$project: {_id: 1, matchedId: "$matched._id"}} + ], + [{_id: "New York", matchedId: "New York"}]); - // Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field. - assertAggResultEq(coll.getName(), +// Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field. +assertAggResultEq(coll.getName(), [ {$match: {_id: "New York"}}, { @@ -219,22 +220,19 @@ ], [{_id: "New York", matchedId: "New York"}]); - // Test that the $lookup stage resolves the view namespace referenced in the 'from' field of - // another $lookup stage nested inside of it. - assert.commandWorked(viewsDB.runCommand({ - create: "viewWithLookupInside", - viewOn: coll.getName(), - pipeline: [ - { - $lookup: - {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"} - }, - {$unwind: "$matched"}, - {$project: {_id: 1, matchedId: "$matched._id"}} - ] - })); - - assertAggResultEq( +// Test that the $lookup stage resolves the view namespace referenced in the 'from' field of +// another $lookup stage nested inside of it. +assert.commandWorked(viewsDB.runCommand({ + create: "viewWithLookupInside", + viewOn: coll.getName(), + pipeline: [ + {$lookup: {from: "identityView", localField: "_id", foreignField: "_id", as: "matched"}}, + {$unwind: "$matched"}, + {$project: {_id: 1, matchedId: "$matched._id"}} + ] +})); + +assertAggResultEq( coll.getName(), [ {$match: {_id: "New York"}}, @@ -251,9 +249,9 @@ ], [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]); - // Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field - // of a $lookup stage nested inside of it. - let graphLookupPipeline = [ +// Test that the $graphLookup stage resolves the view namespace referenced in the 'from' field +// of a $lookup stage nested inside of it. +let graphLookupPipeline = [ {$match: {_id: "New York"}}, { $graphLookup: { @@ -268,13 +266,13 @@ {$project: {_id: 1, matchedId1: "$matched._id", matchedId2: "$matched.matchedId"}} ]; - assertAggResultEq(coll.getName(), - graphLookupPipeline, - [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]); +assertAggResultEq(coll.getName(), + graphLookupPipeline, + [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]); - // Test that the $lookup stage on a view with a nested $lookup on a different view resolves the - // view namespaces referenced in their respective 'from' fields. - assertAggResultEq( +// Test that the $lookup stage on a view with a nested $lookup on a different view resolves the +// view namespaces referenced in their respective 'from' fields. +assertAggResultEq( coll.getName(), [ {$match: {_id: "Trenton"}}, @@ -307,11 +305,9 @@ }] }]); - // Test that the $facet stage resolves the view namespace referenced in the 'from' field of a - // $lookup stage nested inside of a $graphLookup stage. 
- assertAggResultEq( - coll.getName(), - [{$facet: {nested: graphLookupPipeline}}], - [{nested: [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]}]); - +// Test that the $facet stage resolves the view namespace referenced in the 'from' field of a +// $lookup stage nested inside of a $graphLookup stage. +assertAggResultEq(coll.getName(), + [{$facet: {nested: graphLookupPipeline}}], + [{nested: [{_id: "New York", matchedId1: "New York", matchedId2: "New York"}]}]); }()); diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js index 4a17d6642cb..ed6f137d6d3 100644 --- a/jstests/core/views/views_all_commands.js +++ b/jstests/core/views/views_all_commands.js @@ -60,574 +60,567 @@ */ (function() { - "use strict"; +"use strict"; - // Pre-written reasons for skipping a test. - const isAnInternalCommand = "internal command"; - const isUnrelated = "is unrelated"; +// Pre-written reasons for skipping a test. +const isAnInternalCommand = "internal command"; +const isUnrelated = "is unrelated"; - let viewsCommandTests = { - _addShard: {skip: isAnInternalCommand}, - _cloneCollectionOptionsFromPrimaryShard: {skip: isAnInternalCommand}, - _configsvrAddShard: {skip: isAnInternalCommand}, - _configsvrAddShardToZone: {skip: isAnInternalCommand}, - _configsvrBalancerStart: {skip: isAnInternalCommand}, - _configsvrBalancerStatus: {skip: isAnInternalCommand}, - _configsvrBalancerStop: {skip: isAnInternalCommand}, - _configsvrCommitChunkMerge: {skip: isAnInternalCommand}, - _configsvrCommitChunkMigration: {skip: isAnInternalCommand}, - _configsvrCommitChunkSplit: {skip: isAnInternalCommand}, - _configsvrCommitMovePrimary: {skip: isAnInternalCommand}, - _configsvrCreateCollection: {skip: isAnInternalCommand}, - _configsvrCreateDatabase: {skip: isAnInternalCommand}, - _configsvrDropCollection: {skip: isAnInternalCommand}, - _configsvrDropDatabase: {skip: isAnInternalCommand}, - _configsvrEnableSharding: {skip: isAnInternalCommand}, - _configsvrMoveChunk: {skip: isAnInternalCommand}, - _configsvrMovePrimary: {skip: isAnInternalCommand}, - _configsvrRefineCollectionShardKey: {skip: isAnInternalCommand}, - _configsvrRenameCollection: {skip: isAnInternalCommand}, - _configsvrRemoveShard: {skip: isAnInternalCommand}, - _configsvrRemoveShardFromZone: {skip: isAnInternalCommand}, - _configsvrShardCollection: {skip: isAnInternalCommand}, - _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand}, - _cpuProfilerStart: {skip: isAnInternalCommand}, - _cpuProfilerStop: {skip: isAnInternalCommand}, - _flushDatabaseCacheUpdates: {skip: isUnrelated}, - _flushRoutingTableCacheUpdates: {skip: isUnrelated}, - _getNextSessionMods: {skip: isAnInternalCommand}, - _getUserCacheGeneration: {skip: isAnInternalCommand}, - _hashBSONElement: {skip: isAnInternalCommand}, - _isSelf: {skip: isAnInternalCommand}, - _mergeAuthzCollections: {skip: isAnInternalCommand}, - _migrateClone: {skip: isAnInternalCommand}, - _recvChunkAbort: {skip: isAnInternalCommand}, - _recvChunkCommit: {skip: isAnInternalCommand}, - _recvChunkStart: {skip: isAnInternalCommand}, - _recvChunkStatus: {skip: isAnInternalCommand}, - _shardsvrCloneCatalogData: {skip: isAnInternalCommand}, - _shardsvrMovePrimary: {skip: isAnInternalCommand}, - _shardsvrRenameCollection: {skip: isAnInternalCommand}, - _shardsvrShardCollection: {skip: isAnInternalCommand}, - _transferMods: {skip: isAnInternalCommand}, - abortTransaction: {skip: isUnrelated}, - addShard: {skip: isUnrelated}, - addShardToZone: {skip: isUnrelated}, - 
aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}], cursor: {}}}, - appendOplogNote: {skip: isUnrelated}, - applyOps: { - command: {applyOps: [{op: "i", o: {_id: 1}, ns: "test.view"}]}, - expectFailure: true, - skipSharded: true, - }, - authenticate: {skip: isUnrelated}, - availableQueryOptions: {skip: isAnInternalCommand}, - balancerStart: {skip: isUnrelated}, - balancerStatus: {skip: isUnrelated}, - balancerStop: {skip: isUnrelated}, - buildInfo: {skip: isUnrelated}, - captrunc: { - command: {captrunc: "view", n: 2, inc: false}, - expectFailure: true, - }, - checkShardingIndex: {skip: isUnrelated}, - cleanupOrphaned: { - skip: "Tested in views/views_sharded.js", - }, - clearLog: {skip: isUnrelated}, - cloneCollectionAsCapped: { - command: {cloneCollectionAsCapped: "view", toCollection: "testcapped", size: 10240}, - expectFailure: true, - }, - collMod: {command: {collMod: "view", viewOn: "other", pipeline: []}}, - collStats: {skip: "Tested in views/views_coll_stats.js"}, - commitTransaction: {skip: isUnrelated}, - compact: {command: {compact: "view", force: true}, expectFailure: true, skipSharded: true}, - configureFailPoint: {skip: isUnrelated}, - connPoolStats: {skip: isUnrelated}, - connPoolSync: {skip: isUnrelated}, - connectionStatus: {skip: isUnrelated}, - convertToCapped: {command: {convertToCapped: "view", size: 12345}, expectFailure: true}, - coordinateCommitTransaction: {skip: isUnrelated}, - count: {command: {count: "view"}}, - cpuload: {skip: isAnInternalCommand}, - create: {skip: "tested in views/views_creation.js"}, - createIndexes: { - command: {createIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]}, - expectFailure: true, - }, - createRole: { - command: {createRole: "testrole", privileges: [], roles: []}, - setup: function(conn) { - assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); - }, - teardown: function(conn) { - assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); - } - }, - createUser: { - command: {createUser: "testuser", pwd: "testpass", roles: []}, - setup: function(conn) { - assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1})); - }, - teardown: function(conn) { - assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1})); - } - }, - currentOp: {skip: isUnrelated}, - dataSize: { - command: {dataSize: "test.view"}, - expectFailure: true, - }, - dbCheck: {command: {dbCheck: "view"}, expectFailure: true}, - dbHash: { - command: function(conn) { - let getHash = function() { - let cmd = {dbHash: 1}; - let res = conn.runCommand(cmd); - assert.commandWorked(res, tojson(cmd)); - return res.collections["system.views"]; - }; - // The checksum below should change if we change the views, but not otherwise. 
- let hash1 = getHash(); - assert.commandWorked(conn.runCommand({create: "view2", viewOn: "view"}), - "could not create view 'view2' on 'view'"); - let hash2 = getHash(); - assert.neq(hash1, hash2, "expected hash to change after creating new view"); - assert.commandWorked(conn.runCommand({drop: "view2"}), "problem dropping view2"); - let hash3 = getHash(); - assert.eq(hash1, hash3, "hash should be the same again after removing 'view2'"); - } +let viewsCommandTests = { + _addShard: {skip: isAnInternalCommand}, + _cloneCollectionOptionsFromPrimaryShard: {skip: isAnInternalCommand}, + _configsvrAddShard: {skip: isAnInternalCommand}, + _configsvrAddShardToZone: {skip: isAnInternalCommand}, + _configsvrBalancerStart: {skip: isAnInternalCommand}, + _configsvrBalancerStatus: {skip: isAnInternalCommand}, + _configsvrBalancerStop: {skip: isAnInternalCommand}, + _configsvrCommitChunkMerge: {skip: isAnInternalCommand}, + _configsvrCommitChunkMigration: {skip: isAnInternalCommand}, + _configsvrCommitChunkSplit: {skip: isAnInternalCommand}, + _configsvrCommitMovePrimary: {skip: isAnInternalCommand}, + _configsvrCreateCollection: {skip: isAnInternalCommand}, + _configsvrCreateDatabase: {skip: isAnInternalCommand}, + _configsvrDropCollection: {skip: isAnInternalCommand}, + _configsvrDropDatabase: {skip: isAnInternalCommand}, + _configsvrEnableSharding: {skip: isAnInternalCommand}, + _configsvrMoveChunk: {skip: isAnInternalCommand}, + _configsvrMovePrimary: {skip: isAnInternalCommand}, + _configsvrRefineCollectionShardKey: {skip: isAnInternalCommand}, + _configsvrRenameCollection: {skip: isAnInternalCommand}, + _configsvrRemoveShard: {skip: isAnInternalCommand}, + _configsvrRemoveShardFromZone: {skip: isAnInternalCommand}, + _configsvrShardCollection: {skip: isAnInternalCommand}, + _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand}, + _cpuProfilerStart: {skip: isAnInternalCommand}, + _cpuProfilerStop: {skip: isAnInternalCommand}, + _flushDatabaseCacheUpdates: {skip: isUnrelated}, + _flushRoutingTableCacheUpdates: {skip: isUnrelated}, + _getNextSessionMods: {skip: isAnInternalCommand}, + _getUserCacheGeneration: {skip: isAnInternalCommand}, + _hashBSONElement: {skip: isAnInternalCommand}, + _isSelf: {skip: isAnInternalCommand}, + _mergeAuthzCollections: {skip: isAnInternalCommand}, + _migrateClone: {skip: isAnInternalCommand}, + _recvChunkAbort: {skip: isAnInternalCommand}, + _recvChunkCommit: {skip: isAnInternalCommand}, + _recvChunkStart: {skip: isAnInternalCommand}, + _recvChunkStatus: {skip: isAnInternalCommand}, + _shardsvrCloneCatalogData: {skip: isAnInternalCommand}, + _shardsvrMovePrimary: {skip: isAnInternalCommand}, + _shardsvrRenameCollection: {skip: isAnInternalCommand}, + _shardsvrShardCollection: {skip: isAnInternalCommand}, + _transferMods: {skip: isAnInternalCommand}, + abortTransaction: {skip: isUnrelated}, + addShard: {skip: isUnrelated}, + addShardToZone: {skip: isUnrelated}, + aggregate: {command: {aggregate: "view", pipeline: [{$match: {}}], cursor: {}}}, + appendOplogNote: {skip: isUnrelated}, + applyOps: { + command: {applyOps: [{op: "i", o: {_id: 1}, ns: "test.view"}]}, + expectFailure: true, + skipSharded: true, + }, + authenticate: {skip: isUnrelated}, + availableQueryOptions: {skip: isAnInternalCommand}, + balancerStart: {skip: isUnrelated}, + balancerStatus: {skip: isUnrelated}, + balancerStop: {skip: isUnrelated}, + buildInfo: {skip: isUnrelated}, + captrunc: { + command: {captrunc: "view", n: 2, inc: false}, + expectFailure: true, + }, + checkShardingIndex: {skip: 
isUnrelated}, + cleanupOrphaned: { + skip: "Tested in views/views_sharded.js", + }, + clearLog: {skip: isUnrelated}, + cloneCollectionAsCapped: { + command: {cloneCollectionAsCapped: "view", toCollection: "testcapped", size: 10240}, + expectFailure: true, + }, + collMod: {command: {collMod: "view", viewOn: "other", pipeline: []}}, + collStats: {skip: "Tested in views/views_coll_stats.js"}, + commitTransaction: {skip: isUnrelated}, + compact: {command: {compact: "view", force: true}, expectFailure: true, skipSharded: true}, + configureFailPoint: {skip: isUnrelated}, + connPoolStats: {skip: isUnrelated}, + connPoolSync: {skip: isUnrelated}, + connectionStatus: {skip: isUnrelated}, + convertToCapped: {command: {convertToCapped: "view", size: 12345}, expectFailure: true}, + coordinateCommitTransaction: {skip: isUnrelated}, + count: {command: {count: "view"}}, + cpuload: {skip: isAnInternalCommand}, + create: {skip: "tested in views/views_creation.js"}, + createIndexes: { + command: {createIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]}, + expectFailure: true, + }, + createRole: { + command: {createRole: "testrole", privileges: [], roles: []}, + setup: function(conn) { + assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); }, - dbStats: {skip: "TODO(SERVER-25948)"}, - delete: {command: {delete: "view", deletes: [{q: {x: 1}, limit: 1}]}, expectFailure: true}, - distinct: {command: {distinct: "view", key: "_id"}}, - driverOIDTest: {skip: isUnrelated}, - drop: {command: {drop: "view"}}, - dropAllRolesFromDatabase: {skip: isUnrelated}, - dropAllUsersFromDatabase: {skip: isUnrelated}, - dropConnections: {skip: isUnrelated}, - dropDatabase: {command: {dropDatabase: 1}}, - dropIndexes: {command: {dropIndexes: "view"}, expectFailure: true}, - dropRole: { - command: {dropRole: "testrole"}, - setup: function(conn) { - assert.commandWorked( - conn.runCommand({createRole: "testrole", privileges: [], roles: []})); - }, - teardown: function(conn) { - assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); - } - }, - dropUser: {skip: isUnrelated}, - echo: {skip: isUnrelated}, - emptycapped: { - command: {emptycapped: "view"}, - expectFailure: true, - }, - enableSharding: {skip: "Tested as part of shardCollection"}, - endSessions: {skip: isUnrelated}, - explain: {command: {explain: {count: "view"}}}, - features: {skip: isUnrelated}, - filemd5: {skip: isUnrelated}, - find: {skip: "tested in views/views_find.js & views/views_sharded.js"}, - findAndModify: { - command: {findAndModify: "view", query: {a: 1}, update: {$set: {a: 2}}}, - expectFailure: true + teardown: function(conn) { + assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); + } + }, + createUser: { + command: {createUser: "testuser", pwd: "testpass", roles: []}, + setup: function(conn) { + assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1})); }, - flushRouterConfig: {skip: isUnrelated}, - fsync: {skip: isUnrelated}, - fsyncUnlock: {skip: isUnrelated}, - getDatabaseVersion: {skip: isUnrelated}, - geoSearch: { - command: { - geoSearch: "view", - search: {}, - near: [-50, 37], - }, - expectFailure: true + teardown: function(conn) { + assert.commandWorked(conn.runCommand({dropAllUsersFromDatabase: 1})); + } + }, + currentOp: {skip: isUnrelated}, + dataSize: { + command: {dataSize: "test.view"}, + expectFailure: true, + }, + dbCheck: {command: {dbCheck: "view"}, expectFailure: true}, + dbHash: { + command: function(conn) { + let getHash = function() { + let cmd = {dbHash: 1}; + let res 
= conn.runCommand(cmd); + assert.commandWorked(res, tojson(cmd)); + return res.collections["system.views"]; + }; + // The checksum below should change if we change the views, but not otherwise. + let hash1 = getHash(); + assert.commandWorked(conn.runCommand({create: "view2", viewOn: "view"}), + "could not create view 'view2' on 'view'"); + let hash2 = getHash(); + assert.neq(hash1, hash2, "expected hash to change after creating new view"); + assert.commandWorked(conn.runCommand({drop: "view2"}), "problem dropping view2"); + let hash3 = getHash(); + assert.eq(hash1, hash3, "hash should be the same again after removing 'view2'"); + } + }, + dbStats: {skip: "TODO(SERVER-25948)"}, + delete: {command: {delete: "view", deletes: [{q: {x: 1}, limit: 1}]}, expectFailure: true}, + distinct: {command: {distinct: "view", key: "_id"}}, + driverOIDTest: {skip: isUnrelated}, + drop: {command: {drop: "view"}}, + dropAllRolesFromDatabase: {skip: isUnrelated}, + dropAllUsersFromDatabase: {skip: isUnrelated}, + dropConnections: {skip: isUnrelated}, + dropDatabase: {command: {dropDatabase: 1}}, + dropIndexes: {command: {dropIndexes: "view"}, expectFailure: true}, + dropRole: { + command: {dropRole: "testrole"}, + setup: function(conn) { + assert.commandWorked( + conn.runCommand({createRole: "testrole", privileges: [], roles: []})); }, - getCmdLineOpts: {skip: isUnrelated}, - getDiagnosticData: {skip: isUnrelated}, - getFreeMonitoringStatus: {skip: isUnrelated}, - getLastError: {skip: isUnrelated}, - getLog: {skip: isUnrelated}, - getMore: { - setup: function(conn) { - assert.writeOK(conn.collection.remove({})); - assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}])); - }, - command: function(conn) { - function testGetMoreForCommand(cmd) { - let res = conn.runCommand(cmd); - assert.commandWorked(res, tojson(cmd)); - let cursor = res.cursor; - assert.eq(cursor.ns, - "test.view", - "expected view namespace in cursor: " + tojson(cursor)); - let expectedFirstBatch = [{_id: 1}, {_id: 2}]; - assert.eq(cursor.firstBatch, expectedFirstBatch, "returned wrong firstBatch"); - let getmoreCmd = {getMore: cursor.id, collection: "view"}; - res = conn.runCommand(getmoreCmd); - - assert.commandWorked(res, tojson(getmoreCmd)); - assert.eq("test.view", - res.cursor.ns, - "expected view namespace in cursor: " + tojson(res)); - } - // find command. - let findCmd = {find: "view", filter: {_id: {$gt: 0}}, batchSize: 2}; - testGetMoreForCommand(findCmd); - - // aggregate command. 
- let aggCmd = { - aggregate: "view", - pipeline: [{$match: {_id: {$gt: 0}}}], - cursor: {batchSize: 2} - }; - testGetMoreForCommand(aggCmd); - } + teardown: function(conn) { + assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); + } + }, + dropUser: {skip: isUnrelated}, + echo: {skip: isUnrelated}, + emptycapped: { + command: {emptycapped: "view"}, + expectFailure: true, + }, + enableSharding: {skip: "Tested as part of shardCollection"}, + endSessions: {skip: isUnrelated}, + explain: {command: {explain: {count: "view"}}}, + features: {skip: isUnrelated}, + filemd5: {skip: isUnrelated}, + find: {skip: "tested in views/views_find.js & views/views_sharded.js"}, + findAndModify: { + command: {findAndModify: "view", query: {a: 1}, update: {$set: {a: 2}}}, + expectFailure: true + }, + flushRouterConfig: {skip: isUnrelated}, + fsync: {skip: isUnrelated}, + fsyncUnlock: {skip: isUnrelated}, + getDatabaseVersion: {skip: isUnrelated}, + geoSearch: { + command: { + geoSearch: "view", + search: {}, + near: [-50, 37], }, - getParameter: {skip: isUnrelated}, - getShardMap: {skip: isUnrelated}, - getShardVersion: { - command: {getShardVersion: "test.view"}, - isAdminCommand: true, - expectFailure: true, - skipSharded: true, // mongos is tested in views/views_sharded.js + expectFailure: true + }, + getCmdLineOpts: {skip: isUnrelated}, + getDiagnosticData: {skip: isUnrelated}, + getFreeMonitoringStatus: {skip: isUnrelated}, + getLastError: {skip: isUnrelated}, + getLog: {skip: isUnrelated}, + getMore: { + setup: function(conn) { + assert.writeOK(conn.collection.remove({})); + assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}])); }, - getnonce: {skip: isUnrelated}, - godinsert: {skip: isAnInternalCommand}, - grantPrivilegesToRole: {skip: "tested in auth/commands_user_defined_roles.js"}, - grantRolesToRole: {skip: isUnrelated}, - grantRolesToUser: {skip: isUnrelated}, - handshake: {skip: isUnrelated}, - hostInfo: {skip: isUnrelated}, - httpClientRequest: {skip: isAnInternalCommand}, - insert: {command: {insert: "view", documents: [{x: 1}]}, expectFailure: true}, - invalidateUserCache: {skip: isUnrelated}, - isdbgrid: {skip: isUnrelated}, - isMaster: {skip: isUnrelated}, - killCursors: { - setup: function(conn) { - assert.writeOK(conn.collection.remove({})); - assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}])); - }, - command: function(conn) { - // First get and check a partial result for an aggregate command. - let aggCmd = { - aggregate: "view", - pipeline: [{$sort: {_id: 1}}], - cursor: {batchSize: 2} - }; - let res = conn.runCommand(aggCmd); - assert.commandWorked(res, tojson(aggCmd)); + command: function(conn) { + function testGetMoreForCommand(cmd) { + let res = conn.runCommand(cmd); + assert.commandWorked(res, tojson(cmd)); let cursor = res.cursor; assert.eq( cursor.ns, "test.view", "expected view namespace in cursor: " + tojson(cursor)); let expectedFirstBatch = [{_id: 1}, {_id: 2}]; - assert.eq( - cursor.firstBatch, expectedFirstBatch, "aggregate returned wrong firstBatch"); + assert.eq(cursor.firstBatch, expectedFirstBatch, "returned wrong firstBatch"); + let getmoreCmd = {getMore: cursor.id, collection: "view"}; + res = conn.runCommand(getmoreCmd); - // Then check correct execution of the killCursors command. 
- let killCursorsCmd = {killCursors: "view", cursors: [cursor.id]}; - res = conn.runCommand(killCursorsCmd); - assert.commandWorked(res, tojson(killCursorsCmd)); - let expectedRes = { - cursorsKilled: [cursor.id], - cursorsNotFound: [], - cursorsAlive: [], - cursorsUnknown: [], - ok: 1 - }; - delete res.operationTime; - delete res.$clusterTime; - assert.eq(expectedRes, res, "unexpected result for: " + tojson(killCursorsCmd)); + assert.commandWorked(res, tojson(getmoreCmd)); + assert.eq("test.view", + res.cursor.ns, + "expected view namespace in cursor: " + tojson(res)); } + // find command. + let findCmd = {find: "view", filter: {_id: {$gt: 0}}, batchSize: 2}; + testGetMoreForCommand(findCmd); + + // aggregate command. + let aggCmd = { + aggregate: "view", + pipeline: [{$match: {_id: {$gt: 0}}}], + cursor: {batchSize: 2} + }; + testGetMoreForCommand(aggCmd); + } + }, + getParameter: {skip: isUnrelated}, + getShardMap: {skip: isUnrelated}, + getShardVersion: { + command: {getShardVersion: "test.view"}, + isAdminCommand: true, + expectFailure: true, + skipSharded: true, // mongos is tested in views/views_sharded.js + }, + getnonce: {skip: isUnrelated}, + godinsert: {skip: isAnInternalCommand}, + grantPrivilegesToRole: {skip: "tested in auth/commands_user_defined_roles.js"}, + grantRolesToRole: {skip: isUnrelated}, + grantRolesToUser: {skip: isUnrelated}, + handshake: {skip: isUnrelated}, + hostInfo: {skip: isUnrelated}, + httpClientRequest: {skip: isAnInternalCommand}, + insert: {command: {insert: "view", documents: [{x: 1}]}, expectFailure: true}, + invalidateUserCache: {skip: isUnrelated}, + isdbgrid: {skip: isUnrelated}, + isMaster: {skip: isUnrelated}, + killCursors: { + setup: function(conn) { + assert.writeOK(conn.collection.remove({})); + assert.writeOK(conn.collection.insert([{_id: 1}, {_id: 2}, {_id: 3}])); }, - killOp: {skip: isUnrelated}, - killSessions: {skip: isUnrelated}, - killAllSessions: {skip: isUnrelated}, - killAllSessionsByPattern: {skip: isUnrelated}, - listCollections: {skip: "tested in views/views_creation.js"}, - listCommands: {skip: isUnrelated}, - listDatabases: {skip: isUnrelated}, - listIndexes: {command: {listIndexes: "view"}, expectFailure: true}, - listShards: {skip: isUnrelated}, - lockInfo: {skip: isUnrelated}, - logApplicationMessage: {skip: isUnrelated}, - logRotate: {skip: isUnrelated}, - logout: {skip: isUnrelated}, - makeSnapshot: {skip: isAnInternalCommand}, - mapReduce: { - command: - {mapReduce: "view", map: function() {}, reduce: function(key, vals) {}, out: "out"}, - expectFailure: true - }, - "mapreduce.shardedfinish": {skip: isAnInternalCommand}, - mergeChunks: { - command: {mergeChunks: "test.view", bounds: [{x: 0}, {x: 10}]}, - skipStandalone: true, + command: function(conn) { + // First get and check a partial result for an aggregate command. + let aggCmd = {aggregate: "view", pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 2}}; + let res = conn.runCommand(aggCmd); + assert.commandWorked(res, tojson(aggCmd)); + let cursor = res.cursor; + assert.eq( + cursor.ns, "test.view", "expected view namespace in cursor: " + tojson(cursor)); + let expectedFirstBatch = [{_id: 1}, {_id: 2}]; + assert.eq(cursor.firstBatch, expectedFirstBatch, "aggregate returned wrong firstBatch"); + + // Then check correct execution of the killCursors command. 
+ let killCursorsCmd = {killCursors: "view", cursors: [cursor.id]}; + res = conn.runCommand(killCursorsCmd); + assert.commandWorked(res, tojson(killCursorsCmd)); + let expectedRes = { + cursorsKilled: [cursor.id], + cursorsNotFound: [], + cursorsAlive: [], + cursorsUnknown: [], + ok: 1 + }; + delete res.operationTime; + delete res.$clusterTime; + assert.eq(expectedRes, res, "unexpected result for: " + tojson(killCursorsCmd)); + } + }, + killOp: {skip: isUnrelated}, + killSessions: {skip: isUnrelated}, + killAllSessions: {skip: isUnrelated}, + killAllSessionsByPattern: {skip: isUnrelated}, + listCollections: {skip: "tested in views/views_creation.js"}, + listCommands: {skip: isUnrelated}, + listDatabases: {skip: isUnrelated}, + listIndexes: {command: {listIndexes: "view"}, expectFailure: true}, + listShards: {skip: isUnrelated}, + lockInfo: {skip: isUnrelated}, + logApplicationMessage: {skip: isUnrelated}, + logRotate: {skip: isUnrelated}, + logout: {skip: isUnrelated}, + makeSnapshot: {skip: isAnInternalCommand}, + mapReduce: { + command: + {mapReduce: "view", map: function() {}, reduce: function(key, vals) {}, out: "out"}, + expectFailure: true + }, + "mapreduce.shardedfinish": {skip: isAnInternalCommand}, + mergeChunks: { + command: {mergeChunks: "test.view", bounds: [{x: 0}, {x: 10}]}, + skipStandalone: true, + isAdminCommand: true, + expectFailure: true, + expectedErrorCode: ErrorCodes.NamespaceNotSharded, + }, + moveChunk: { + command: {moveChunk: "test.view"}, + skipStandalone: true, + isAdminCommand: true, + expectFailure: true, + expectedErrorCode: ErrorCodes.NamespaceNotSharded, + }, + movePrimary: {skip: "Tested in sharding/movePrimary1.js"}, + multicast: {skip: isUnrelated}, + netstat: {skip: isAnInternalCommand}, + ping: {command: {ping: 1}}, + planCacheClear: {command: {planCacheClear: "view"}, expectFailure: true}, + planCacheClearFilters: {command: {planCacheClearFilters: "view"}, expectFailure: true}, + planCacheListFilters: {command: {planCacheListFilters: "view"}, expectFailure: true}, + planCacheListPlans: {command: {planCacheListPlans: "view"}, expectFailure: true}, + planCacheListQueryShapes: {command: {planCacheListQueryShapes: "view"}, expectFailure: true}, + planCacheSetFilter: {command: {planCacheSetFilter: "view"}, expectFailure: true}, + prepareTransaction: {skip: isUnrelated}, + profile: {skip: isUnrelated}, + refineCollectionShardKey: {skip: isUnrelated}, + refreshLogicalSessionCacheNow: {skip: isAnInternalCommand}, + reapLogicalSessionCacheNow: {skip: isAnInternalCommand}, + refreshSessions: {skip: isUnrelated}, + restartCatalog: {skip: isAnInternalCommand}, + reIndex: {command: {reIndex: "view"}, expectFailure: true}, + removeShard: {skip: isUnrelated}, + removeShardFromZone: {skip: isUnrelated}, + renameCollection: [ + { isAdminCommand: true, + command: {renameCollection: "test.view", to: "test.otherview"}, expectFailure: true, - expectedErrorCode: ErrorCodes.NamespaceNotSharded, + skipSharded: true, }, - moveChunk: { - command: {moveChunk: "test.view"}, - skipStandalone: true, + { isAdminCommand: true, + command: {renameCollection: "test.collection", to: "test.view"}, expectFailure: true, - expectedErrorCode: ErrorCodes.NamespaceNotSharded, - }, - movePrimary: {skip: "Tested in sharding/movePrimary1.js"}, - multicast: {skip: isUnrelated}, - netstat: {skip: isAnInternalCommand}, - ping: {command: {ping: 1}}, - planCacheClear: {command: {planCacheClear: "view"}, expectFailure: true}, - planCacheClearFilters: {command: {planCacheClearFilters: "view"}, 
expectFailure: true}, - planCacheListFilters: {command: {planCacheListFilters: "view"}, expectFailure: true}, - planCacheListPlans: {command: {planCacheListPlans: "view"}, expectFailure: true}, - planCacheListQueryShapes: - {command: {planCacheListQueryShapes: "view"}, expectFailure: true}, - planCacheSetFilter: {command: {planCacheSetFilter: "view"}, expectFailure: true}, - prepareTransaction: {skip: isUnrelated}, - profile: {skip: isUnrelated}, - refineCollectionShardKey: {skip: isUnrelated}, - refreshLogicalSessionCacheNow: {skip: isAnInternalCommand}, - reapLogicalSessionCacheNow: {skip: isAnInternalCommand}, - refreshSessions: {skip: isUnrelated}, - restartCatalog: {skip: isAnInternalCommand}, - reIndex: {command: {reIndex: "view"}, expectFailure: true}, - removeShard: {skip: isUnrelated}, - removeShardFromZone: {skip: isUnrelated}, - renameCollection: [ - { - isAdminCommand: true, - command: {renameCollection: "test.view", to: "test.otherview"}, - expectFailure: true, - skipSharded: true, - }, - { - isAdminCommand: true, - command: {renameCollection: "test.collection", to: "test.view"}, - expectFailure: true, - expectedErrorCode: ErrorCodes.NamespaceExists, - skipSharded: true, - } - ], - repairCursor: {command: {repairCursor: "view"}, expectFailure: true}, - repairDatabase: {skip: isUnrelated}, - replSetAbortPrimaryCatchUp: {skip: isUnrelated}, - replSetFreeze: {skip: isUnrelated}, - replSetGetConfig: {skip: isUnrelated}, - replSetGetRBID: {skip: isUnrelated}, - replSetGetStatus: {skip: isUnrelated}, - replSetHeartbeat: {skip: isUnrelated}, - replSetInitiate: {skip: isUnrelated}, - replSetMaintenance: {skip: isUnrelated}, - replSetReconfig: {skip: isUnrelated}, - replSetRequestVotes: {skip: isUnrelated}, - replSetStepDown: {skip: isUnrelated}, - replSetStepUp: {skip: isUnrelated}, - replSetSyncFrom: {skip: isUnrelated}, - replSetTest: {skip: isUnrelated}, - replSetUpdatePosition: {skip: isUnrelated}, - replSetResizeOplog: {skip: isUnrelated}, - resetError: {skip: isUnrelated}, - revokePrivilegesFromRole: { - command: { - revokePrivilegesFromRole: "testrole", - privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}] - }, - setup: function(conn) { - assert.commandWorked( - conn.runCommand({createRole: "testrole", privileges: [], roles: []})); - }, - teardown: function(conn) { - assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); - } - }, - revokeRolesFromRole: {skip: isUnrelated}, - revokeRolesFromUser: {skip: isUnrelated}, - rolesInfo: {skip: isUnrelated}, - saslContinue: {skip: isUnrelated}, - saslStart: {skip: isUnrelated}, - serverStatus: {command: {serverStatus: 1}, skip: isUnrelated}, - setIndexCommitQuorum: {skip: isUnrelated}, - setCommittedSnapshot: {skip: isAnInternalCommand}, - setFeatureCompatibilityVersion: {skip: isUnrelated}, - setFreeMonitoring: {skip: isUnrelated}, - setParameter: {skip: isUnrelated}, - setShardVersion: {skip: isUnrelated}, - shardCollection: { - command: {shardCollection: "test.view", key: {_id: 1}}, - setup: function(conn) { - assert.commandWorked(conn.adminCommand({enableSharding: "test"})); - }, - skipStandalone: true, - expectFailure: true, - isAdminCommand: true, + expectedErrorCode: ErrorCodes.NamespaceExists, + skipSharded: true, + } + ], + repairCursor: {command: {repairCursor: "view"}, expectFailure: true}, + repairDatabase: {skip: isUnrelated}, + replSetAbortPrimaryCatchUp: {skip: isUnrelated}, + replSetFreeze: {skip: isUnrelated}, + replSetGetConfig: {skip: isUnrelated}, + replSetGetRBID: {skip: 
isUnrelated}, + replSetGetStatus: {skip: isUnrelated}, + replSetHeartbeat: {skip: isUnrelated}, + replSetInitiate: {skip: isUnrelated}, + replSetMaintenance: {skip: isUnrelated}, + replSetReconfig: {skip: isUnrelated}, + replSetRequestVotes: {skip: isUnrelated}, + replSetStepDown: {skip: isUnrelated}, + replSetStepUp: {skip: isUnrelated}, + replSetSyncFrom: {skip: isUnrelated}, + replSetTest: {skip: isUnrelated}, + replSetUpdatePosition: {skip: isUnrelated}, + replSetResizeOplog: {skip: isUnrelated}, + resetError: {skip: isUnrelated}, + revokePrivilegesFromRole: { + command: { + revokePrivilegesFromRole: "testrole", + privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}] }, - shardConnPoolStats: {skip: isUnrelated}, - shardingState: {skip: isUnrelated}, - shutdown: {skip: isUnrelated}, - sleep: {skip: isUnrelated}, - split: { - command: {split: "test.view", find: {_id: 1}}, - skipStandalone: true, - expectFailure: true, - expectedErrorCode: ErrorCodes.NamespaceNotSharded, - isAdminCommand: true, + setup: function(conn) { + assert.commandWorked( + conn.runCommand({createRole: "testrole", privileges: [], roles: []})); }, - splitChunk: { - command: { - splitChunk: "test.view", - from: "shard0000", - min: {x: MinKey}, - max: {x: 0}, - keyPattern: {x: 1}, - splitKeys: [{x: -2}, {x: -1}], - shardVersion: [Timestamp(1, 2), ObjectId()] - }, - skipSharded: true, - expectFailure: true, - expectedErrorCode: 193, - isAdminCommand: true, + teardown: function(conn) { + assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); + } + }, + revokeRolesFromRole: {skip: isUnrelated}, + revokeRolesFromUser: {skip: isUnrelated}, + rolesInfo: {skip: isUnrelated}, + saslContinue: {skip: isUnrelated}, + saslStart: {skip: isUnrelated}, + serverStatus: {command: {serverStatus: 1}, skip: isUnrelated}, + setIndexCommitQuorum: {skip: isUnrelated}, + setCommittedSnapshot: {skip: isAnInternalCommand}, + setFeatureCompatibilityVersion: {skip: isUnrelated}, + setFreeMonitoring: {skip: isUnrelated}, + setParameter: {skip: isUnrelated}, + setShardVersion: {skip: isUnrelated}, + shardCollection: { + command: {shardCollection: "test.view", key: {_id: 1}}, + setup: function(conn) { + assert.commandWorked(conn.adminCommand({enableSharding: "test"})); }, - splitVector: { - command: { - splitVector: "test.view", - keyPattern: {x: 1}, - maxChunkSize: 1, - }, - expectFailure: true, + skipStandalone: true, + expectFailure: true, + isAdminCommand: true, + }, + shardConnPoolStats: {skip: isUnrelated}, + shardingState: {skip: isUnrelated}, + shutdown: {skip: isUnrelated}, + sleep: {skip: isUnrelated}, + split: { + command: {split: "test.view", find: {_id: 1}}, + skipStandalone: true, + expectFailure: true, + expectedErrorCode: ErrorCodes.NamespaceNotSharded, + isAdminCommand: true, + }, + splitChunk: { + command: { + splitChunk: "test.view", + from: "shard0000", + min: {x: MinKey}, + max: {x: 0}, + keyPattern: {x: 1}, + splitKeys: [{x: -2}, {x: -1}], + shardVersion: [Timestamp(1, 2), ObjectId()] }, - stageDebug: {skip: isAnInternalCommand}, - startRecordingTraffic: {skip: isUnrelated}, - startSession: {skip: isAnInternalCommand}, - stopRecordingTraffic: {skip: isUnrelated}, - top: {skip: "tested in views/views_stats.js"}, - touch: { - command: {touch: "view", data: true}, - expectFailure: true, + skipSharded: true, + expectFailure: true, + expectedErrorCode: 193, + isAdminCommand: true, + }, + splitVector: { + command: { + splitVector: "test.view", + keyPattern: {x: 1}, + maxChunkSize: 1, }, - 
twoPhaseCreateIndexes: { - command: {twoPhaseCreateIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]}, - expectFailure: true, + expectFailure: true, + }, + stageDebug: {skip: isAnInternalCommand}, + startRecordingTraffic: {skip: isUnrelated}, + startSession: {skip: isAnInternalCommand}, + stopRecordingTraffic: {skip: isUnrelated}, + top: {skip: "tested in views/views_stats.js"}, + touch: { + command: {touch: "view", data: true}, + expectFailure: true, + }, + twoPhaseCreateIndexes: { + command: {twoPhaseCreateIndexes: "view", indexes: [{key: {x: 1}, name: "x_1"}]}, + expectFailure: true, + }, + unsetSharding: {skip: isAnInternalCommand}, + update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true}, + updateRole: { + command: { + updateRole: "testrole", + privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}] }, - unsetSharding: {skip: isAnInternalCommand}, - update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true}, - updateRole: { - command: { - updateRole: "testrole", - privileges: [{resource: {db: "test", collection: "view"}, actions: ["find"]}] - }, - setup: function(conn) { - assert.commandWorked( - conn.runCommand({createRole: "testrole", privileges: [], roles: []})); - }, - teardown: function(conn) { - assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); - } + setup: function(conn) { + assert.commandWorked( + conn.runCommand({createRole: "testrole", privileges: [], roles: []})); }, - updateUser: {skip: isUnrelated}, - updateZoneKeyRange: {skip: isUnrelated}, - usersInfo: {skip: isUnrelated}, - validate: {command: {validate: "view"}, expectFailure: true}, - waitForOngoingChunkSplits: {skip: isUnrelated}, - voteCommitIndexBuild: {skip: isUnrelated}, - voteCommitTransaction: {skip: isUnrelated}, - voteAbortTransaction: {skip: isUnrelated}, - whatsmyuri: {skip: isUnrelated} - }; + teardown: function(conn) { + assert.commandWorked(conn.runCommand({dropAllRolesFromDatabase: 1})); + } + }, + updateUser: {skip: isUnrelated}, + updateZoneKeyRange: {skip: isUnrelated}, + usersInfo: {skip: isUnrelated}, + validate: {command: {validate: "view"}, expectFailure: true}, + waitForOngoingChunkSplits: {skip: isUnrelated}, + voteCommitIndexBuild: {skip: isUnrelated}, + voteCommitTransaction: {skip: isUnrelated}, + voteAbortTransaction: {skip: isUnrelated}, + whatsmyuri: {skip: isUnrelated} +}; - /** - * Helper function for failing commands or writes that checks the result 'res' of either. - * If 'code' is null we only check for failure, otherwise we confirm error code matches as - * well. On assert 'msg' is printed. - */ - let assertCommandOrWriteFailed = function(res, code, msg) { - if (res.writeErrors !== undefined) - assert.neq(0, res.writeErrors.length, msg); - else if (res.code !== null) - assert.commandFailedWithCode(res, code, msg); - else - assert.commandFailed(res, msg); - }; +/** + * Helper function for failing commands or writes that checks the result 'res' of either. + * If 'code' is null we only check for failure, otherwise we confirm error code matches as + * well. On assert 'msg' is printed. + */ +let assertCommandOrWriteFailed = function(res, code, msg) { + if (res.writeErrors !== undefined) + assert.neq(0, res.writeErrors.length, msg); + else if (res.code !== null) + assert.commandFailedWithCode(res, code, msg); + else + assert.commandFailed(res, msg); +}; - // Are we on a mongos? 
- var isMaster = db.runCommand("ismaster"); - assert.commandWorked(isMaster); - var isMongos = (isMaster.msg === "isdbgrid"); +// Are we on a mongos? +var isMaster = db.runCommand("ismaster"); +assert.commandWorked(isMaster); +var isMongos = (isMaster.msg === "isdbgrid"); - // Obtain a list of all commands. - let res = db.runCommand({listCommands: 1}); - assert.commandWorked(res); +// Obtain a list of all commands. +let res = db.runCommand({listCommands: 1}); +assert.commandWorked(res); - let commands = Object.keys(res.commands); - for (let command of commands) { - let test = viewsCommandTests[command]; - assert(test !== undefined, - "Coverage failure: must explicitly define a views test for " + command); +let commands = Object.keys(res.commands); +for (let command of commands) { + let test = viewsCommandTests[command]; + assert(test !== undefined, + "Coverage failure: must explicitly define a views test for " + command); - if (!(test instanceof Array)) - test = [test]; - let subtest_nr = 0; - for (let subtest of test) { - // Tests can be explicitly skipped. Print the name of the skipped test, as well as - // the reason why. - if (subtest.skip !== undefined) { - print("Skipping " + command + ": " + subtest.skip); - continue; - } + if (!(test instanceof Array)) + test = [test]; + let subtest_nr = 0; + for (let subtest of test) { + // Tests can be explicitly skipped. Print the name of the skipped test, as well as + // the reason why. + if (subtest.skip !== undefined) { + print("Skipping " + command + ": " + subtest.skip); + continue; + } - let dbHandle = db.getSiblingDB("test"); - let commandHandle = dbHandle; + let dbHandle = db.getSiblingDB("test"); + let commandHandle = dbHandle; - // Skip tests depending on sharding configuration. - if (subtest.skipSharded && isMongos) { - print("Skipping " + command + ": not applicable to mongoS"); - continue; - } + // Skip tests depending on sharding configuration. + if (subtest.skipSharded && isMongos) { + print("Skipping " + command + ": not applicable to mongoS"); + continue; + } - if (subtest.skipStandalone && !isMongos) { - print("Skipping " + command + ": not applicable to mongoD"); - continue; - } + if (subtest.skipStandalone && !isMongos) { + print("Skipping " + command + ": not applicable to mongoD"); + continue; + } - // Perform test setup, and call any additional setup callbacks provided by the test. - // All tests assume that there exists a view named 'view' that is backed by - // 'collection'. - assert.commandWorked(dbHandle.dropDatabase()); - assert.commandWorked(dbHandle.runCommand({create: "view", viewOn: "collection"})); - assert.writeOK(dbHandle.collection.insert({x: 1})); - if (subtest.setup !== undefined) - subtest.setup(dbHandle); + // Perform test setup, and call any additional setup callbacks provided by the test. + // All tests assume that there exists a view named 'view' that is backed by + // 'collection'. + assert.commandWorked(dbHandle.dropDatabase()); + assert.commandWorked(dbHandle.runCommand({create: "view", viewOn: "collection"})); + assert.writeOK(dbHandle.collection.insert({x: 1})); + if (subtest.setup !== undefined) + subtest.setup(dbHandle); - // Execute the command. Print the command name for the first subtest, as otherwise - // it may be hard to figure out what command caused a failure. - if (!subtest_nr++) - print("Testing " + command); + // Execute the command. Print the command name for the first subtest, as otherwise + // it may be hard to figure out what command caused a failure. 
+ if (!subtest_nr++) + print("Testing " + command); - if (subtest.isAdminCommand) - commandHandle = db.getSiblingDB("admin"); + if (subtest.isAdminCommand) + commandHandle = db.getSiblingDB("admin"); - if (subtest.expectFailure) { - let expectedErrorCode = subtest.expectedErrorCode; - if (expectedErrorCode === undefined) - expectedErrorCode = ErrorCodes.CommandNotSupportedOnView; + if (subtest.expectFailure) { + let expectedErrorCode = subtest.expectedErrorCode; + if (expectedErrorCode === undefined) + expectedErrorCode = ErrorCodes.CommandNotSupportedOnView; - assertCommandOrWriteFailed(commandHandle.runCommand(subtest.command), - expectedErrorCode, - tojson(subtest.command)); - } else if (subtest.command instanceof Function) - subtest.command(commandHandle); - else - assert.commandWorked(commandHandle.runCommand(subtest.command), - tojson(subtest.command)); + assertCommandOrWriteFailed(commandHandle.runCommand(subtest.command), + expectedErrorCode, + tojson(subtest.command)); + } else if (subtest.command instanceof Function) + subtest.command(commandHandle); + else + assert.commandWorked(commandHandle.runCommand(subtest.command), + tojson(subtest.command)); - if (subtest.teardown !== undefined) - subtest.teardown(dbHandle); - } + if (subtest.teardown !== undefined) + subtest.teardown(dbHandle); } +} }()); diff --git a/jstests/core/views/views_basic.js b/jstests/core/views/views_basic.js index 18bd486df8f..1186dbcd779 100644 --- a/jstests/core/views/views_basic.js +++ b/jstests/core/views/views_basic.js @@ -1,54 +1,53 @@ // Tests basic functionality of read-only, non-materialized views. (function() { - "use strict"; - - // For arrayEq. - load("jstests/aggregation/extras/utils.js"); - - let viewsDB = db.getSiblingDB("views_basic"); - assert.commandWorked(viewsDB.dropDatabase()); - - let assertCmdResultEq = function(cmd, expected) { - let res = viewsDB.runCommand(cmd); - assert.commandWorked(res); - - let cursor = new DBCommandCursor(db, res, 5); - let actual = cursor.toArray(); - assert(arrayEq(actual, expected), - "actual: " + tojson(cursor.toArray()) + ", expected:" + tojson(expected)); - }; - - // Insert some control documents. - let coll = viewsDB.getCollection("collection"); - let bulk = coll.initializeUnorderedBulkOp(); - bulk.insert({_id: "New York", state: "NY", pop: 7}); - bulk.insert({_id: "Oakland", state: "CA", pop: 3}); - bulk.insert({_id: "Palo Alto", state: "CA", pop: 10}); - bulk.insert({_id: "San Francisco", state: "CA", pop: 4}); - bulk.insert({_id: "Trenton", state: "NJ", pop: 5}); - assert.writeOK(bulk.execute()); - - // Test creating views on both collections and other views, using the database command and the - // shell helper. - assert.commandWorked(viewsDB.runCommand( - {create: "californiaCities", viewOn: "collection", pipeline: [{$match: {state: "CA"}}]})); - assert.commandWorked(viewsDB.createView("largeCaliforniaCities", - "californiaCities", - [{$match: {pop: {$gte: 10}}}, {$sort: {pop: 1}}])); - - // Use the find command on a view with various options. - assertCmdResultEq( - {find: "californiaCities", filter: {}, projection: {_id: 1, pop: 1}}, - [{_id: "Oakland", pop: 3}, {_id: "Palo Alto", pop: 10}, {_id: "San Francisco", pop: 4}]); - assertCmdResultEq({find: "largeCaliforniaCities", filter: {pop: {$lt: 50}}, limit: 1}, - [{_id: "Palo Alto", state: "CA", pop: 10}]); - - // Use aggregation on a view. 
- assertCmdResultEq({ - aggregate: "californiaCities", - pipeline: [{$group: {_id: "$state", totalPop: {$sum: "$pop"}}}], - cursor: {} - }, - [{_id: "CA", totalPop: 17}]); +"use strict"; + +// For arrayEq. +load("jstests/aggregation/extras/utils.js"); + +let viewsDB = db.getSiblingDB("views_basic"); +assert.commandWorked(viewsDB.dropDatabase()); + +let assertCmdResultEq = function(cmd, expected) { + let res = viewsDB.runCommand(cmd); + assert.commandWorked(res); + + let cursor = new DBCommandCursor(db, res, 5); + let actual = cursor.toArray(); + assert(arrayEq(actual, expected), + "actual: " + tojson(cursor.toArray()) + ", expected:" + tojson(expected)); +}; + +// Insert some control documents. +let coll = viewsDB.getCollection("collection"); +let bulk = coll.initializeUnorderedBulkOp(); +bulk.insert({_id: "New York", state: "NY", pop: 7}); +bulk.insert({_id: "Oakland", state: "CA", pop: 3}); +bulk.insert({_id: "Palo Alto", state: "CA", pop: 10}); +bulk.insert({_id: "San Francisco", state: "CA", pop: 4}); +bulk.insert({_id: "Trenton", state: "NJ", pop: 5}); +assert.writeOK(bulk.execute()); + +// Test creating views on both collections and other views, using the database command and the +// shell helper. +assert.commandWorked(viewsDB.runCommand( + {create: "californiaCities", viewOn: "collection", pipeline: [{$match: {state: "CA"}}]})); +assert.commandWorked(viewsDB.createView( + "largeCaliforniaCities", "californiaCities", [{$match: {pop: {$gte: 10}}}, {$sort: {pop: 1}}])); + +// Use the find command on a view with various options. +assertCmdResultEq( + {find: "californiaCities", filter: {}, projection: {_id: 1, pop: 1}}, + [{_id: "Oakland", pop: 3}, {_id: "Palo Alto", pop: 10}, {_id: "San Francisco", pop: 4}]); +assertCmdResultEq({find: "largeCaliforniaCities", filter: {pop: {$lt: 50}}, limit: 1}, + [{_id: "Palo Alto", state: "CA", pop: 10}]); + +// Use aggregation on a view. +assertCmdResultEq({ + aggregate: "californiaCities", + pipeline: [{$group: {_id: "$state", totalPop: {$sum: "$pop"}}}], + cursor: {} +}, + [{_id: "CA", totalPop: 17}]); }()); diff --git a/jstests/core/views/views_change.js b/jstests/core/views/views_change.js index f3bd8880a8c..94521013136 100644 --- a/jstests/core/views/views_change.js +++ b/jstests/core/views/views_change.js @@ -7,96 +7,96 @@ * ] */ (function() { - "use strict"; - - // For arrayEq. - load("jstests/aggregation/extras/utils.js"); - - let viewDB = db.getSiblingDB("views_change"); - let collection = viewDB.collection; - let view = viewDB.view; - let viewOnView = viewDB.viewOnView; - - // Convenience functions. - let resetCollectionAndViews = function() { - viewDB.runCommand({drop: "collection"}); - viewDB.runCommand({drop: "view"}); - viewDB.runCommand({drop: "viewOnView"}); - assert.commandWorked(viewDB.runCommand({create: "collection"})); - assert.commandWorked(viewDB.runCommand( - {create: "view", viewOn: "collection", pipeline: [{$match: {a: 1}}]})); - assert.commandWorked(viewDB.runCommand( - {create: "viewOnView", viewOn: "view", pipeline: [{$match: {b: 1}}]})); - }; - let assertFindResultEq = function(collName, expected) { - let res = viewDB.runCommand({find: collName, filter: {}, projection: {_id: 0, a: 1, b: 1}}); - assert.commandWorked(res); - let arr = new DBCommandCursor(db, res).toArray(); - let errmsg = tojson({expected: expected, got: arr}); - assert(arrayEq(arr, expected), errmsg); - }; - - let doc = {a: 1, b: 1}; - - resetCollectionAndViews(); - - // A view is updated when its viewOn is modified. 
When auth is enabled, we expect collMod to - // fail when specifying "viewOn" but not "pipeline". - assert.writeOK(collection.insert(doc)); - assertFindResultEq("view", [doc]); - let res = viewDB.runCommand({collMod: "view", viewOn: "nonexistent"}); - if (jsTest.options().auth) { - assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions); - } else { - assert.commandWorked(res); - assertFindResultEq("view", []); - } - - resetCollectionAndViews(); - - // A view is updated when its pipeline is modified. When auth is enabled, we expect collMod to - // fail when specifying "pipeline" but not "viewOn". - assert.writeOK(collection.insert(doc)); - assert.writeOK(collection.insert({a: 7})); - assertFindResultEq("view", [doc]); - res = viewDB.runCommand({collMod: "view", pipeline: [{$match: {a: {$gt: 4}}}]}); - if (jsTest.options().auth) { - assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions); - } else { - assert.commandWorked(res); - assertFindResultEq("view", [{a: 7}]); - } - - resetCollectionAndViews(); - - // A view is updated when the backing collection is updated. - assert.writeOK(collection.insert(doc)); - assertFindResultEq("view", [doc]); - assert.writeOK(collection.update({a: 1}, {$set: {a: 2}})); +"use strict"; + +// For arrayEq. +load("jstests/aggregation/extras/utils.js"); + +let viewDB = db.getSiblingDB("views_change"); +let collection = viewDB.collection; +let view = viewDB.view; +let viewOnView = viewDB.viewOnView; + +// Convenience functions. +let resetCollectionAndViews = function() { + viewDB.runCommand({drop: "collection"}); + viewDB.runCommand({drop: "view"}); + viewDB.runCommand({drop: "viewOnView"}); + assert.commandWorked(viewDB.runCommand({create: "collection"})); + assert.commandWorked( + viewDB.runCommand({create: "view", viewOn: "collection", pipeline: [{$match: {a: 1}}]})); + assert.commandWorked( + viewDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: [{$match: {b: 1}}]})); +}; +let assertFindResultEq = function(collName, expected) { + let res = viewDB.runCommand({find: collName, filter: {}, projection: {_id: 0, a: 1, b: 1}}); + assert.commandWorked(res); + let arr = new DBCommandCursor(db, res).toArray(); + let errmsg = tojson({expected: expected, got: arr}); + assert(arrayEq(arr, expected), errmsg); +}; + +let doc = {a: 1, b: 1}; + +resetCollectionAndViews(); + +// A view is updated when its viewOn is modified. When auth is enabled, we expect collMod to +// fail when specifying "viewOn" but not "pipeline". +assert.writeOK(collection.insert(doc)); +assertFindResultEq("view", [doc]); +let res = viewDB.runCommand({collMod: "view", viewOn: "nonexistent"}); +if (jsTest.options().auth) { + assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions); +} else { + assert.commandWorked(res); assertFindResultEq("view", []); - - resetCollectionAndViews(); - - // A view is updated when a backing view is updated. - assert.writeOK(collection.insert(doc)); - assertFindResultEq("viewOnView", [doc]); - assert.commandWorked(viewDB.runCommand( - {collMod: "view", viewOn: "collection", pipeline: [{$match: {nonexistent: 1}}]})); - assertFindResultEq("viewOnView", []); - - resetCollectionAndViews(); - - // A view appears empty if the backing collection is dropped. - assert.writeOK(collection.insert(doc)); - assertFindResultEq("view", [doc]); - assert.commandWorked(viewDB.runCommand({drop: "collection"})); - assertFindResultEq("view", []); - - resetCollectionAndViews(); - - // A view appears empty if a backing view is dropped. 
- assert.writeOK(collection.insert(doc)); - assertFindResultEq("viewOnView", [doc]); - assert.commandWorked(viewDB.runCommand({drop: "view"})); - assertFindResultEq("viewOnView", []); +} + +resetCollectionAndViews(); + +// A view is updated when its pipeline is modified. When auth is enabled, we expect collMod to +// fail when specifying "pipeline" but not "viewOn". +assert.writeOK(collection.insert(doc)); +assert.writeOK(collection.insert({a: 7})); +assertFindResultEq("view", [doc]); +res = viewDB.runCommand({collMod: "view", pipeline: [{$match: {a: {$gt: 4}}}]}); +if (jsTest.options().auth) { + assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions); +} else { + assert.commandWorked(res); + assertFindResultEq("view", [{a: 7}]); +} + +resetCollectionAndViews(); + +// A view is updated when the backing collection is updated. +assert.writeOK(collection.insert(doc)); +assertFindResultEq("view", [doc]); +assert.writeOK(collection.update({a: 1}, {$set: {a: 2}})); +assertFindResultEq("view", []); + +resetCollectionAndViews(); + +// A view is updated when a backing view is updated. +assert.writeOK(collection.insert(doc)); +assertFindResultEq("viewOnView", [doc]); +assert.commandWorked(viewDB.runCommand( + {collMod: "view", viewOn: "collection", pipeline: [{$match: {nonexistent: 1}}]})); +assertFindResultEq("viewOnView", []); + +resetCollectionAndViews(); + +// A view appears empty if the backing collection is dropped. +assert.writeOK(collection.insert(doc)); +assertFindResultEq("view", [doc]); +assert.commandWorked(viewDB.runCommand({drop: "collection"})); +assertFindResultEq("view", []); + +resetCollectionAndViews(); + +// A view appears empty if a backing view is dropped. +assert.writeOK(collection.insert(doc)); +assertFindResultEq("viewOnView", [doc]); +assert.commandWorked(viewDB.runCommand({drop: "view"})); +assertFindResultEq("viewOnView", []); }()); diff --git a/jstests/core/views/views_coll_stats.js b/jstests/core/views/views_coll_stats.js index bae2aa4e41f..cb09c41bb70 100644 --- a/jstests/core/views/views_coll_stats.js +++ b/jstests/core/views/views_coll_stats.js @@ -1,83 +1,85 @@ // Test that $collStats works on a view and in view pipelines as expected. 
(function() { - "use strict"; +"use strict"; - let viewsDB = db.getSiblingDB("views_coll_stats"); - const matchStage = {$match: {}}; - const collStatsStage = {$collStats: {latencyStats: {}}}; +let viewsDB = db.getSiblingDB("views_coll_stats"); +const matchStage = { + $match: {} +}; +const collStatsStage = { + $collStats: {latencyStats: {}} +}; - function clear() { - assert.commandWorked(viewsDB.dropDatabase()); - } +function clear() { + assert.commandWorked(viewsDB.dropDatabase()); +} - function getCollStats(ns) { - return viewsDB[ns].latencyStats().next(); - } +function getCollStats(ns) { + return viewsDB[ns].latencyStats().next(); +} - function checkCollStatsBelongTo(stats, expectedNs) { - assert.eq(stats.ns, - viewsDB[expectedNs].getFullName(), - "Expected coll stats for " + expectedNs + " but got " + stats.ns); - } +function checkCollStatsBelongTo(stats, expectedNs) { + assert.eq(stats.ns, + viewsDB[expectedNs].getFullName(), + "Expected coll stats for " + expectedNs + " but got " + stats.ns); +} - function makeView(viewNs, viewOnNs, pipeline) { - if (!pipeline) { - pipeline = []; - } - let res = viewsDB.runCommand({create: viewNs, viewOn: viewOnNs, pipeline: pipeline}); - assert.commandWorked(res); +function makeView(viewNs, viewOnNs, pipeline) { + if (!pipeline) { + pipeline = []; } + let res = viewsDB.runCommand({create: viewNs, viewOn: viewOnNs, pipeline: pipeline}); + assert.commandWorked(res); +} - clear(); - - // Check basic latency stats on a view. - makeView("a", "b"); - checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); - clear(); +clear(); - // Check that latency stats does not prepend the view pipeline. - makeView("a", "b", [matchStage]); - checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); - clear(); +// Check basic latency stats on a view. +makeView("a", "b"); +checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); +clear(); - // Check that latency stats works inside a pipeline. - makeView("a", "b", [collStatsStage]); - checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); - checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b"); - // Since the $collStats stage is in the pipeline, it should refer to the viewOn namespace. - checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b"); - clear(); +// Check that latency stats does not prepend the view pipeline. +makeView("a", "b", [matchStage]); +checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); +clear(); - // Check that the first $collStats pipeline stage found will not resolve further views. - makeView("a", "b", [collStatsStage, matchStage]); - makeView("b", "c", [collStatsStage]); - checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); - checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b"); - checkCollStatsBelongTo(viewsDB["c"].latencyStats().next(), "c"); - checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b"); - checkCollStatsBelongTo(viewsDB["b"].aggregate().next(), "c"); - clear(); +// Check that latency stats works inside a pipeline. +makeView("a", "b", [collStatsStage]); +checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a"); +checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b"); +// Since the $collStats stage is in the pipeline, it should refer to the viewOn namespace. +checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b"); +clear(); - // Assert that attempting to retrieve storageStats fails. 
-    makeView("a", "b");
-    assert.commandFailedWithCode(
-        viewsDB.runCommand(
-            {aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}], cursor: {}}),
-        ErrorCodes.CommandNotSupportedOnView);
-    clear();
+// Check that the first $collStats pipeline stage found will not resolve further views.
+makeView("a", "b", [collStatsStage, matchStage]);
+makeView("b", "c", [collStatsStage]);
+checkCollStatsBelongTo(viewsDB["a"].latencyStats().next(), "a");
+checkCollStatsBelongTo(viewsDB["b"].latencyStats().next(), "b");
+checkCollStatsBelongTo(viewsDB["c"].latencyStats().next(), "c");
+checkCollStatsBelongTo(viewsDB["a"].aggregate().next(), "b");
+checkCollStatsBelongTo(viewsDB["b"].aggregate().next(), "c");
+clear();

-    // Assert that attempting to retrieve collection record count on an identity view fails.
-    makeView("a", "b");
-    assert.commandFailedWithCode(
-        viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
-        ErrorCodes.CommandNotSupportedOnView);
-    clear();
+// Assert that attempting to retrieve storageStats fails.
+makeView("a", "b");
+assert.commandFailedWithCode(
+    viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {storageStats: {}}}], cursor: {}}),
+    ErrorCodes.CommandNotSupportedOnView);
+clear();

-    // Assert that attempting to retrieve collection record count on a non-identity view fails.
-    makeView("a", "b", [{$match: {a: 0}}]);
-    assert.commandFailedWithCode(
-        viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
-        ErrorCodes.CommandNotSupportedOnView);
-    clear();
+// Assert that attempting to retrieve collection record count on an identity view fails.
+makeView("a", "b");
+assert.commandFailedWithCode(
+    viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
+    ErrorCodes.CommandNotSupportedOnView);
+clear();
+// Assert that attempting to retrieve collection record count on a non-identity view fails.
+makeView("a", "b", [{$match: {a: 0}}]);
+assert.commandFailedWithCode(
+    viewsDB.runCommand({aggregate: "a", pipeline: [{$collStats: {count: {}}}], cursor: {}}),
+    ErrorCodes.CommandNotSupportedOnView);
+clear();
 }());
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index 32b103ae2fb..9c18c27a41b 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -4,164 +4,157 @@
 * Tests the behavior of operations when interacting with a view's default collation.
 */
 (function() {
-    "use strict";
-
-    load("jstests/libs/analyze_plan.js");
-
-    let viewsDB = db.getSiblingDB("views_collation");
-    assert.commandWorked(viewsDB.dropDatabase());
-    assert.commandWorked(viewsDB.runCommand({create: "simpleCollection"}));
-    assert.commandWorked(viewsDB.runCommand({create: "ukCollection", collation: {locale: "uk"}}));
-    assert.commandWorked(viewsDB.runCommand({create: "filCollection", collation: {locale: "fil"}}));
-
-    // Creating a view without specifying a collation defaults to the simple collation.
-    assert.commandWorked(viewsDB.runCommand({create: "simpleView", viewOn: "ukCollection"}));
-    let listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}});
-    assert.commandWorked(listCollectionsOutput);
-    assert(!listCollectionsOutput.cursor.firstBatch[0].options.hasOwnProperty("collation"));
-
-    // Operations that do not specify a collation succeed. 
- assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: [], cursor: {}})); - assert.commandWorked(viewsDB.runCommand({find: "simpleView"})); - assert.commandWorked(viewsDB.runCommand({count: "simpleView"})); - assert.commandWorked(viewsDB.runCommand({distinct: "simpleView", key: "x"})); - - // Operations that explicitly ask for the "simple" locale succeed against a view with the - // simple collation. - assert.commandWorked(viewsDB.runCommand( - {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "simple"}})); - assert.commandWorked(viewsDB.runCommand({find: "simpleView", collation: {locale: "simple"}})); - assert.commandWorked(viewsDB.runCommand({count: "simpleView", collation: {locale: "simple"}})); - assert.commandWorked( - viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "simple"}})); - - // Attempting to override a view's simple collation fails. - assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({find: "simpleView", collation: {locale: "fr"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({count: "simpleView", collation: {locale: "fil"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "es"}}), - ErrorCodes.OptionNotSupportedOnView); - - // Create a view with an explicit, non-simple collation. - assert.commandWorked( - viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}})); - listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {name: "filView"}}); - assert.commandWorked(listCollectionsOutput); - assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.collation.locale, "fil"); - - // Operations that do not specify a collation succeed. - assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}})); - assert.commandWorked(viewsDB.runCommand({find: "filView"})); - assert.commandWorked(viewsDB.runCommand({count: "filView"})); - assert.commandWorked(viewsDB.runCommand({distinct: "filView", key: "x"})); - - // Explain of operations that do not specify a collation succeed. - assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], explain: true})); - assert.commandWorked( - viewsDB.runCommand({explain: {find: "filView"}, verbosity: "allPlansExecution"})); - assert.commandWorked( - viewsDB.runCommand({explain: {count: "filView"}, verbosity: "allPlansExecution"})); - assert.commandWorked(viewsDB.runCommand( - {explain: {distinct: "filView", key: "x"}, verbosity: "allPlansExecution"})); - - // Operations with a matching collation succeed. - assert.commandWorked(viewsDB.runCommand( - {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "fil"}})); - assert.commandWorked(viewsDB.runCommand({find: "filView", collation: {locale: "fil"}})); - assert.commandWorked(viewsDB.runCommand({count: "filView", collation: {locale: "fil"}})); - assert.commandWorked( - viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "fil"}})); - - // Explain of operations with a matching collation succeed. 
- assert.commandWorked(viewsDB.runCommand( - {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "fil"}})); - assert.commandWorked(viewsDB.runCommand( - {explain: {find: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"})); - assert.commandWorked(viewsDB.runCommand( - {explain: {count: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"})); - assert.commandWorked(viewsDB.runCommand({ - explain: {distinct: "filView", key: "x", collation: {locale: "fil"}}, - verbosity: "allPlansExecution" - })); - - // Attempting to override the non-simple default collation of a view fails. - assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "simple"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "fr"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({find: "filView", collation: {locale: "simple"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "zh"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({count: "filView", collation: {locale: "simple"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "es"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "simple"}}), - ErrorCodes.OptionNotSupportedOnView); - - // Attempting to override the default collation of a view with explain fails. 
- assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "simple"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {find: "filView", collation: {locale: "fr"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {find: "filView", collation: {locale: "simple"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {count: "filView", collation: {locale: "zh"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {count: "filView", collation: {locale: "simple"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {distinct: "filView", key: "x", collation: {locale: "es"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - explain: {distinct: "filView", key: "x", collation: {locale: "simple"}}, - verbosity: "allPlansExecution" - }), - ErrorCodes.OptionNotSupportedOnView); - - const lookupSimpleView = { - $lookup: {from: "simpleView", localField: "x", foreignField: "x", as: "result"} - }; - const nestedLookupSimpleView = { +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +let viewsDB = db.getSiblingDB("views_collation"); +assert.commandWorked(viewsDB.dropDatabase()); +assert.commandWorked(viewsDB.runCommand({create: "simpleCollection"})); +assert.commandWorked(viewsDB.runCommand({create: "ukCollection", collation: {locale: "uk"}})); +assert.commandWorked(viewsDB.runCommand({create: "filCollection", collation: {locale: "fil"}})); + +// Creating a view without specifying a collation defaults to the simple collation. +assert.commandWorked(viewsDB.runCommand({create: "simpleView", viewOn: "ukCollection"})); +let listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}}); +assert.commandWorked(listCollectionsOutput); +assert(!listCollectionsOutput.cursor.firstBatch[0].options.hasOwnProperty("collation")); + +// Operations that do not specify a collation succeed. +assert.commandWorked(viewsDB.runCommand({aggregate: "simpleView", pipeline: [], cursor: {}})); +assert.commandWorked(viewsDB.runCommand({find: "simpleView"})); +assert.commandWorked(viewsDB.runCommand({count: "simpleView"})); +assert.commandWorked(viewsDB.runCommand({distinct: "simpleView", key: "x"})); + +// Operations that explicitly ask for the "simple" locale succeed against a view with the +// simple collation. +assert.commandWorked(viewsDB.runCommand( + {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "simple"}})); +assert.commandWorked(viewsDB.runCommand({find: "simpleView", collation: {locale: "simple"}})); +assert.commandWorked(viewsDB.runCommand({count: "simpleView", collation: {locale: "simple"}})); +assert.commandWorked( + viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "simple"}})); + +// Attempting to override a view's simple collation fails. 
+assert.commandFailedWithCode( + viewsDB.runCommand( + {aggregate: "simpleView", pipeline: [], cursor: {}, collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({find: "simpleView", collation: {locale: "fr"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({count: "simpleView", collation: {locale: "fil"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({distinct: "simpleView", key: "x", collation: {locale: "es"}}), + ErrorCodes.OptionNotSupportedOnView); + +// Create a view with an explicit, non-simple collation. +assert.commandWorked( + viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}})); +listCollectionsOutput = viewsDB.runCommand({listCollections: 1, filter: {name: "filView"}}); +assert.commandWorked(listCollectionsOutput); +assert.eq(listCollectionsOutput.cursor.firstBatch[0].options.collation.locale, "fil"); + +// Operations that do not specify a collation succeed. +assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}})); +assert.commandWorked(viewsDB.runCommand({find: "filView"})); +assert.commandWorked(viewsDB.runCommand({count: "filView"})); +assert.commandWorked(viewsDB.runCommand({distinct: "filView", key: "x"})); + +// Explain of operations that do not specify a collation succeed. +assert.commandWorked(viewsDB.runCommand({aggregate: "filView", pipeline: [], explain: true})); +assert.commandWorked( + viewsDB.runCommand({explain: {find: "filView"}, verbosity: "allPlansExecution"})); +assert.commandWorked( + viewsDB.runCommand({explain: {count: "filView"}, verbosity: "allPlansExecution"})); +assert.commandWorked( + viewsDB.runCommand({explain: {distinct: "filView", key: "x"}, verbosity: "allPlansExecution"})); + +// Operations with a matching collation succeed. +assert.commandWorked(viewsDB.runCommand( + {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "fil"}})); +assert.commandWorked(viewsDB.runCommand({find: "filView", collation: {locale: "fil"}})); +assert.commandWorked(viewsDB.runCommand({count: "filView", collation: {locale: "fil"}})); +assert.commandWorked( + viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "fil"}})); + +// Explain of operations with a matching collation succeed. +assert.commandWorked(viewsDB.runCommand( + {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "fil"}})); +assert.commandWorked(viewsDB.runCommand( + {explain: {find: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"})); +assert.commandWorked(viewsDB.runCommand( + {explain: {count: "filView", collation: {locale: "fil"}}, verbosity: "allPlansExecution"})); +assert.commandWorked(viewsDB.runCommand({ + explain: {distinct: "filView", key: "x", collation: {locale: "fil"}}, + verbosity: "allPlansExecution" +})); + +// Attempting to override the non-simple default collation of a view fails. 
+assert.commandFailedWithCode( + viewsDB.runCommand({aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {aggregate: "filView", pipeline: [], cursor: {}, collation: {locale: "simple"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "fr"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({find: "filView", collation: {locale: "simple"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "zh"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({count: "filView", collation: {locale: "simple"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "es"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({distinct: "filView", key: "x", collation: {locale: "simple"}}), + ErrorCodes.OptionNotSupportedOnView); + +// Attempting to override the default collation of a view with explain fails. +assert.commandFailedWithCode( + viewsDB.runCommand( + {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {aggregate: "filView", pipeline: [], explain: true, collation: {locale: "simple"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {explain: {find: "filView", collation: {locale: "fr"}}, verbosity: "allPlansExecution"}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + explain: {find: "filView", collation: {locale: "simple"}}, + verbosity: "allPlansExecution" +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {explain: {count: "filView", collation: {locale: "zh"}}, verbosity: "allPlansExecution"}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + explain: {count: "filView", collation: {locale: "simple"}}, + verbosity: "allPlansExecution" +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + explain: {distinct: "filView", key: "x", collation: {locale: "es"}}, + verbosity: "allPlansExecution" +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + explain: {distinct: "filView", key: "x", collation: {locale: "simple"}}, + verbosity: "allPlansExecution" +}), + ErrorCodes.OptionNotSupportedOnView); + +const lookupSimpleView = { + $lookup: {from: "simpleView", localField: "x", foreignField: "x", as: "result"} +}; +const nestedLookupSimpleView = { $lookup: { from: "simpleCollection", pipeline: [{ @@ -171,7 +164,7 @@ as: "result" } }; - const graphLookupSimpleView = { +const graphLookupSimpleView = { $graphLookup: { from: "simpleView", startWith: "$_id", @@ -181,65 +174,65 @@ } }; - // You can lookup into a view with the simple collation if the collection also has the same - // default collation. 
- assert.commandWorked(viewsDB.runCommand( - {aggregate: "simpleCollection", pipeline: [lookupSimpleView], cursor: {}})); - assert.commandWorked(viewsDB.runCommand( - {aggregate: "simpleCollection", pipeline: [nestedLookupSimpleView], cursor: {}})); - assert.commandWorked(viewsDB.runCommand( - {aggregate: "simpleCollection", pipeline: [graphLookupSimpleView], cursor: {}})); - - // You can lookup into a view with the simple collation if the operation has a matching - // collation. - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [lookupSimpleView], - cursor: {}, - collation: {locale: "simple"} - })); - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [nestedLookupSimpleView], - cursor: {}, - collation: {locale: "simple"} - })); - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [graphLookupSimpleView], - cursor: {}, - collation: {locale: "simple"} - })); - - // You can't lookup into a view with the simple collation if the operation has a conflicting - // collation. - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "simpleCollection", - pipeline: [lookupSimpleView], - cursor: {}, - collation: {locale: "en"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "simpleCollection", - pipeline: [nestedLookupSimpleView], - cursor: {}, - collation: {locale: "en"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "simpleCollection", - pipeline: [graphLookupSimpleView], - cursor: {}, - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - - const lookupFilView = { - $lookup: {from: "filView", localField: "x", foreignField: "x", as: "result"} - }; - function makeNestedLookupFilView(sourceCollName) { - return { +// You can lookup into a view with the simple collation if the collection also has the same +// default collation. +assert.commandWorked( + viewsDB.runCommand({aggregate: "simpleCollection", pipeline: [lookupSimpleView], cursor: {}})); +assert.commandWorked(viewsDB.runCommand( + {aggregate: "simpleCollection", pipeline: [nestedLookupSimpleView], cursor: {}})); +assert.commandWorked(viewsDB.runCommand( + {aggregate: "simpleCollection", pipeline: [graphLookupSimpleView], cursor: {}})); + +// You can lookup into a view with the simple collation if the operation has a matching +// collation. +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [lookupSimpleView], + cursor: {}, + collation: {locale: "simple"} +})); +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [nestedLookupSimpleView], + cursor: {}, + collation: {locale: "simple"} +})); +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [graphLookupSimpleView], + cursor: {}, + collation: {locale: "simple"} +})); + +// You can't lookup into a view with the simple collation if the operation has a conflicting +// collation. 
+assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "simpleCollection", + pipeline: [lookupSimpleView], + cursor: {}, + collation: {locale: "en"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "simpleCollection", + pipeline: [nestedLookupSimpleView], + cursor: {}, + collation: {locale: "en"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "simpleCollection", + pipeline: [graphLookupSimpleView], + cursor: {}, + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); + +const lookupFilView = { + $lookup: {from: "filView", localField: "x", foreignField: "x", as: "result"} +}; +function makeNestedLookupFilView(sourceCollName) { + return { $lookup: { from: sourceCollName, pipeline: [{ @@ -249,8 +242,8 @@ as: "result" } }; - } - const graphLookupFilView = { +} +const graphLookupFilView = { $graphLookup: { from: "filView", startWith: "$_id", @@ -260,91 +253,90 @@ } }; - // You can lookup into a view with no operation collation specified if the collection's - // collation matches the collation of the view. - assert.commandWorked( - viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView], cursor: {}})); - assert.commandWorked(viewsDB.runCommand({ - aggregate: "filCollection", - pipeline: [makeNestedLookupFilView("filCollection")], - cursor: {} - })); - assert.commandWorked(viewsDB.runCommand( - {aggregate: "filCollection", pipeline: [graphLookupFilView], cursor: {}})); - - // You can lookup into a view with a non-simple collation if the operation's collation - // matches. - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [lookupFilView], - cursor: {}, - collation: {locale: "fil"} - })); - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [makeNestedLookupFilView("ukCollection")], - cursor: {}, - collation: {locale: "fil"} - })); - assert.commandWorked(viewsDB.runCommand({ - aggregate: "ukCollection", - pipeline: [graphLookupFilView], - cursor: {}, - collation: {locale: "fil"} - })); - - // You can't lookup into a view when aggregating a collection whose default collation does - // not match the view's default collation. - assert.commandFailedWithCode( - viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [lookupFilView]}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "simpleCollection", - cursor: {}, - pipeline: [makeNestedLookupFilView("simpleCollation")] - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {aggregate: "simpleCollection", cursor: {}, pipeline: [graphLookupFilView]}), - ErrorCodes.OptionNotSupportedOnView); - - // You can't lookup into a view when aggregating a collection and the operation's collation - // does not match the view's default collation. 
- assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "filCollection", - pipeline: [lookupFilView], - cursor: {}, - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "filCollection", - pipeline: [makeNestedLookupFilView("filCollection")], - cursor: {}, - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "filCollection", - pipeline: [graphLookupFilView], - cursor: {}, - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - - // You may perform an aggregation involving multiple views if they all have the same default - // collation. - assert.commandWorked(viewsDB.runCommand( - {create: "simpleView2", viewOn: "simpleCollection", collation: {locale: "simple"}})); - assert.commandWorked( - viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView], cursor: {}})); - assert.commandWorked(viewsDB.runCommand( - {aggregate: "simpleView2", pipeline: [graphLookupSimpleView], cursor: {}})); - - // You may perform an aggregation involving multiple views and collections if all the views - // have the same default collation. - const graphLookupUkCollection = { +// You can lookup into a view with no operation collation specified if the collection's +// collation matches the collation of the view. +assert.commandWorked( + viewsDB.runCommand({aggregate: "filCollection", pipeline: [lookupFilView], cursor: {}})); +assert.commandWorked(viewsDB.runCommand({ + aggregate: "filCollection", + pipeline: [makeNestedLookupFilView("filCollection")], + cursor: {} +})); +assert.commandWorked( + viewsDB.runCommand({aggregate: "filCollection", pipeline: [graphLookupFilView], cursor: {}})); + +// You can lookup into a view with a non-simple collation if the operation's collation +// matches. +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [lookupFilView], + cursor: {}, + collation: {locale: "fil"} +})); +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [makeNestedLookupFilView("ukCollection")], + cursor: {}, + collation: {locale: "fil"} +})); +assert.commandWorked(viewsDB.runCommand({ + aggregate: "ukCollection", + pipeline: [graphLookupFilView], + cursor: {}, + collation: {locale: "fil"} +})); + +// You can't lookup into a view when aggregating a collection whose default collation does +// not match the view's default collation. +assert.commandFailedWithCode( + viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [lookupFilView]}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "simpleCollection", + cursor: {}, + pipeline: [makeNestedLookupFilView("simpleCollation")] +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({aggregate: "simpleCollection", cursor: {}, pipeline: [graphLookupFilView]}), + ErrorCodes.OptionNotSupportedOnView); + +// You can't lookup into a view when aggregating a collection and the operation's collation +// does not match the view's default collation. 
+assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "filCollection", + pipeline: [lookupFilView], + cursor: {}, + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "filCollection", + pipeline: [makeNestedLookupFilView("filCollection")], + cursor: {}, + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "filCollection", + pipeline: [graphLookupFilView], + cursor: {}, + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); + +// You may perform an aggregation involving multiple views if they all have the same default +// collation. +assert.commandWorked(viewsDB.runCommand( + {create: "simpleView2", viewOn: "simpleCollection", collation: {locale: "simple"}})); +assert.commandWorked( + viewsDB.runCommand({aggregate: "simpleView2", pipeline: [lookupSimpleView], cursor: {}})); +assert.commandWorked( + viewsDB.runCommand({aggregate: "simpleView2", pipeline: [graphLookupSimpleView], cursor: {}})); + +// You may perform an aggregation involving multiple views and collections if all the views +// have the same default collation. +const graphLookupUkCollection = { $graphLookup: { from: "ukCollection", startWith: "$_id", @@ -353,181 +345,170 @@ as: "matched" } }; - assert.commandWorked(viewsDB.runCommand({ - aggregate: "simpleView2", - pipeline: [lookupSimpleView, graphLookupUkCollection], - cursor: {} - })); - - // You cannot perform an aggregation involving multiple views if the views don't all have - // the same default collation. - assert.commandFailedWithCode( - viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView], cursor: {}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView], cursor: {}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - aggregate: "simpleCollection", - pipeline: [lookupFilView, graphLookupSimpleView], - cursor: {} - }), - ErrorCodes.OptionNotSupportedOnView); - - // You cannot create a view that depends on another view with a different default collation. - assert.commandFailedWithCode( - viewsDB.runCommand({create: "zhView", viewOn: "filView", collation: {locale: "zh"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - create: "zhView", - viewOn: "simpleCollection", - pipeline: [lookupFilView], - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - create: "zhView", - viewOn: "simpleCollection", - pipeline: [makeNestedLookupFilView("zhView")], - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - create: "zhView", - viewOn: "simpleCollection", - pipeline: [graphLookupSimpleView], - collation: {locale: "zh"} - }), - ErrorCodes.OptionNotSupportedOnView); - - // You cannot modify a view to depend on another view with a different default collation. 
- assert.commandWorked(viewsDB.runCommand( - {create: "esView", viewOn: "simpleCollection", collation: {locale: "es"}})); - assert.commandFailedWithCode( - viewsDB.runCommand({collMod: "esView", viewOn: "filView", pipeline: []}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {collMod: "esView", viewOn: "simpleCollection", pipeline: [lookupSimpleView]}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {collMod: "esView", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}), - ErrorCodes.OptionNotSupportedOnView); - - // Views cannot be dropped and recreated with a different collation if other views depend on - // that view. - assert.commandWorked( - viewsDB.runCommand({create: "filView2", viewOn: "filView", collation: {locale: "fil"}})); - assert.commandWorked(viewsDB.runCommand({drop: "filView"})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandWorked( - viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}})); - - // Views cannot be dropped and recreated with a different collation if other views depend on - // that view via $lookup or $graphLookup. - assert.commandWorked(viewsDB.runCommand( - {collMod: "filView2", viewOn: "simpleCollection", pipeline: [lookupFilView]})); - assert.commandWorked(viewsDB.runCommand({drop: "filView"})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandWorked(viewsDB.runCommand( - {create: "filView", viewOn: "ukCollection", pipeline: [], collation: {locale: "fil"}})); - - assert.commandWorked(viewsDB.runCommand( - {collMod: "filView2", viewOn: "simpleCollection", pipeline: [graphLookupFilView]})); - assert.commandWorked(viewsDB.runCommand({drop: "filView"})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand( - {create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), - ErrorCodes.OptionNotSupportedOnView); - - // If two views "A" and "C" have different collations and depend on the namespace "B", then "B" - // cannot be created as a view. 
- assert.commandWorked( - viewsDB.runCommand({create: "A", viewOn: "B", collation: {locale: "hsb"}})); - assert.commandWorked( - viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandWorked(viewsDB.runCommand({drop: "B"})); - assert.commandWorked( - viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "wae"}}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({create: "B", viewOn: "other"}), - ErrorCodes.OptionNotSupportedOnView); - - // Make sure that when an operation does not specify the collation, it correctly uses the - // default collation associated with the view. For this, we set up a new backing collection with - // a case-insensitive view. - assert.commandWorked(viewsDB.runCommand({create: "case_sensitive_coll"})); - assert.commandWorked(viewsDB.runCommand({ - create: "case_insensitive_view", - viewOn: "case_sensitive_coll", - collation: {locale: "en", strength: 1} - })); - - assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "case"})); - assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "Case"})); - assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "CASE"})); - - let explain, cursorStage; - - // Test that aggregate against a view with a default collation correctly uses the collation. - // We expect the pipeline to be optimized away, so there should be no pipeline stages in - // the explain. - assert.eq(1, viewsDB.case_sensitive_coll.aggregate([{$match: {f: "case"}}]).itcount()); - assert.eq(3, viewsDB.case_insensitive_view.aggregate([{$match: {f: "case"}}]).itcount()); - explain = viewsDB.case_insensitive_view.explain().aggregate([{$match: {f: "case"}}]); - assert.neq(null, explain.queryPlanner, tojson(explain)); - assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); - - // Test that count against a view with a default collation correctly uses the collation. - assert.eq(1, viewsDB.case_sensitive_coll.count({f: "case"})); - assert.eq(3, viewsDB.case_insensitive_view.count({f: "case"})); - explain = viewsDB.case_insensitive_view.explain().count({f: "case"}); - cursorStage = getAggPlanStage(explain, "$cursor"); - assert.neq(null, cursorStage, tojson(explain)); - assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage)); - - // Test that distinct against a view with a default collation correctly uses the collation. - assert.eq(3, viewsDB.case_sensitive_coll.distinct("f").length); - assert.eq(1, viewsDB.case_insensitive_view.distinct("f").length); - explain = viewsDB.case_insensitive_view.explain().distinct("f"); - cursorStage = getAggPlanStage(explain, "$cursor"); - assert.neq(null, cursorStage, tojson(explain)); - assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage)); - - // Test that find against a view with a default collation correctly uses the collation. - // We expect the pipeline to be optimized away, so there should be no pipeline stages in - // the explain output. 
- let findRes = viewsDB.runCommand({find: "case_sensitive_coll", filter: {f: "case"}}); - assert.commandWorked(findRes); - assert.eq(1, findRes.cursor.firstBatch.length); - findRes = viewsDB.runCommand({find: "case_insensitive_view", filter: {f: "case"}}); - assert.commandWorked(findRes); - assert.eq(3, findRes.cursor.firstBatch.length); - explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}}); - assert.neq(null, explain.queryPlanner, tojson(explain)); - assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); +assert.commandWorked(viewsDB.runCommand( + {aggregate: "simpleView2", pipeline: [lookupSimpleView, graphLookupUkCollection], cursor: {}})); + +// You cannot perform an aggregation involving multiple views if the views don't all have +// the same default collation. +assert.commandFailedWithCode( + viewsDB.runCommand({aggregate: "filView", pipeline: [lookupSimpleView], cursor: {}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({aggregate: "simpleView", pipeline: [lookupFilView], cursor: {}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + aggregate: "simpleCollection", + pipeline: [lookupFilView, graphLookupSimpleView], + cursor: {} +}), + ErrorCodes.OptionNotSupportedOnView); + +// You cannot create a view that depends on another view with a different default collation. +assert.commandFailedWithCode( + viewsDB.runCommand({create: "zhView", viewOn: "filView", collation: {locale: "zh"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + create: "zhView", + viewOn: "simpleCollection", + pipeline: [lookupFilView], + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + create: "zhView", + viewOn: "simpleCollection", + pipeline: [makeNestedLookupFilView("zhView")], + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + create: "zhView", + viewOn: "simpleCollection", + pipeline: [graphLookupSimpleView], + collation: {locale: "zh"} +}), + ErrorCodes.OptionNotSupportedOnView); + +// You cannot modify a view to depend on another view with a different default collation. +assert.commandWorked( + viewsDB.runCommand({create: "esView", viewOn: "simpleCollection", collation: {locale: "es"}})); +assert.commandFailedWithCode( + viewsDB.runCommand({collMod: "esView", viewOn: "filView", pipeline: []}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {collMod: "esView", viewOn: "simpleCollection", pipeline: [lookupSimpleView]}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand( + {collMod: "esView", viewOn: "simpleCollection", pipeline: [graphLookupFilView]}), + ErrorCodes.OptionNotSupportedOnView); + +// Views cannot be dropped and recreated with a different collation if other views depend on +// that view. 
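// [Editor's note: an illustrative sketch, not part of this patch.] The create and collMod failures
// above come from a single consistency check: every view that a view definition reaches, whether
// through viewOn, $lookup or $graphLookup, must share the new view's default collation. The names
// below ("enView", "enViewOnFil") are hypothetical, but the outcomes mirror the assertions above:
assert.commandWorked(viewsDB.runCommand(
    {create: "enView", viewOn: "simpleCollection", collation: {locale: "en"}}));  // plain collection: any default is fine
assert.commandFailedWithCode(
    viewsDB.runCommand({create: "enViewOnFil", viewOn: "filView", collation: {locale: "en"}}),
    ErrorCodes.OptionNotSupportedOnView);  // filView defaults to {locale: "fil"}, so "en" is rejected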
+assert.commandWorked( + viewsDB.runCommand({create: "filView2", viewOn: "filView", collation: {locale: "fil"}})); +assert.commandWorked(viewsDB.runCommand({drop: "filView"})); +assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandWorked( + viewsDB.createView("filView", "ukCollection", [], {collation: {locale: "fil"}})); + +// Views cannot be dropped and recreated with a different collation if other views depend on +// that view via $lookup or $graphLookup. +assert.commandWorked(viewsDB.runCommand( + {collMod: "filView2", viewOn: "simpleCollection", pipeline: [lookupFilView]})); +assert.commandWorked(viewsDB.runCommand({drop: "filView"})); +assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandWorked(viewsDB.runCommand( + {create: "filView", viewOn: "ukCollection", pipeline: [], collation: {locale: "fil"}})); + +assert.commandWorked(viewsDB.runCommand( + {collMod: "filView2", viewOn: "simpleCollection", pipeline: [graphLookupFilView]})); +assert.commandWorked(viewsDB.runCommand({drop: "filView"})); +assert.commandFailedWithCode(viewsDB.runCommand({create: "filView", viewOn: "simpleCollection"}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "filView", viewOn: "simpleCollection", collation: {locale: "en"}}), + ErrorCodes.OptionNotSupportedOnView); + +// If two views "A" and "C" have different collations and depend on the namespace "B", then "B" +// cannot be created as a view. +assert.commandWorked(viewsDB.runCommand({create: "A", viewOn: "B", collation: {locale: "hsb"}})); +assert.commandWorked( + viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}})); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandWorked(viewsDB.runCommand({drop: "B"})); +assert.commandWorked(viewsDB.runCommand({create: "C", viewOn: "B", collation: {locale: "wae"}})); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "hsb"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "B", viewOn: "other", collation: {locale: "wae"}}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({create: "B", viewOn: "other"}), + ErrorCodes.OptionNotSupportedOnView); + +// Make sure that when an operation does not specify the collation, it correctly uses the +// default collation associated with the view. For this, we set up a new backing collection with +// a case-insensitive view. 
+assert.commandWorked(viewsDB.runCommand({create: "case_sensitive_coll"})); +assert.commandWorked(viewsDB.runCommand({ + create: "case_insensitive_view", + viewOn: "case_sensitive_coll", + collation: {locale: "en", strength: 1} +})); + +assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "case"})); +assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "Case"})); +assert.writeOK(viewsDB.case_sensitive_coll.insert({f: "CASE"})); + +let explain, cursorStage; + +// Test that aggregate against a view with a default collation correctly uses the collation. +// We expect the pipeline to be optimized away, so there should be no pipeline stages in +// the explain. +assert.eq(1, viewsDB.case_sensitive_coll.aggregate([{$match: {f: "case"}}]).itcount()); +assert.eq(3, viewsDB.case_insensitive_view.aggregate([{$match: {f: "case"}}]).itcount()); +explain = viewsDB.case_insensitive_view.explain().aggregate([{$match: {f: "case"}}]); +assert.neq(null, explain.queryPlanner, tojson(explain)); +assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); + +// Test that count against a view with a default collation correctly uses the collation. +assert.eq(1, viewsDB.case_sensitive_coll.count({f: "case"})); +assert.eq(3, viewsDB.case_insensitive_view.count({f: "case"})); +explain = viewsDB.case_insensitive_view.explain().count({f: "case"}); +cursorStage = getAggPlanStage(explain, "$cursor"); +assert.neq(null, cursorStage, tojson(explain)); +assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage)); + +// Test that distinct against a view with a default collation correctly uses the collation. +assert.eq(3, viewsDB.case_sensitive_coll.distinct("f").length); +assert.eq(1, viewsDB.case_insensitive_view.distinct("f").length); +explain = viewsDB.case_insensitive_view.explain().distinct("f"); +cursorStage = getAggPlanStage(explain, "$cursor"); +assert.neq(null, cursorStage, tojson(explain)); +assert.eq(1, cursorStage.$cursor.queryPlanner.collation.strength, tojson(cursorStage)); + +// Test that find against a view with a default collation correctly uses the collation. +// We expect the pipeline to be optimized away, so there should be no pipeline stages in +// the explain output. +let findRes = viewsDB.runCommand({find: "case_sensitive_coll", filter: {f: "case"}}); +assert.commandWorked(findRes); +assert.eq(1, findRes.cursor.firstBatch.length); +findRes = viewsDB.runCommand({find: "case_insensitive_view", filter: {f: "case"}}); +assert.commandWorked(findRes); +assert.eq(3, findRes.cursor.firstBatch.length); +explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}}); +assert.neq(null, explain.queryPlanner, tojson(explain)); +assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); }()); diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js index cfef3775569..8fa24191959 100644 --- a/jstests/core/views/views_count.js +++ b/jstests/core/views/views_count.js @@ -3,83 +3,82 @@ // @tags: [requires_fastcount] (function() { - "use strict"; +"use strict"; - var viewsDB = db.getSiblingDB("views_count"); - assert.commandWorked(viewsDB.dropDatabase()); +var viewsDB = db.getSiblingDB("views_count"); +assert.commandWorked(viewsDB.dropDatabase()); - // Insert documents into a collection. - let coll = viewsDB.getCollection("coll"); - let bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 10; i++) { - bulk.insert({x: i}); - } - assert.writeOK(bulk.execute()); +// Insert documents into a collection. 
+let coll = viewsDB.getCollection("coll"); +let bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 10; i++) { + bulk.insert({x: i}); +} +assert.writeOK(bulk.execute()); - // Create views on the data. - assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"})); - assert.commandWorked(viewsDB.runCommand( - {create: "greaterThanThreeView", viewOn: "coll", pipeline: [{$match: {x: {$gt: 3}}}]})); - assert.commandWorked(viewsDB.runCommand({ - create: "lessThanSevenView", - viewOn: "greaterThanThreeView", - pipeline: [{$match: {x: {$lt: 7}}}] - })); - let identityView = viewsDB.getCollection("identityView"); - let greaterThanThreeView = viewsDB.getCollection("greaterThanThreeView"); - let lessThanSevenView = viewsDB.getCollection("lessThanSevenView"); +// Create views on the data. +assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"})); +assert.commandWorked(viewsDB.runCommand( + {create: "greaterThanThreeView", viewOn: "coll", pipeline: [{$match: {x: {$gt: 3}}}]})); +assert.commandWorked(viewsDB.runCommand({ + create: "lessThanSevenView", + viewOn: "greaterThanThreeView", + pipeline: [{$match: {x: {$lt: 7}}}] +})); +let identityView = viewsDB.getCollection("identityView"); +let greaterThanThreeView = viewsDB.getCollection("greaterThanThreeView"); +let lessThanSevenView = viewsDB.getCollection("lessThanSevenView"); - // Count on a view, with or without a query. - assert.eq(coll.count(), identityView.count()); - assert.eq(coll.count({}), identityView.count({})); - assert.eq(coll.count({x: {$exists: true}}), identityView.count({x: {$exists: true}})); - assert.eq(coll.count({x: 0}), identityView.count({x: 0})); - assert.eq(6, greaterThanThreeView.count()); - assert.eq(6, greaterThanThreeView.count({})); - assert.eq(3, lessThanSevenView.count()); - assert.eq(3, lessThanSevenView.count({})); +// Count on a view, with or without a query. +assert.eq(coll.count(), identityView.count()); +assert.eq(coll.count({}), identityView.count({})); +assert.eq(coll.count({x: {$exists: true}}), identityView.count({x: {$exists: true}})); +assert.eq(coll.count({x: 0}), identityView.count({x: 0})); +assert.eq(6, greaterThanThreeView.count()); +assert.eq(6, greaterThanThreeView.count({})); +assert.eq(3, lessThanSevenView.count()); +assert.eq(3, lessThanSevenView.count({})); - // Test empty counts. - assert.eq(coll.count({x: -1}), identityView.count({x: -1})); - assert.eq(0, greaterThanThreeView.count({x: 2})); - assert.eq(0, lessThanSevenView.count({x: 9})); +// Test empty counts. +assert.eq(coll.count({x: -1}), identityView.count({x: -1})); +assert.eq(0, greaterThanThreeView.count({x: 2})); +assert.eq(0, lessThanSevenView.count({x: 9})); - // Counting on views works with limit and skip. - assert.eq(7, identityView.count({x: {$exists: true}}, {skip: 3})); - assert.eq(3, greaterThanThreeView.count({x: {$lt: 100}}, {limit: 3})); - assert.eq(1, lessThanSevenView.count({}, {skip: 1, limit: 1})); +// Counting on views works with limit and skip. +assert.eq(7, identityView.count({x: {$exists: true}}, {skip: 3})); +assert.eq(3, greaterThanThreeView.count({x: {$lt: 100}}, {limit: 3})); +assert.eq(1, lessThanSevenView.count({}, {skip: 1, limit: 1})); - // Count with explain works on a view. 
- assert.commandWorked(lessThanSevenView.explain().count()); - assert.commandWorked(greaterThanThreeView.explain().count({x: 6})); - let explainPlan = lessThanSevenView.explain().count({foo: "bar"}); - assert.commandWorked(explainPlan); - assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_count.coll"); +// Count with explain works on a view. +assert.commandWorked(lessThanSevenView.explain().count()); +assert.commandWorked(greaterThanThreeView.explain().count({x: 6})); +let explainPlan = lessThanSevenView.explain().count({foo: "bar"}); +assert.commandWorked(explainPlan); +assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_count.coll"); - // Count with explicit explain modes works on a view. - explainPlan = - assert.commandWorked(lessThanSevenView.explain("queryPlanner").count({x: {$gte: 5}})); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); - assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +// Count with explicit explain modes works on a view. +explainPlan = assert.commandWorked(lessThanSevenView.explain("queryPlanner").count({x: {$gte: 5}})); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); +assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - explainPlan = - assert.commandWorked(lessThanSevenView.explain("executionStats").count({x: {$gte: 5}})); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); - assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); +explainPlan = + assert.commandWorked(lessThanSevenView.explain("executionStats").count({x: {$gte: 5}})); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); +assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - explainPlan = - assert.commandWorked(lessThanSevenView.explain("allPlansExecution").count({x: {$gte: 5}})); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); - assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); +explainPlan = + assert.commandWorked(lessThanSevenView.explain("allPlansExecution").count({x: {$gte: 5}})); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_count.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); +assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - // Count with hint works on a view. - assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"})); +// Count with hint works on a view. 
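// [Editor's note: an illustrative sketch, not part of this patch.] The namespace assertion above
// ("views_count.coll") reflects how counts on views are answered: the command is rewritten into an
// aggregation over the backing collection, with the view pipelines prepended and a counting stage
// appended. Conceptually (the server's actual rewrite may use different stages),
// lessThanSevenView.count({foo: "bar"}) behaves like:
let equivalentCount = viewsDB.coll.aggregate([
    {$match: {x: {$gt: 3}}},   // greaterThanThreeView's pipeline
    {$match: {x: {$lt: 7}}},   // lessThanSevenView's pipeline (it is a view on greaterThanThreeView)
    {$match: {foo: "bar"}},    // the query passed to count()
    {$count: "n"}              // counting stage
]).toArray();
assert.eq(0, equivalentCount.length);  // nothing matches {foo: "bar"}, so $count produces no document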
+assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"})); - assert.commandFailedWithCode( - viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}), - ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode( + viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}), + ErrorCodes.OptionNotSupportedOnView); }()); diff --git a/jstests/core/views/views_creation.js b/jstests/core/views/views_creation.js index 2312b78e646..1765b9c4182 100644 --- a/jstests/core/views/views_creation.js +++ b/jstests/core/views/views_creation.js @@ -4,112 +4,110 @@ // ] (function() { - "use strict"; - - // For arrayEq. - load("jstests/aggregation/extras/utils.js"); - - const viewsDBName = "views_creation"; - - let viewsDB = db.getSiblingDB(viewsDBName); - assert.commandWorked(viewsDB.dropDatabase()); - - let collNames = viewsDB.getCollectionNames(); - assert.eq(0, collNames.length, tojson(collNames)); - - // You cannot create a view that starts with 'system.'. - assert.commandFailedWithCode( - viewsDB.runCommand({create: "system.special", viewOn: "collection"}), - ErrorCodes.InvalidNamespace, - "Created an illegal view named 'system.views'"); - - // Collections that start with 'system.' that are not special to MongoDB fail with a different - // error code. - assert.commandFailedWithCode(viewsDB.runCommand({create: "system.foo", viewOn: "collection"}), - ErrorCodes.InvalidNamespace, - "Created an illegal view named 'system.foo'"); - - // Create a collection for test purposes. - assert.commandWorked(viewsDB.runCommand({create: "collection"})); - - let pipe = [{$match: {}}]; - - // Create a "regular" view on a collection. - assert.commandWorked( - viewsDB.runCommand({create: "view", viewOn: "collection", pipeline: pipe})); - - collNames = viewsDB.getCollectionNames().filter((function(coll) { - return !coll.startsWith("system."); - })); - assert.eq(2, collNames.length, tojson(collNames)); - let res = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}}); - assert.commandWorked(res); - - // Ensure that the output of listCollections has all the expected options for a view. - let expectedListCollectionsOutput = [{ - name: "view", - type: "view", - options: {viewOn: "collection", pipeline: pipe}, - info: {readOnly: true} - }]; - assert(arrayEq(res.cursor.firstBatch, expectedListCollectionsOutput), tojson({ - expectedListCollectionsOutput: expectedListCollectionsOutput, - got: res.cursor.firstBatch - })); - - // Create a view on a non-existent collection. - assert.commandWorked( - viewsDB.runCommand({create: "viewOnNonexistent", viewOn: "nonexistent", pipeline: pipe})); - - // Create a view but don't specify a pipeline; this should default to something sane. - assert.commandWorked( - viewsDB.runCommand({create: "viewWithDefaultPipeline", viewOn: "collection"})); - - // Specifying a pipeline but no view namespace must fail. - assert.commandFailed(viewsDB.runCommand({create: "viewNoViewNamespace", pipeline: pipe})); - - // Create a view on another view. - assert.commandWorked( - viewsDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: pipe})); - - // View names are constrained to the same limitations as collection names. 
- assert.commandFailed(viewsDB.runCommand({create: "", viewOn: "collection", pipeline: pipe})); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "system.local.new", viewOn: "collection", pipeline: pipe}), - ErrorCodes.InvalidNamespace); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "dollar$", viewOn: "collection", pipeline: pipe}), - ErrorCodes.InvalidNamespace); - - // You cannot create a view with a $out stage, by itself or nested inside of a different stage. - const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047; - const outStage = {$out: "nonExistentCollection"}; - assert.commandFailedWithCode( - viewsDB.runCommand({create: "viewWithOut", viewOn: "collection", pipeline: [outStage]}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandFailedWithCode(viewsDB.runCommand({ - create: "viewWithOutInLookup", - viewOn: "collection", - pipeline: [{$lookup: {from: "other", pipeline: [outStage], as: "result"}}] - }), - ERROR_CODE_OUT_BANNED_IN_LOOKUP); - assert.commandFailedWithCode(viewsDB.runCommand({ - create: "viewWithOutInFacet", - viewOn: "collection", - pipeline: [{$facet: {output: [outStage]}}] - }), - 40600); - - // These test that, when an existing view in system.views is invalid because of a $out in the - // pipeline, the database errors on creation of a new view. - assert.commandWorked(viewsDB.system.views.insert({ - _id: `${viewsDBName}.invalidView`, - viewOn: "collection", - pipeline: [{$project: {_id: false}}, {$out: "notExistingCollection"}] - })); - assert.commandFailedWithCode( - viewsDB.runCommand({create: "viewWithBadViewCatalog", viewOn: "collection", pipeline: []}), - ErrorCodes.OptionNotSupportedOnView); - assert.commandWorked( - viewsDB.system.views.remove({_id: `${viewsDBName}.invalidView`}, {justOne: true})); +"use strict"; + +// For arrayEq. +load("jstests/aggregation/extras/utils.js"); + +const viewsDBName = "views_creation"; + +let viewsDB = db.getSiblingDB(viewsDBName); +assert.commandWorked(viewsDB.dropDatabase()); + +let collNames = viewsDB.getCollectionNames(); +assert.eq(0, collNames.length, tojson(collNames)); + +// You cannot create a view that starts with 'system.'. +assert.commandFailedWithCode(viewsDB.runCommand({create: "system.special", viewOn: "collection"}), + ErrorCodes.InvalidNamespace, + "Created an illegal view named 'system.views'"); + +// Collections that start with 'system.' that are not special to MongoDB fail with a different +// error code. +assert.commandFailedWithCode(viewsDB.runCommand({create: "system.foo", viewOn: "collection"}), + ErrorCodes.InvalidNamespace, + "Created an illegal view named 'system.foo'"); + +// Create a collection for test purposes. +assert.commandWorked(viewsDB.runCommand({create: "collection"})); + +let pipe = [{$match: {}}]; + +// Create a "regular" view on a collection. +assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection", pipeline: pipe})); + +collNames = viewsDB.getCollectionNames().filter((function(coll) { + return !coll.startsWith("system."); +})); +assert.eq(2, collNames.length, tojson(collNames)); +let res = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}}); +assert.commandWorked(res); + +// Ensure that the output of listCollections has all the expected options for a view. 
+let expectedListCollectionsOutput = [{ + name: "view", + type: "view", + options: {viewOn: "collection", pipeline: pipe}, + info: {readOnly: true} +}]; +assert(arrayEq(res.cursor.firstBatch, expectedListCollectionsOutput), tojson({ + expectedListCollectionsOutput: expectedListCollectionsOutput, + got: res.cursor.firstBatch + })); + +// Create a view on a non-existent collection. +assert.commandWorked( + viewsDB.runCommand({create: "viewOnNonexistent", viewOn: "nonexistent", pipeline: pipe})); + +// Create a view but don't specify a pipeline; this should default to something sane. +assert.commandWorked(viewsDB.runCommand({create: "viewWithDefaultPipeline", viewOn: "collection"})); + +// Specifying a pipeline but no view namespace must fail. +assert.commandFailed(viewsDB.runCommand({create: "viewNoViewNamespace", pipeline: pipe})); + +// Create a view on another view. +assert.commandWorked(viewsDB.runCommand({create: "viewOnView", viewOn: "view", pipeline: pipe})); + +// View names are constrained to the same limitations as collection names. +assert.commandFailed(viewsDB.runCommand({create: "", viewOn: "collection", pipeline: pipe})); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "system.local.new", viewOn: "collection", pipeline: pipe}), + ErrorCodes.InvalidNamespace); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "dollar$", viewOn: "collection", pipeline: pipe}), + ErrorCodes.InvalidNamespace); + +// You cannot create a view with a $out stage, by itself or nested inside of a different stage. +const ERROR_CODE_OUT_BANNED_IN_LOOKUP = 51047; +const outStage = { + $out: "nonExistentCollection" +}; +assert.commandFailedWithCode( + viewsDB.runCommand({create: "viewWithOut", viewOn: "collection", pipeline: [outStage]}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandFailedWithCode(viewsDB.runCommand({ + create: "viewWithOutInLookup", + viewOn: "collection", + pipeline: [{$lookup: {from: "other", pipeline: [outStage], as: "result"}}] +}), + ERROR_CODE_OUT_BANNED_IN_LOOKUP); +assert.commandFailedWithCode(viewsDB.runCommand({ + create: "viewWithOutInFacet", + viewOn: "collection", + pipeline: [{$facet: {output: [outStage]}}] +}), + 40600); + +// These test that, when an existing view in system.views is invalid because of a $out in the +// pipeline, the database errors on creation of a new view. +assert.commandWorked(viewsDB.system.views.insert({ + _id: `${viewsDBName}.invalidView`, + viewOn: "collection", + pipeline: [{$project: {_id: false}}, {$out: "notExistingCollection"}] +})); +assert.commandFailedWithCode( + viewsDB.runCommand({create: "viewWithBadViewCatalog", viewOn: "collection", pipeline: []}), + ErrorCodes.OptionNotSupportedOnView); +assert.commandWorked( + viewsDB.system.views.remove({_id: `${viewsDBName}.invalidView`}, {justOne: true})); }()); diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js index 29ddcdc5269..8ef9e208a81 100644 --- a/jstests/core/views/views_distinct.js +++ b/jstests/core/views/views_distinct.js @@ -1,142 +1,140 @@ // Test the distinct command with views. (function() { - "use strict"; - - // For arrayEq. We don't use array.eq as it does an ordered comparison on arrays but we don't - // care about order in the distinct response. - load("jstests/aggregation/extras/utils.js"); - - var viewsDB = db.getSiblingDB("views_distinct"); - assert.commandWorked(viewsDB.dropDatabase()); - - // Populate a collection with some test data. 
- let allDocuments = []; - allDocuments.push({_id: "New York", state: "NY", pop: 7}); - allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); - allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); - allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); - allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); - - let coll = viewsDB.getCollection("coll"); - let bulk = coll.initializeUnorderedBulkOp(); - allDocuments.forEach(function(doc) { - bulk.insert(doc); - }); - assert.writeOK(bulk.execute()); - - // Create views on the data. - assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"})); - assert.commandWorked(viewsDB.runCommand( - {create: "largePopView", viewOn: "identityView", pipeline: [{$match: {pop: {$gt: 5}}}]})); - let identityView = viewsDB.getCollection("identityView"); - let largePopView = viewsDB.getCollection("largePopView"); - - function assertIdentityViewDistinctMatchesCollection(key, query) { - query = (query === undefined) ? {} : query; - const collDistinct = coll.distinct(key, query); - const viewDistinct = identityView.distinct(key, query); - assert(arrayEq(collDistinct, viewDistinct), - "Distinct on a collection did not match distinct on its identity view; got " + - tojson(viewDistinct) + " but expected " + tojson(collDistinct)); - } - - // Test basic distinct requests on known fields without a query. - assertIdentityViewDistinctMatchesCollection("pop"); - assertIdentityViewDistinctMatchesCollection("_id"); - assert(arrayEq([7, 10], largePopView.distinct("pop"))); - assert(arrayEq(["New York", "Palo Alto"], largePopView.distinct("_id"))); - - // Test distinct with the presence of a query. - assertIdentityViewDistinctMatchesCollection("state", {}); - assertIdentityViewDistinctMatchesCollection("pop", {pop: {$exists: true}}); - assertIdentityViewDistinctMatchesCollection("state", {pop: {$gt: 3}}); - assertIdentityViewDistinctMatchesCollection("_id", {state: "CA"}); - assert(arrayEq(["CA"], largePopView.distinct("state", {pop: {$gte: 8}}))); - assert(arrayEq([7], largePopView.distinct("pop", {state: "NY"}))); - - // Test distinct where we expect an empty set response. - assertIdentityViewDistinctMatchesCollection("nonexistent"); - assertIdentityViewDistinctMatchesCollection("pop", {pop: {$gt: 1000}}); - assert.eq([], largePopView.distinct("nonexistent")); - assert.eq([], largePopView.distinct("_id", {state: "FL"})); - - // Explain works with distinct. - assert.commandWorked(identityView.explain().distinct("_id")); - assert.commandWorked(largePopView.explain().distinct("pop", {state: "CA"})); - let explainPlan = largePopView.explain().count({foo: "bar"}); - assert.commandWorked(explainPlan); - assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], - "views_distinct.coll"); - - // Distinct with explicit explain modes works on a view. 
- explainPlan = assert.commandWorked(largePopView.explain("queryPlanner").distinct("pop")); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); - assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - - explainPlan = assert.commandWorked(largePopView.explain("executionStats").distinct("pop")); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); - assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - - explainPlan = assert.commandWorked(largePopView.explain("allPlansExecution").distinct("pop")); - assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); - assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); - assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); - assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); - - // Distinct commands fail when they try to change the collation of a view. - assert.commandFailedWithCode( - viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}), - ErrorCodes.OptionNotSupportedOnView); - - // Test distinct on nested objects, nested arrays and nullish values. - coll.drop(); - allDocuments = []; - allDocuments.push({a: 1, b: [2, 3, [4, 5], {c: 6}], d: {e: [1, 2]}}); - allDocuments.push({a: [1], b: [2, 3, 4, [5]], c: 6, d: {e: 1}}); - allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [[{e: 1}]]}); - allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [{e: {f: 1}}]}); - allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: {e: [[{f: 1}]]}}); - allDocuments.push({a: [1, 2], b: 3, c: [6], d: [{e: 1}, {e: [1, 2]}, {e: {someObject: 1}}]}); - allDocuments.push({a: [1, 2], b: [4, 5], c: [undefined], d: [1]}); - allDocuments.push({a: null, b: [4, 5, null, undefined], c: [], d: {e: null}}); - allDocuments.push({a: undefined, b: null, c: [null], d: {e: undefined}}); - - bulk = coll.initializeUnorderedBulkOp(); - allDocuments.forEach(function(doc) { - bulk.insert(doc); - }); - assert.writeOK(bulk.execute()); - - assertIdentityViewDistinctMatchesCollection("a"); - assertIdentityViewDistinctMatchesCollection("b"); - assertIdentityViewDistinctMatchesCollection("c"); - assertIdentityViewDistinctMatchesCollection("d"); - assertIdentityViewDistinctMatchesCollection("e"); - assertIdentityViewDistinctMatchesCollection("d.e"); - assertIdentityViewDistinctMatchesCollection("d.e.f"); - - // Test distinct on a deeply nested object through arrays. - coll.drop(); - assert.commandWorked(coll.insert({ - a: [ - {b: [{c: [{d: 1}]}]}, - {b: {c: "not leaf"}}, - {b: {c: [{d: 2, "not leaf": "not leaf"}]}}, - {b: [{c: {d: 3}}]}, - {b: {c: {d: 4}}, "not leaf": "not leaf"}, - "not leaf", - // The documents below should not get traversed by the distinct() because of the - // doubly-nested arrays. - [[{b: {c: {d: "not leaf"}}}]], - [{b: {c: [[{d: "not leaf"}]]}}], - ] - })); - assert.commandWorked(coll.insert({a: "not leaf"})); - assertIdentityViewDistinctMatchesCollection("a"); - assertIdentityViewDistinctMatchesCollection("a.b"); - assertIdentityViewDistinctMatchesCollection("a.b.c"); - assertIdentityViewDistinctMatchesCollection("a.b.c.d"); - +"use strict"; + +// For arrayEq. 
We don't use array.eq as it does an ordered comparison on arrays but we don't +// care about order in the distinct response. +load("jstests/aggregation/extras/utils.js"); + +var viewsDB = db.getSiblingDB("views_distinct"); +assert.commandWorked(viewsDB.dropDatabase()); + +// Populate a collection with some test data. +let allDocuments = []; +allDocuments.push({_id: "New York", state: "NY", pop: 7}); +allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); +allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); +allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); +allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); + +let coll = viewsDB.getCollection("coll"); +let bulk = coll.initializeUnorderedBulkOp(); +allDocuments.forEach(function(doc) { + bulk.insert(doc); +}); +assert.writeOK(bulk.execute()); + +// Create views on the data. +assert.commandWorked(viewsDB.runCommand({create: "identityView", viewOn: "coll"})); +assert.commandWorked(viewsDB.runCommand( + {create: "largePopView", viewOn: "identityView", pipeline: [{$match: {pop: {$gt: 5}}}]})); +let identityView = viewsDB.getCollection("identityView"); +let largePopView = viewsDB.getCollection("largePopView"); + +function assertIdentityViewDistinctMatchesCollection(key, query) { + query = (query === undefined) ? {} : query; + const collDistinct = coll.distinct(key, query); + const viewDistinct = identityView.distinct(key, query); + assert(arrayEq(collDistinct, viewDistinct), + "Distinct on a collection did not match distinct on its identity view; got " + + tojson(viewDistinct) + " but expected " + tojson(collDistinct)); +} + +// Test basic distinct requests on known fields without a query. +assertIdentityViewDistinctMatchesCollection("pop"); +assertIdentityViewDistinctMatchesCollection("_id"); +assert(arrayEq([7, 10], largePopView.distinct("pop"))); +assert(arrayEq(["New York", "Palo Alto"], largePopView.distinct("_id"))); + +// Test distinct with the presence of a query. +assertIdentityViewDistinctMatchesCollection("state", {}); +assertIdentityViewDistinctMatchesCollection("pop", {pop: {$exists: true}}); +assertIdentityViewDistinctMatchesCollection("state", {pop: {$gt: 3}}); +assertIdentityViewDistinctMatchesCollection("_id", {state: "CA"}); +assert(arrayEq(["CA"], largePopView.distinct("state", {pop: {$gte: 8}}))); +assert(arrayEq([7], largePopView.distinct("pop", {state: "NY"}))); + +// Test distinct where we expect an empty set response. +assertIdentityViewDistinctMatchesCollection("nonexistent"); +assertIdentityViewDistinctMatchesCollection("pop", {pop: {$gt: 1000}}); +assert.eq([], largePopView.distinct("nonexistent")); +assert.eq([], largePopView.distinct("_id", {state: "FL"})); + +// Explain works with distinct. +assert.commandWorked(identityView.explain().distinct("_id")); +assert.commandWorked(largePopView.explain().distinct("pop", {state: "CA"})); +let explainPlan = largePopView.explain().count({foo: "bar"}); +assert.commandWorked(explainPlan); +assert.eq(explainPlan["stages"][0]["$cursor"]["queryPlanner"]["namespace"], "views_distinct.coll"); + +// Distinct with explicit explain modes works on a view. 
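// [Editor's note: an illustrative sketch, not part of this patch.] As with count, the
// "views_distinct.coll" namespace seen in these explain outputs reflects that a distinct on a view
// is answered by an aggregation over the backing collection, with the view's pipeline prepended.
// One way to picture largePopView.distinct("pop") (the server's actual rewrite may differ):
let distinctLikeResult = viewsDB.coll.aggregate([
    {$match: {pop: {$gt: 5}}},                                    // largePopView's pipeline (identityView adds nothing)
    {$unwind: {path: "$pop", preserveNullAndEmptyArrays: true}},  // distinct reports array elements individually
    {$group: {_id: null, values: {$addToSet: "$pop"}}}
]).toArray()[0].values;
assert(arrayEq([7, 10], distinctLikeResult));  // matches the largePopView.distinct("pop") assertion above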
+explainPlan = assert.commandWorked(largePopView.explain("queryPlanner").distinct("pop")); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); +assert(!explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); + +explainPlan = assert.commandWorked(largePopView.explain("executionStats").distinct("pop")); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); +assert(!explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); + +explainPlan = assert.commandWorked(largePopView.explain("allPlansExecution").distinct("pop")); +assert.eq(explainPlan.stages[0].$cursor.queryPlanner.namespace, "views_distinct.coll"); +assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); +assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); +assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); + +// Distinct commands fail when they try to change the collation of a view. +assert.commandFailedWithCode( + viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}), + ErrorCodes.OptionNotSupportedOnView); + +// Test distinct on nested objects, nested arrays and nullish values. +coll.drop(); +allDocuments = []; +allDocuments.push({a: 1, b: [2, 3, [4, 5], {c: 6}], d: {e: [1, 2]}}); +allDocuments.push({a: [1], b: [2, 3, 4, [5]], c: 6, d: {e: 1}}); +allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [[{e: 1}]]}); +allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: [{e: {f: 1}}]}); +allDocuments.push({a: [[1]], b: [2, 3, [4], [5]], c: 6, d: {e: [[{f: 1}]]}}); +allDocuments.push({a: [1, 2], b: 3, c: [6], d: [{e: 1}, {e: [1, 2]}, {e: {someObject: 1}}]}); +allDocuments.push({a: [1, 2], b: [4, 5], c: [undefined], d: [1]}); +allDocuments.push({a: null, b: [4, 5, null, undefined], c: [], d: {e: null}}); +allDocuments.push({a: undefined, b: null, c: [null], d: {e: undefined}}); + +bulk = coll.initializeUnorderedBulkOp(); +allDocuments.forEach(function(doc) { + bulk.insert(doc); +}); +assert.writeOK(bulk.execute()); + +assertIdentityViewDistinctMatchesCollection("a"); +assertIdentityViewDistinctMatchesCollection("b"); +assertIdentityViewDistinctMatchesCollection("c"); +assertIdentityViewDistinctMatchesCollection("d"); +assertIdentityViewDistinctMatchesCollection("e"); +assertIdentityViewDistinctMatchesCollection("d.e"); +assertIdentityViewDistinctMatchesCollection("d.e.f"); + +// Test distinct on a deeply nested object through arrays. +coll.drop(); +assert.commandWorked(coll.insert({ + a: [ + {b: [{c: [{d: 1}]}]}, + {b: {c: "not leaf"}}, + {b: {c: [{d: 2, "not leaf": "not leaf"}]}}, + {b: [{c: {d: 3}}]}, + {b: {c: {d: 4}}, "not leaf": "not leaf"}, + "not leaf", + // The documents below should not get traversed by the distinct() because of the + // doubly-nested arrays. 
+ [[{b: {c: {d: "not leaf"}}}]], + [{b: {c: [[{d: "not leaf"}]]}}], + ] +})); +assert.commandWorked(coll.insert({a: "not leaf"})); +assertIdentityViewDistinctMatchesCollection("a"); +assertIdentityViewDistinctMatchesCollection("a.b"); +assertIdentityViewDistinctMatchesCollection("a.b.c"); +assertIdentityViewDistinctMatchesCollection("a.b.c.d"); }()); diff --git a/jstests/core/views/views_drop.js b/jstests/core/views/views_drop.js index d93def18eae..2f0b9b7e62e 100644 --- a/jstests/core/views/views_drop.js +++ b/jstests/core/views/views_drop.js @@ -7,30 +7,29 @@ * ] */ (function() { - "use strict"; +"use strict"; - let viewsDBName = "views_drop"; - let viewsDB = db.getSiblingDB(viewsDBName); - viewsDB.dropDatabase(); +let viewsDBName = "views_drop"; +let viewsDB = db.getSiblingDB(viewsDBName); +viewsDB.dropDatabase(); - // Create collection and a view on it. - assert.writeOK(viewsDB.coll.insert({x: 1})); - assert.commandWorked(viewsDB.createView("view", "coll", [])); - assert.eq( - viewsDB.view.find({}, {_id: 0}).toArray(), [{x: 1}], "couldn't find expected doc in view"); +// Create collection and a view on it. +assert.writeOK(viewsDB.coll.insert({x: 1})); +assert.commandWorked(viewsDB.createView("view", "coll", [])); +assert.eq( + viewsDB.view.find({}, {_id: 0}).toArray(), [{x: 1}], "couldn't find expected doc in view"); - // Drop collection, view and system.views in that order, checking along the way. - assert(viewsDB.coll.drop(), "couldn't drop coll"); - assert.eq(viewsDB.view.find().toArray(), [], "view isn't empty after dropping coll"); - assert(viewsDB.view.drop(), "couldn't drop view"); - assert.eq( - viewsDB.system.views.find().toArray(), [], "system.views isn't empty after dropping view"); - assert(viewsDB.system.views.drop(), "couldn't drop system.views"); +// Drop collection, view and system.views in that order, checking along the way. +assert(viewsDB.coll.drop(), "couldn't drop coll"); +assert.eq(viewsDB.view.find().toArray(), [], "view isn't empty after dropping coll"); +assert(viewsDB.view.drop(), "couldn't drop view"); +assert.eq( + viewsDB.system.views.find().toArray(), [], "system.views isn't empty after dropping view"); +assert(viewsDB.system.views.drop(), "couldn't drop system.views"); - // Database should now be empty. - let res = viewsDB.runCommand({listCollections: 1}); - assert.commandWorked(res); - assert.eq(res.cursor.firstBatch, - [], - viewsDBName + " is not empty after deleting views and system.views"); +// Database should now be empty. +let res = viewsDB.runCommand({listCollections: 1}); +assert.commandWorked(res); +assert.eq( + res.cursor.firstBatch, [], viewsDBName + " is not empty after deleting views and system.views"); })(); diff --git a/jstests/core/views/views_find.js b/jstests/core/views/views_find.js index f4a9785a0d8..3a7f5f80ce6 100644 --- a/jstests/core/views/views_find.js +++ b/jstests/core/views/views_find.js @@ -3,111 +3,110 @@ * @tags: [requires_find_command, requires_getmore] */ (function() { - "use strict"; - - // For arrayEq and orderedArrayEq. - load("jstests/aggregation/extras/utils.js"); - - let viewsDB = db.getSiblingDB("views_find"); - assert.commandWorked(viewsDB.dropDatabase()); - - // Helper functions. 
- let assertFindResultEq = function(cmd, expected, ordered) { - let res = viewsDB.runCommand(cmd); - assert.commandWorked(res); - let arr = new DBCommandCursor(viewsDB, res, 5).toArray(); - let errmsg = tojson({expected: expected, got: arr}); - - if (typeof(ordered) === "undefined" || !ordered) - assert(arrayEq(arr, expected), errmsg); - else - assert(orderedArrayEq(arr, expected), errmsg); - }; - - // Populate a collection with some test data. - let allDocuments = []; - allDocuments.push({_id: "New York", state: "NY", pop: 7}); - allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); - allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); - allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); - allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); - - let coll = viewsDB.coll; - let bulk = coll.initializeUnorderedBulkOp(); - allDocuments.forEach(function(doc) { - bulk.insert(doc); - }); - assert.writeOK(bulk.execute()); - - // Create views on the data. - assert.commandWorked( - viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]})); - assert.commandWorked(viewsDB.runCommand({ - create: "noIdView", - viewOn: "coll", - pipeline: [{$match: {}}, {$project: {_id: 0, state: 1, pop: 1}}] - })); - - // Filters and "simple" projections. - assertFindResultEq({find: "identityView"}, allDocuments); - assertFindResultEq({find: "identityView", filter: {state: "NJ"}, projection: {_id: 1}}, - [{_id: "Trenton"}, {_id: "Newark"}]); - - // A view that projects out the _id should still work with the find command. - assertFindResultEq({find: "noIdView", filter: {state: "NY"}, projection: {pop: 1}}, [{pop: 7}]); - - // Sort, limit and batchSize. - const doOrderedSort = true; - assertFindResultEq({find: "identityView", sort: {_id: 1}}, allDocuments, doOrderedSort); - assertFindResultEq( - {find: "identityView", limit: 1, batchSize: 1, sort: {_id: 1}, projection: {_id: 1}}, - [{_id: "New York"}]); - assert.commandFailedWithCode(viewsDB.runCommand({find: "identityView", sort: {$natural: 1}}), - ErrorCodes.InvalidPipelineOperator); - - // Negative batch size and limit should fail. - assert.commandFailed(viewsDB.runCommand({find: "identityView", batchSize: -1})); - assert.commandFailed(viewsDB.runCommand({find: "identityView", limit: -1})); - - // Comment should succeed. - assert.commandWorked( - viewsDB.runCommand({find: "identityView", filter: {}, comment: "views_find"})); - - // Views support find with explain. - assert.commandWorked(viewsDB.identityView.find().explain()); - - // Find with explicit explain modes works on a view. 
- let explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("queryPlanner")); - assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); - assert(!explainPlan.hasOwnProperty("executionStats")); - - explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("executionStats")); - assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); - assert(explainPlan.hasOwnProperty("executionStats")); - assert.eq(explainPlan.executionStats.nReturned, 5); - assert(!explainPlan.executionStats.hasOwnProperty("allPlansExecution")); - - explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("allPlansExecution")); - assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); - assert(explainPlan.hasOwnProperty("executionStats")); - assert.eq(explainPlan.executionStats.nReturned, 5); - assert(explainPlan.executionStats.hasOwnProperty("allPlansExecution")); - - // Only simple 0 or 1 projections are allowed on views. - assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]})); - assert.commandFailedWithCode( - viewsDB.runCommand({find: "identityView", projection: {arr: {$elemMatch: {x: 1}}}}), - ErrorCodes.InvalidPipelineOperator); - - // Views can support a "findOne" if singleBatch: true and limit: 1. - assertFindResultEq({find: "identityView", filter: {state: "NY"}, singleBatch: true, limit: 1}, - [{_id: "New York", state: "NY", pop: 7}]); - assert.eq(viewsDB.identityView.findOne({_id: "San Francisco"}), - {_id: "San Francisco", state: "CA", pop: 4}); - - // The readOnce cursor option is not allowed on views. But if we're in a transaction, - // the error code saying that it's not allowed in a transaction takes precedence. - assert.commandFailedWithCode( - viewsDB.runCommand({find: "identityView", readOnce: true}), - [ErrorCodes.OperationNotSupportedInTransaction, ErrorCodes.InvalidPipelineOperator]); +"use strict"; + +// For arrayEq and orderedArrayEq. +load("jstests/aggregation/extras/utils.js"); + +let viewsDB = db.getSiblingDB("views_find"); +assert.commandWorked(viewsDB.dropDatabase()); + +// Helper functions. +let assertFindResultEq = function(cmd, expected, ordered) { + let res = viewsDB.runCommand(cmd); + assert.commandWorked(res); + let arr = new DBCommandCursor(viewsDB, res, 5).toArray(); + let errmsg = tojson({expected: expected, got: arr}); + + if (typeof (ordered) === "undefined" || !ordered) + assert(arrayEq(arr, expected), errmsg); + else + assert(orderedArrayEq(arr, expected), errmsg); +}; + +// Populate a collection with some test data. +let allDocuments = []; +allDocuments.push({_id: "New York", state: "NY", pop: 7}); +allDocuments.push({_id: "Newark", state: "NJ", pop: 3}); +allDocuments.push({_id: "Palo Alto", state: "CA", pop: 10}); +allDocuments.push({_id: "San Francisco", state: "CA", pop: 4}); +allDocuments.push({_id: "Trenton", state: "NJ", pop: 5}); + +let coll = viewsDB.coll; +let bulk = coll.initializeUnorderedBulkOp(); +allDocuments.forEach(function(doc) { + bulk.insert(doc); +}); +assert.writeOK(bulk.execute()); + +// Create views on the data. +assert.commandWorked( + viewsDB.runCommand({create: "identityView", viewOn: "coll", pipeline: [{$match: {}}]})); +assert.commandWorked(viewsDB.runCommand({ + create: "noIdView", + viewOn: "coll", + pipeline: [{$match: {}}, {$project: {_id: 0, state: 1, pop: 1}}] +})); + +// Filters and "simple" projections. 
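+// ("Simple" here means plain inclusion/exclusion specs; a find on a view is rewritten into an
+// aggregation, and expression projections such as $elemMatch are rejected further below.)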
+assertFindResultEq({find: "identityView"}, allDocuments); +assertFindResultEq({find: "identityView", filter: {state: "NJ"}, projection: {_id: 1}}, + [{_id: "Trenton"}, {_id: "Newark"}]); + +// A view that projects out the _id should still work with the find command. +assertFindResultEq({find: "noIdView", filter: {state: "NY"}, projection: {pop: 1}}, [{pop: 7}]); + +// Sort, limit and batchSize. +const doOrderedSort = true; +assertFindResultEq({find: "identityView", sort: {_id: 1}}, allDocuments, doOrderedSort); +assertFindResultEq( + {find: "identityView", limit: 1, batchSize: 1, sort: {_id: 1}, projection: {_id: 1}}, + [{_id: "New York"}]); +assert.commandFailedWithCode(viewsDB.runCommand({find: "identityView", sort: {$natural: 1}}), + ErrorCodes.InvalidPipelineOperator); + +// Negative batch size and limit should fail. +assert.commandFailed(viewsDB.runCommand({find: "identityView", batchSize: -1})); +assert.commandFailed(viewsDB.runCommand({find: "identityView", limit: -1})); + +// Comment should succeed. +assert.commandWorked(viewsDB.runCommand({find: "identityView", filter: {}, comment: "views_find"})); + +// Views support find with explain. +assert.commandWorked(viewsDB.identityView.find().explain()); + +// Find with explicit explain modes works on a view. +let explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("queryPlanner")); +assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); +assert(!explainPlan.hasOwnProperty("executionStats")); + +explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("executionStats")); +assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); +assert(explainPlan.hasOwnProperty("executionStats")); +assert.eq(explainPlan.executionStats.nReturned, 5); +assert(!explainPlan.executionStats.hasOwnProperty("allPlansExecution")); + +explainPlan = assert.commandWorked(viewsDB.identityView.find().explain("allPlansExecution")); +assert.eq(explainPlan.queryPlanner.namespace, "views_find.coll"); +assert(explainPlan.hasOwnProperty("executionStats")); +assert.eq(explainPlan.executionStats.nReturned, 5); +assert(explainPlan.executionStats.hasOwnProperty("allPlansExecution")); + +// Only simple 0 or 1 projections are allowed on views. +assert.writeOK(viewsDB.coll.insert({arr: [{x: 1}]})); +assert.commandFailedWithCode( + viewsDB.runCommand({find: "identityView", projection: {arr: {$elemMatch: {x: 1}}}}), + ErrorCodes.InvalidPipelineOperator); + +// Views can support a "findOne" if singleBatch: true and limit: 1. +assertFindResultEq({find: "identityView", filter: {state: "NY"}, singleBatch: true, limit: 1}, + [{_id: "New York", state: "NY", pop: 7}]); +assert.eq(viewsDB.identityView.findOne({_id: "San Francisco"}), + {_id: "San Francisco", state: "CA", pop: 4}); + +// The readOnce cursor option is not allowed on views. But if we're in a transaction, +// the error code saying that it's not allowed in a transaction takes precedence. 
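+// (assert.commandFailedWithCode accepts an array of error codes; either one satisfies the check.)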
+assert.commandFailedWithCode( + viewsDB.runCommand({find: "identityView", readOnce: true}), + [ErrorCodes.OperationNotSupportedInTransaction, ErrorCodes.InvalidPipelineOperator]); }()); diff --git a/jstests/core/views/views_rename.js b/jstests/core/views/views_rename.js index 3ece5d8269c..9d4f1238810 100644 --- a/jstests/core/views/views_rename.js +++ b/jstests/core/views/views_rename.js @@ -5,23 +5,23 @@ // ] (function() { - // SERVER-30406 Test that renaming system.views correctly invalidates the view catalog - 'use strict'; +// SERVER-30406 Test that renaming system.views correctly invalidates the view catalog +'use strict'; - const collName = "views_rename_test"; - let coll = db.getCollection(collName); +const collName = "views_rename_test"; +let coll = db.getCollection(collName); - db.view.drop(); - coll.drop(); - assert.commandWorked(db.createView("view", collName, [])); - assert.writeOK(coll.insert({_id: 1})); - assert.eq(db.view.find().count(), 1, "couldn't find document in view"); - assert.commandWorked(db.system.views.renameCollection("views", /*dropTarget*/ true)); - assert.eq(db.view.find().count(), - 0, - "find on view should have returned no results after renaming away system.views"); - assert.commandWorked(db.views.renameCollection("system.views")); - assert.eq(db.view.find().count(), - 1, - "find on view should have worked again after renaming system.views back in place"); +db.view.drop(); +coll.drop(); +assert.commandWorked(db.createView("view", collName, [])); +assert.writeOK(coll.insert({_id: 1})); +assert.eq(db.view.find().count(), 1, "couldn't find document in view"); +assert.commandWorked(db.system.views.renameCollection("views", /*dropTarget*/ true)); +assert.eq(db.view.find().count(), + 0, + "find on view should have returned no results after renaming away system.views"); +assert.commandWorked(db.views.renameCollection("system.views")); +assert.eq(db.view.find().count(), + 1, + "find on view should have worked again after renaming system.views back in place"); })(); diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js index 6c1b4b976d6..017d546bb4d 100644 --- a/jstests/core/views/views_stats.js +++ b/jstests/core/views/views_stats.js @@ -12,62 +12,62 @@ // ] (function() { - "use strict"; - load("jstests/libs/stats.js"); +"use strict"; +load("jstests/libs/stats.js"); - let viewsDB = db.getSiblingDB("views_stats"); - assert.commandWorked(viewsDB.dropDatabase()); - assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection"})); +let viewsDB = db.getSiblingDB("views_stats"); +assert.commandWorked(viewsDB.dropDatabase()); +assert.commandWorked(viewsDB.runCommand({create: "view", viewOn: "collection"})); - let view = viewsDB["view"]; - let coll = viewsDB["collection"]; +let view = viewsDB["view"]; +let coll = viewsDB["collection"]; - // Check the histogram counters. - let lastHistogram = getHistogramStats(view); - view.aggregate([{$match: {}}]); - lastHistogram = assertHistogramDiffEq(view, lastHistogram, 1, 0, 0); +// Check the histogram counters. +let lastHistogram = getHistogramStats(view); +view.aggregate([{$match: {}}]); +lastHistogram = assertHistogramDiffEq(view, lastHistogram, 1, 0, 0); - // Check that failed inserts, updates, and deletes are counted. - assert.writeError(view.insert({})); - lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); +// Check that failed inserts, updates, and deletes are counted. 
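+// Views are read-only, so each of these writes is expected to fail; the failed operation should
+// still be counted as a write in the latency histogram.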
+assert.writeError(view.insert({})); +lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); - assert.writeError(view.remove({})); - lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); +assert.writeError(view.remove({})); +lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); - assert.writeError(view.update({}, {})); - lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); +assert.writeError(view.update({}, {})); +lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); - let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster")); - const isMongos = (isMasterResponse.msg === "isdbgrid"); - if (isMongos) { - jsTest.log("Tests are being run on a mongos; skipping top tests."); - return; - } +let isMasterResponse = assert.commandWorked(viewsDB.runCommand("isMaster")); +const isMongos = (isMasterResponse.msg === "isdbgrid"); +if (isMongos) { + jsTest.log("Tests are being run on a mongos; skipping top tests."); + return; +} - // Check the top counters. - let lastTop = getTop(view); - view.aggregate([{$match: {}}]); - lastTop = assertTopDiffEq(view, lastTop, "commands", 1); +// Check the top counters. +let lastTop = getTop(view); +view.aggregate([{$match: {}}]); +lastTop = assertTopDiffEq(view, lastTop, "commands", 1); - assert.writeError(view.insert({})); - lastTop = assertTopDiffEq(view, lastTop, "insert", 1); +assert.writeError(view.insert({})); +lastTop = assertTopDiffEq(view, lastTop, "insert", 1); - assert.writeError(view.remove({})); - lastTop = assertTopDiffEq(view, lastTop, "remove", 1); +assert.writeError(view.remove({})); +lastTop = assertTopDiffEq(view, lastTop, "remove", 1); - assert.writeError(view.update({}, {})); - lastTop = assertTopDiffEq(view, lastTop, "update", 1); +assert.writeError(view.update({}, {})); +lastTop = assertTopDiffEq(view, lastTop, "update", 1); - // Check that operations on the backing collection do not modify the view stats. - lastTop = getTop(view); - lastHistogram = getHistogramStats(view); - assert.writeOK(coll.insert({})); - assert.writeOK(coll.update({}, {$set: {x: 1}})); - coll.aggregate([{$match: {}}]); - assert.writeOK(coll.remove({})); +// Check that operations on the backing collection do not modify the view stats. 
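+// Both top and the latency histogram are tracked per namespace, so activity against the backing
+// collection should leave the view's counters untouched.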
+lastTop = getTop(view); +lastHistogram = getHistogramStats(view); +assert.writeOK(coll.insert({})); +assert.writeOK(coll.update({}, {$set: {x: 1}})); +coll.aggregate([{$match: {}}]); +assert.writeOK(coll.remove({})); - assertTopDiffEq(view, lastTop, "insert", 0); - assertTopDiffEq(view, lastTop, "update", 0); - assertTopDiffEq(view, lastTop, "remove", 0); - assertHistogramDiffEq(view, lastHistogram, 0, 0, 0); +assertTopDiffEq(view, lastTop, "insert", 0); +assertTopDiffEq(view, lastTop, "update", 0); +assertTopDiffEq(view, lastTop, "remove", 0); +assertHistogramDiffEq(view, lastHistogram, 0, 0, 0); }()); diff --git a/jstests/core/views/views_validation.js b/jstests/core/views/views_validation.js index a0e02d0b2c7..e0c8aca80ea 100644 --- a/jstests/core/views/views_validation.js +++ b/jstests/core/views/views_validation.js @@ -1,35 +1,34 @@ // @tags: [requires_non_retryable_commands] (function() { - "use strict"; - let viewsDb = db.getSiblingDB("views_validation"); - const kMaxViewDepth = 20; - - function makeView(viewName, viewOn, pipeline, expectedErrorCode) { - let options = {create: viewName, viewOn: viewOn}; - if (pipeline) { - options["pipeline"] = pipeline; - } - let res = viewsDb.runCommand(options); - if (expectedErrorCode !== undefined) { - assert.commandFailedWithCode( - res, expectedErrorCode, "Invalid view created " + tojson(options)); - } else { - assert.commandWorked(res, "Could not create view " + tojson(options)); - } - - return viewsDb.getCollection(viewName); +"use strict"; +let viewsDb = db.getSiblingDB("views_validation"); +const kMaxViewDepth = 20; + +function makeView(viewName, viewOn, pipeline, expectedErrorCode) { + let options = {create: viewName, viewOn: viewOn}; + if (pipeline) { + options["pipeline"] = pipeline; } - - function makeLookup(from) { - return { - $lookup: - {from: from, as: "as", localField: "localField", foreignField: "foreignField"} - }; + let res = viewsDb.runCommand(options); + if (expectedErrorCode !== undefined) { + assert.commandFailedWithCode( + res, expectedErrorCode, "Invalid view created " + tojson(options)); + } else { + assert.commandWorked(res, "Could not create view " + tojson(options)); } - function makeGraphLookup(from) { - return { + return viewsDb.getCollection(viewName); +} + +function makeLookup(from) { + return { + $lookup: {from: from, as: "as", localField: "localField", foreignField: "foreignField"} + }; +} + +function makeGraphLookup(from) { + return { $graphLookup: { from: from, as: "as", @@ -38,96 +37,96 @@ connectToField: "connectToField" } }; - } - - function makeFacet(from) { - return {$facet: {"Facet Key": [makeLookup(from)]}}; - } - - function clear() { - assert.commandWorked(viewsDb.dropDatabase()); - } - - clear(); - - // Check that simple cycles are disallowed. - makeView("a", "a", [], ErrorCodes.GraphContainsCycle); - makeView("a", "b", [makeLookup("a")], ErrorCodes.GraphContainsCycle); - clear(); - - makeView("a", "b", ErrorCodes.OK); - makeView("b", "a", [], ErrorCodes.GraphContainsCycle); - makeView("b", "c", [makeLookup("a")], ErrorCodes.GraphContainsCycle); - clear(); - - makeView("a", "b"); - makeView("b", "c"); - makeView("c", "a", [], ErrorCodes.GraphContainsCycle); - clear(); - - /* - * Check that view validation does not naively recurse on already visited views. - * - * Make a tree of depth 20 as with one view per level follows: - * 1 - * ----------------------------- - * 2 2 2 2 - * ----- ----- ----- ----- - * 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 - * ... ... ... ... 
- * - * So view i depends on the view (i+1) four times. Since it should only need to recurse - * down one branch completely for each creation, since this should only need to check a maximum - * of 20 views instead of 4^20 views. - */ - - for (let i = 1; i <= kMaxViewDepth; i++) { - let childView = "v" + (i + 1); - makeView("v" + i, - childView, - [makeLookup(childView), makeGraphLookup(childView), makeFacet(childView)]); - } - - // Check that any higher depth leads to failure - makeView("v21", "v22", [], ErrorCodes.ViewDepthLimitExceeded); - makeView("v0", "v1", [], ErrorCodes.ViewDepthLimitExceeded); - makeView("v0", "ok", [makeLookup("v1")], ErrorCodes.ViewDepthLimitExceeded); - - // But adding to the middle should be ok. - makeView("vMid", "v10"); - clear(); - - // Check that $graphLookup and $facet also check for cycles. - makeView("a", "b", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle); - makeView("a", "b", [makeGraphLookup("b")]); - makeView("b", "c", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle); - clear(); - - makeView("a", "b", [makeFacet("a")], ErrorCodes.GraphContainsCycle); - makeView("a", "b", [makeFacet("b")]); - makeView("b", "c", [makeFacet("a")], ErrorCodes.GraphContainsCycle); - clear(); - - // Check that collMod also checks for cycles. - makeView("a", "b"); - makeView("b", "c"); - assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "a", pipeline: []}), - ErrorCodes.GraphContainsCycle, - "collmod changed view to create a cycle"); - - // Check that collMod disallows the specification of invalid pipelines. - assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {}}), - ErrorCodes.InvalidOptions, - "collMod modified view to have invalid pipeline"); - assert.commandFailedWithCode( - viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {0: {$limit: 7}}}), - ErrorCodes.InvalidOptions, - "collMod modified view to have invalid pipeline"); - clear(); - - // Check that invalid pipelines are disallowed. The following $lookup is missing the 'as' field. - makeView("a", - "b", - [{"$lookup": {from: "a", localField: "b", foreignField: "c"}}], - ErrorCodes.FailedToParse); +} + +function makeFacet(from) { + return {$facet: {"Facet Key": [makeLookup(from)]}}; +} + +function clear() { + assert.commandWorked(viewsDb.dropDatabase()); +} + +clear(); + +// Check that simple cycles are disallowed. +makeView("a", "a", [], ErrorCodes.GraphContainsCycle); +makeView("a", "b", [makeLookup("a")], ErrorCodes.GraphContainsCycle); +clear(); + +makeView("a", "b", ErrorCodes.OK); +makeView("b", "a", [], ErrorCodes.GraphContainsCycle); +makeView("b", "c", [makeLookup("a")], ErrorCodes.GraphContainsCycle); +clear(); + +makeView("a", "b"); +makeView("b", "c"); +makeView("c", "a", [], ErrorCodes.GraphContainsCycle); +clear(); + +/* + * Check that view validation does not naively recurse on already visited views. + * + * Make a tree of depth 20 as with one view per level follows: + * 1 + * ----------------------------- + * 2 2 2 2 + * ----- ----- ----- ----- + * 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 + * ... ... ... ... + * + * So view i depends on the view (i+1) four times. Since it should only need to recurse + * down one branch completely for each creation, since this should only need to check a maximum + * of 20 views instead of 4^20 views. 
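+ * (That is, validation is expected to remember views it has already verified, visiting each of
+ * the ~20 distinct views once instead of expanding all 4^20 reference paths.)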
+ */ + +for (let i = 1; i <= kMaxViewDepth; i++) { + let childView = "v" + (i + 1); + makeView("v" + i, + childView, + [makeLookup(childView), makeGraphLookup(childView), makeFacet(childView)]); +} + +// Check that any higher depth leads to failure +makeView("v21", "v22", [], ErrorCodes.ViewDepthLimitExceeded); +makeView("v0", "v1", [], ErrorCodes.ViewDepthLimitExceeded); +makeView("v0", "ok", [makeLookup("v1")], ErrorCodes.ViewDepthLimitExceeded); + +// But adding to the middle should be ok. +makeView("vMid", "v10"); +clear(); + +// Check that $graphLookup and $facet also check for cycles. +makeView("a", "b", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle); +makeView("a", "b", [makeGraphLookup("b")]); +makeView("b", "c", [makeGraphLookup("a")], ErrorCodes.GraphContainsCycle); +clear(); + +makeView("a", "b", [makeFacet("a")], ErrorCodes.GraphContainsCycle); +makeView("a", "b", [makeFacet("b")]); +makeView("b", "c", [makeFacet("a")], ErrorCodes.GraphContainsCycle); +clear(); + +// Check that collMod also checks for cycles. +makeView("a", "b"); +makeView("b", "c"); +assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "a", pipeline: []}), + ErrorCodes.GraphContainsCycle, + "collmod changed view to create a cycle"); + +// Check that collMod disallows the specification of invalid pipelines. +assert.commandFailedWithCode(viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {}}), + ErrorCodes.InvalidOptions, + "collMod modified view to have invalid pipeline"); +assert.commandFailedWithCode( + viewsDb.runCommand({collMod: "b", viewOn: "c", pipeline: {0: {$limit: 7}}}), + ErrorCodes.InvalidOptions, + "collMod modified view to have invalid pipeline"); +clear(); + +// Check that invalid pipelines are disallowed. The following $lookup is missing the 'as' field. 
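+// (Pipelines are validated at view creation time, so the create command itself is what fails.)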
+makeView("a", + "b", + [{"$lookup": {from: "a", localField: "b", foreignField: "c"}}], + ErrorCodes.FailedToParse); }()); diff --git a/jstests/core/where_tolerates_js_exception.js b/jstests/core/where_tolerates_js_exception.js index b12a7c0a65e..ed11b3e64a5 100644 --- a/jstests/core/where_tolerates_js_exception.js +++ b/jstests/core/where_tolerates_js_exception.js @@ -8,28 +8,28 @@ * ] */ (function() { - "use strict"; +"use strict"; - const collection = db.where_tolerates_js_exception; - collection.drop(); +const collection = db.where_tolerates_js_exception; +collection.drop(); - assert.commandWorked(collection.save({a: 1})); +assert.commandWorked(collection.save({a: 1})); - const res = collection.runCommand("find", { - filter: { - $where: function myFunction() { - return a(); - } +const res = collection.runCommand("find", { + filter: { + $where: function myFunction() { + return a(); } - }); + } +}); - assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure); - assert(/ReferenceError/.test(res.errmsg), - () => "$where didn't failed with a ReferenceError: " + tojson(res)); - assert(/myFunction@/.test(res.errmsg), - () => "$where didn't return the JavaScript stacktrace: " + tojson(res)); - assert(!res.hasOwnProperty("stack"), - () => "$where shouldn't return JavaScript stacktrace separately: " + tojson(res)); - assert(!res.hasOwnProperty("originalError"), - () => "$where shouldn't return wrapped version of the error: " + tojson(res)); +assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure); +assert(/ReferenceError/.test(res.errmsg), + () => "$where didn't failed with a ReferenceError: " + tojson(res)); +assert(/myFunction@/.test(res.errmsg), + () => "$where didn't return the JavaScript stacktrace: " + tojson(res)); +assert(!res.hasOwnProperty("stack"), + () => "$where shouldn't return JavaScript stacktrace separately: " + tojson(res)); +assert(!res.hasOwnProperty("originalError"), + () => "$where shouldn't return wrapped version of the error: " + tojson(res)); })(); diff --git a/jstests/core/wildcard_and_text_indexes.js b/jstests/core/wildcard_and_text_indexes.js index a4b552e3220..639450b174c 100644 --- a/jstests/core/wildcard_and_text_indexes.js +++ b/jstests/core/wildcard_and_text_indexes.js @@ -3,81 +3,80 @@ * @tags: [assumes_balancer_off] */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage. - load("jstests/libs/fixture_helpers.js"); // For isMongos. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage. +load("jstests/libs/fixture_helpers.js"); // For isMongos. - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - const coll = db.wildcard_and_text_indexes; - coll.drop(); +const coll = db.wildcard_and_text_indexes; +coll.drop(); - // Runs a single wildcard query test, confirming that an indexed solution exists, that the $** - // index on the given 'expectedPath' was used to answer the query, and that the results are - // identical to those obtained via COLLSCAN. - function assertWildcardQuery(query, expectedPath) { - // Explain the query, and determine whether an indexed solution is available. 
- const explainOutput = coll.find(query).explain("executionStats"); - const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN"); - // Verify that the winning plan uses the $** index with the expected path. - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1}); - // Verify that the results obtained from the $** index are identical to a COLLSCAN. - assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray()); - } +// Runs a single wildcard query test, confirming that an indexed solution exists, that the $** +// index on the given 'expectedPath' was used to answer the query, and that the results are +// identical to those obtained via COLLSCAN. +function assertWildcardQuery(query, expectedPath) { + // Explain the query, and determine whether an indexed solution is available. + const explainOutput = coll.find(query).explain("executionStats"); + const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN"); + // Verify that the winning plan uses the $** index with the expected path. + assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); + assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1}); + // Verify that the results obtained from the $** index are identical to a COLLSCAN. + assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray()); +} - // Insert documents containing the field '_fts', which is reserved when using a $text index. - assert.commandWorked(coll.insert({_id: 1, a: 1, _fts: 1, textToSearch: "banana"})); - assert.commandWorked(coll.insert({_id: 2, a: 1, _fts: 2, textToSearch: "bananas"})); - assert.commandWorked(coll.insert({_id: 3, a: 1, _fts: 3})); +// Insert documents containing the field '_fts', which is reserved when using a $text index. +assert.commandWorked(coll.insert({_id: 1, a: 1, _fts: 1, textToSearch: "banana"})); +assert.commandWorked(coll.insert({_id: 2, a: 1, _fts: 2, textToSearch: "bananas"})); +assert.commandWorked(coll.insert({_id: 3, a: 1, _fts: 3})); - // Build a wildcard index, and verify that it can be used to query for the field '_fts'. - assert.commandWorked(coll.createIndex({"$**": 1})); - assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts'); +// Build a wildcard index, and verify that it can be used to query for the field '_fts'. +assert.commandWorked(coll.createIndex({"$**": 1})); +assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts'); - // Perform the tests below for simple and compound $text indexes. - for (let textIndex of[{'$**': 'text'}, {a: 1, '$**': 'text'}]) { - // Build the appropriate text index. - assert.commandWorked(coll.createIndex(textIndex, {name: "textIndex"})); +// Perform the tests below for simple and compound $text indexes. +for (let textIndex of [{'$**': 'text'}, {a: 1, '$**': 'text'}]) { + // Build the appropriate text index. + assert.commandWorked(coll.createIndex(textIndex, {name: "textIndex"})); - // Confirm that the $** index can still be used to query for the '_fts' field outside of - // $text queries. - assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts'); + // Confirm that the $** index can still be used to query for the '_fts' field outside of + // $text queries. + assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, '_fts'); - // Confirm that $** does not generate a candidate plan for $text search, including cases - // when the query filter contains a compound field in the $text index. 
- const textQuery = Object.assign(textIndex.a ? {a: 1} : {}, {$text: {$search: 'banana'}}); - let explainOut = assert.commandWorked(coll.find(textQuery).explain("executionStats")); - assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT")); - assert.eq(getRejectedPlans(explainOut).length, 0); - assert.eq(explainOut.executionStats.nReturned, 2); + // Confirm that $** does not generate a candidate plan for $text search, including cases + // when the query filter contains a compound field in the $text index. + const textQuery = Object.assign(textIndex.a ? {a: 1} : {}, {$text: {$search: 'banana'}}); + let explainOut = assert.commandWorked(coll.find(textQuery).explain("executionStats")); + assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT")); + assert.eq(getRejectedPlans(explainOut).length, 0); + assert.eq(explainOut.executionStats.nReturned, 2); - // Confirm that $** does not generate a candidate plan for $text search, including cases - // where the query filter contains a field which is not present in the text index. - explainOut = - assert.commandWorked(coll.find(Object.assign({_fts: {$gt: 0, $lt: 4}}, textQuery)) - .explain("executionStats")); - assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT")); - assert.eq(getRejectedPlans(explainOut).length, 0); - assert.eq(explainOut.executionStats.nReturned, 2); + // Confirm that $** does not generate a candidate plan for $text search, including cases + // where the query filter contains a field which is not present in the text index. + explainOut = assert.commandWorked( + coll.find(Object.assign({_fts: {$gt: 0, $lt: 4}}, textQuery)).explain("executionStats")); + assert(planHasStage(coll.getDB(), explainOut.queryPlanner.winningPlan, "TEXT")); + assert.eq(getRejectedPlans(explainOut).length, 0); + assert.eq(explainOut.executionStats.nReturned, 2); - // Confirm that the $** index can be used alongside a $text predicate in an $or. - explainOut = assert.commandWorked( - coll.find({$or: [{_fts: 3}, textQuery]}).explain("executionStats")); - assert.eq(getRejectedPlans(explainOut).length, 0); - assert.eq(explainOut.executionStats.nReturned, 3); + // Confirm that the $** index can be used alongside a $text predicate in an $or. + explainOut = + assert.commandWorked(coll.find({$or: [{_fts: 3}, textQuery]}).explain("executionStats")); + assert.eq(getRejectedPlans(explainOut).length, 0); + assert.eq(explainOut.executionStats.nReturned, 3); - const textOrWildcard = getPlanStages(explainOut.queryPlanner.winningPlan, "OR").shift(); - assert.eq(textOrWildcard.inputStages.length, 2); - const textBranch = (textOrWildcard.inputStages[0].stage === "TEXT" ? 0 : 1); - const wildcardBranch = (textBranch + 1) % 2; - assert.eq(textOrWildcard.inputStages[textBranch].stage, "TEXT"); - assert.eq(textOrWildcard.inputStages[wildcardBranch].stage, "IXSCAN"); - assert.eq(textOrWildcard.inputStages[wildcardBranch].keyPattern, {$_path: 1, _fts: 1}); + const textOrWildcard = getPlanStages(explainOut.queryPlanner.winningPlan, "OR").shift(); + assert.eq(textOrWildcard.inputStages.length, 2); + const textBranch = (textOrWildcard.inputStages[0].stage === "TEXT" ? 0 : 1); + const wildcardBranch = (textBranch + 1) % 2; + assert.eq(textOrWildcard.inputStages[textBranch].stage, "TEXT"); + assert.eq(textOrWildcard.inputStages[wildcardBranch].stage, "IXSCAN"); + assert.eq(textOrWildcard.inputStages[wildcardBranch].keyPattern, {$_path: 1, _fts: 1}); - // Drop the index so that a different text index can be created. 
- assert.commandWorked(coll.dropIndex("textIndex")); - } + // Drop the index so that a different text index can be created. + assert.commandWorked(coll.dropIndex("textIndex")); +} })(); diff --git a/jstests/core/wildcard_index_basic_index_bounds.js b/jstests/core/wildcard_index_basic_index_bounds.js index e2ee7da8710..a685898ead7 100644 --- a/jstests/core/wildcard_index_basic_index_bounds.js +++ b/jstests/core/wildcard_index_basic_index_bounds.js @@ -7,229 +7,227 @@ * @tags: [does_not_support_stepdowns, assumes_balancer_off] */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For getPlanStages. - load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/libs/fixture_helpers.js"); // For isMongos and numberOfShardsForCollection. - // Asserts that the given cursors produce identical result sets. - function assertResultsEq(cursor1, cursor2) { - while (cursor1.hasNext()) { - assert(cursor2.hasNext()); - assert.eq(cursor1.next()._id, cursor2.next()._id); - } - assert(!cursor2.hasNext()); - } - - const coll = db.wildcard_index_bounds; - coll.drop(); - - // Template document which defines the 'schema' of the documents in the test collection. - const templateDoc = {a: 0, b: {c: 0, d: {e: 0}, f: {}}}; - const pathList = ['a', 'b.c', 'b.d.e', 'b.f']; - - // Insert a set of documents into the collection, based on the template document and populated - // with an increasing sequence of values. This is to ensure that the range of values present for - // each field in the dataset is not entirely homogeneous. - for (let i = 0; i < 10; i++) { - (function populateDoc(doc, value) { - for (let key in doc) { - if (typeof doc[key] === 'object') - value = populateDoc(doc[key], value); - else - doc[key] = value++; - } - return value; - })(templateDoc, i); - - assert.commandWorked(coll.insert(templateDoc)); +// Asserts that the given cursors produce identical result sets. +function assertResultsEq(cursor1, cursor2) { + while (cursor1.hasNext()) { + assert(cursor2.hasNext()); + assert.eq(cursor1.next()._id, cursor2.next()._id); } - - // For sharded passthroughs, we need to know the number of shards occupied by the collection. - const numShards = FixtureHelpers.numberOfShardsForCollection(coll); - - // Set of operations which will be applied to each field in the index in turn. If the 'bounds' - // property is null, this indicates that the operation is not supported by $** indexes. The - // 'subpathBounds' property indicates whether the bounds for '$_path' are supposed to contain - // all subpaths rather than a single point-interval, i.e. ["path.to.field.", "path.to.field/"). 
- const operationList = [ - {expression: {$gte: 3}, bounds: ['[3.0, inf.0]']}, - {expression: {$gt: 3}, bounds: ['(3.0, inf.0]']}, - {expression: {$lt: 7}, bounds: ['[-inf.0, 7.0)']}, - {expression: {$lte: 7}, bounds: ['[-inf.0, 7.0]']}, - {expression: {$eq: 5}, bounds: ['[5.0, 5.0]']}, - { - expression: {$in: [3, 5, 7, 9]}, - bounds: ['[3.0, 3.0]', '[5.0, 5.0]', '[7.0, 7.0]', '[9.0, 9.0]'] - }, - {expression: {$exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, - { - expression: {$gte: MinKey, $lte: MaxKey}, - bounds: ['[MinKey, MaxKey]'], - subpathBounds: true - }, - {expression: {$exists: false}, bounds: null}, - {expression: {$eq: null}, bounds: null}, - {expression: {$eq: {abc: 1}}, bounds: null}, - {expression: {$lt: {abc: 1}}, bounds: null}, - {expression: {$ne: {abc: 1}}, bounds: null}, - {expression: {$lt: {abc: 1}, $gt: {abc: 1}}, bounds: null}, - {expression: {$in: [{abc: 1}, 1, 2, 3]}, bounds: null}, - {expression: {$in: [null, 1, 2, 3]}, bounds: null}, - {expression: {$ne: null}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true}, - {expression: {$ne: null, $exists: true}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true}, - // In principle we could have tighter bounds for this. See SERVER-36765. - {expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, - {expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']} - ]; - - // Given a keyPattern and (optional) pathProjection, this function builds a $** index on the - // collection and then tests each of the match expression in the 'operationList' on each indexed - // field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have - // been indexed based on the spec; this function will confirm that only the appropriate paths - // are present in the $** index. Finally, for each match expression it will perform a rooted-$or - // with one predicate on each expected path, and a rooted $and over all predicates and paths. - function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) { - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex( - keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {})); - - // The 'expectedPaths' argument is the set of paths which we expect to be indexed, based on - // the keyPattern and projection. Make sure that the caller has provided this argument. - assert(expectedPaths); - - // Verify the expected behaviour for every combination of path and operator. - for (let op of operationList) { - // Build up a list of operations that will later be used to test rooted $or. - const multiFieldPreds = []; - const orQueryBounds = []; - - for (let path of pathList) { - // The bounds on '$_path' will always include a point-interval on the path, i.e. - // ["path.to.field", "path.to.field"]. If 'subpathBounds' is 'true' for this - // operation, then we add bounds that include all subpaths as well, i.e. - // ["path.to.field.", "path.to.field/") - const pointPathBound = `["${path}", "${path}"]`; - const pathBounds = op.subpathBounds ? [pointPathBound, `["${path}.", "${path}/")`] - : [pointPathBound]; - // {$_path: pathBounds, path.to.field: [[computed bounds]]} - const expectedBounds = {$_path: pathBounds, [path]: op.bounds}; - const query = {[path]: op.expression}; - - // Explain the query, and determine whether an indexed solution is available. 
- const ixScans = - getPlanStages(coll.find(query).explain().queryPlanner.winningPlan, "IXSCAN"); - - // If we expect the current path to have been excluded based on the $** keyPattern - // and projection, or if the current operation is not supported by $** indexes, - // confirm that no indexed solution was found. - if (!expectedPaths.includes(path) || op.bounds === null) { - assert.eq(ixScans.length, - 0, - () => "Bounds check for operation: " + tojson(op) + - " failed. Expected no IXSCAN plans to be generated, but got " + - tojson(ixScans)); - continue; - } - - // Verify that the winning plan uses the $** index with the expected bounds. - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.docEq(ixScans[0].keyPattern, {$_path: 1, [path]: 1}); - assert.docEq(ixScans[0].indexBounds, expectedBounds); - - // Verify that the results obtained from the $** index are identical to a COLLSCAN. - // We must explicitly hint the wildcard index, because we also sort on {_id: 1} to - // ensure that both result sets are in the same order. - assertResultsEq(coll.find(query).sort({_id: 1}).hint(keyPattern), - coll.find(query).sort({_id: 1}).hint({$natural: 1})); - - // Push the query into the $or and $and predicate arrays. - orQueryBounds.push(expectedBounds); - multiFieldPreds.push(query); - } - - // If the current operation could not use the $** index, skip to the next op. - if (multiFieldPreds.length === 0) { + assert(!cursor2.hasNext()); +} + +const coll = db.wildcard_index_bounds; +coll.drop(); + +// Template document which defines the 'schema' of the documents in the test collection. +const templateDoc = { + a: 0, + b: {c: 0, d: {e: 0}, f: {}} +}; +const pathList = ['a', 'b.c', 'b.d.e', 'b.f']; + +// Insert a set of documents into the collection, based on the template document and populated +// with an increasing sequence of values. This is to ensure that the range of values present for +// each field in the dataset is not entirely homogeneous. +for (let i = 0; i < 10; i++) { + (function populateDoc(doc, value) { + for (let key in doc) { + if (typeof doc[key] === 'object') + value = populateDoc(doc[key], value); + else + doc[key] = value++; + } + return value; + })(templateDoc, i); + + assert.commandWorked(coll.insert(templateDoc)); +} + +// For sharded passthroughs, we need to know the number of shards occupied by the collection. +const numShards = FixtureHelpers.numberOfShardsForCollection(coll); + +// Set of operations which will be applied to each field in the index in turn. If the 'bounds' +// property is null, this indicates that the operation is not supported by $** indexes. The +// 'subpathBounds' property indicates whether the bounds for '$_path' are supposed to contain +// all subpaths rather than a single point-interval, i.e. ["path.to.field.", "path.to.field/"). 
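+// (In that interval notation, '/' is the character immediately following '.', so the half-open
+// range ["path.to.field.", "path.to.field/") covers every dotted subpath of "path.to.field".)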
+const operationList = [ + {expression: {$gte: 3}, bounds: ['[3.0, inf.0]']}, + {expression: {$gt: 3}, bounds: ['(3.0, inf.0]']}, + {expression: {$lt: 7}, bounds: ['[-inf.0, 7.0)']}, + {expression: {$lte: 7}, bounds: ['[-inf.0, 7.0]']}, + {expression: {$eq: 5}, bounds: ['[5.0, 5.0]']}, + { + expression: {$in: [3, 5, 7, 9]}, + bounds: ['[3.0, 3.0]', '[5.0, 5.0]', '[7.0, 7.0]', '[9.0, 9.0]'] + }, + {expression: {$exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, + {expression: {$gte: MinKey, $lte: MaxKey}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, + {expression: {$exists: false}, bounds: null}, + {expression: {$eq: null}, bounds: null}, + {expression: {$eq: {abc: 1}}, bounds: null}, + {expression: {$lt: {abc: 1}}, bounds: null}, + {expression: {$ne: {abc: 1}}, bounds: null}, + {expression: {$lt: {abc: 1}, $gt: {abc: 1}}, bounds: null}, + {expression: {$in: [{abc: 1}, 1, 2, 3]}, bounds: null}, + {expression: {$in: [null, 1, 2, 3]}, bounds: null}, + {expression: {$ne: null}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true}, + {expression: {$ne: null, $exists: true}, bounds: ["[MinKey, MaxKey]"], subpathBounds: true}, + // In principle we could have tighter bounds for this. See SERVER-36765. + {expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, + {expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']} +]; + +// Given a keyPattern and (optional) pathProjection, this function builds a $** index on the +// collection and then tests each of the match expression in the 'operationList' on each indexed +// field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have +// been indexed based on the spec; this function will confirm that only the appropriate paths +// are present in the $** index. Finally, for each match expression it will perform a rooted-$or +// with one predicate on each expected path, and a rooted $and over all predicates and paths. +function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) { + assert.commandWorked(coll.dropIndexes()); + assert.commandWorked( + coll.createIndex(keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {})); + + // The 'expectedPaths' argument is the set of paths which we expect to be indexed, based on + // the keyPattern and projection. Make sure that the caller has provided this argument. + assert(expectedPaths); + + // Verify the expected behaviour for every combination of path and operator. + for (let op of operationList) { + // Build up a list of operations that will later be used to test rooted $or. + const multiFieldPreds = []; + const orQueryBounds = []; + + for (let path of pathList) { + // The bounds on '$_path' will always include a point-interval on the path, i.e. + // ["path.to.field", "path.to.field"]. If 'subpathBounds' is 'true' for this + // operation, then we add bounds that include all subpaths as well, i.e. + // ["path.to.field.", "path.to.field/") + const pointPathBound = `["${path}", "${path}"]`; + const pathBounds = + op.subpathBounds ? [pointPathBound, `["${path}.", "${path}/")`] : [pointPathBound]; + // {$_path: pathBounds, path.to.field: [[computed bounds]]} + const expectedBounds = {$_path: pathBounds, [path]: op.bounds}; + const query = {[path]: op.expression}; + + // Explain the query, and determine whether an indexed solution is available. 
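+            // (getPlanStages gathers matching stages from every shard's winning plan, which is
+            // why the IXSCAN counts below are asserted per shard.)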
+ const ixScans = + getPlanStages(coll.find(query).explain().queryPlanner.winningPlan, "IXSCAN"); + + // If we expect the current path to have been excluded based on the $** keyPattern + // and projection, or if the current operation is not supported by $** indexes, + // confirm that no indexed solution was found. + if (!expectedPaths.includes(path) || op.bounds === null) { + assert.eq(ixScans.length, + 0, + () => "Bounds check for operation: " + tojson(op) + + " failed. Expected no IXSCAN plans to be generated, but got " + + tojson(ixScans)); continue; } - // Perform a rooted $or for this operation across all indexed fields; for instance: - // {$or: [{a: {$eq: 25}}, {'b.c': {$eq: 25}}, {'b.d.e': {$eq: 25}}]}. - const explainedOr = assert.commandWorked(coll.find({$or: multiFieldPreds}).explain()); - - // Obtain the list of index bounds from each individual IXSCAN stage across all shards. - const ixScanBounds = getPlanStages(explainedOr.queryPlanner.winningPlan, "IXSCAN") - .map(elem => elem.indexBounds); - - // We should find that each branch of the $or has used a separate $** sub-index. In the - // sharded passthroughs, we expect to have 'orQueryBounds' on each shard. - assert.eq(ixScanBounds.length, orQueryBounds.length * numShards); - for (let offset = 0; offset < ixScanBounds.length; offset += orQueryBounds.length) { - const ixBounds = ixScanBounds.slice(offset, offset + orQueryBounds.length); - orQueryBounds.forEach( - exBound => assert(ixBounds.some(ixBound => !bsonWoCompare(ixBound, exBound)))); - } + // Verify that the winning plan uses the $** index with the expected bounds. + assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); + assert.docEq(ixScans[0].keyPattern, {$_path: 1, [path]: 1}); + assert.docEq(ixScans[0].indexBounds, expectedBounds); // Verify that the results obtained from the $** index are identical to a COLLSCAN. - assertResultsEq(coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint(keyPattern), - coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1})); - - // Perform an $and for this operation across all indexed fields; for instance: - // {$and: [{a: {$gte: 50}}, {'b.c': {$gte: 50}}, {'b.d.e': {$gte: 50}}]}. - const explainedAnd = coll.find({$and: multiFieldPreds}).explain(); - const winningIxScan = getPlanStages(explainedAnd.queryPlanner.winningPlan, "IXSCAN"); - - // Extract information about the rejected plans. We should have one IXSCAN for each $** - // candidate that wasn't the winner. Before SERVER-36521 banned them for $** indexes, a - // number of AND_SORTED plans would also be generated here; we search for these in order - // to verify that no such plans now exist. - const rejectedPlans = getRejectedPlans(explainedAnd); - let rejectedIxScans = [], rejectedAndSorted = []; - for (let rejectedPlan of rejectedPlans) { - rejectedAndSorted = - rejectedAndSorted.concat(getPlanStages(rejectedPlan, "AND_SORTED")); - rejectedIxScans = rejectedIxScans.concat(getPlanStages(rejectedPlan, "IXSCAN")); - } + // We must explicitly hint the wildcard index, because we also sort on {_id: 1} to + // ensure that both result sets are in the same order. + assertResultsEq(coll.find(query).sort({_id: 1}).hint(keyPattern), + coll.find(query).sort({_id: 1}).hint({$natural: 1})); + + // Push the query into the $or and $and predicate arrays. + orQueryBounds.push(expectedBounds); + multiFieldPreds.push(query); + } - // Confirm that no AND_SORTED plans were generated. 
- assert.eq(rejectedAndSorted.length, 0); + // If the current operation could not use the $** index, skip to the next op. + if (multiFieldPreds.length === 0) { + continue; + } - // We should find that one of the available $** subindexes has been chosen as the - // winner, and all other candidate $** indexes are present in 'rejectedPlans'. - assert.eq(winningIxScan.length, numShards); - assert.eq(rejectedIxScans.length, numShards * (expectedPaths.length - 1)); + // Perform a rooted $or for this operation across all indexed fields; for instance: + // {$or: [{a: {$eq: 25}}, {'b.c': {$eq: 25}}, {'b.d.e': {$eq: 25}}]}. + const explainedOr = assert.commandWorked(coll.find({$or: multiFieldPreds}).explain()); + + // Obtain the list of index bounds from each individual IXSCAN stage across all shards. + const ixScanBounds = getPlanStages(explainedOr.queryPlanner.winningPlan, "IXSCAN") + .map(elem => elem.indexBounds); + + // We should find that each branch of the $or has used a separate $** sub-index. In the + // sharded passthroughs, we expect to have 'orQueryBounds' on each shard. + assert.eq(ixScanBounds.length, orQueryBounds.length * numShards); + for (let offset = 0; offset < ixScanBounds.length; offset += orQueryBounds.length) { + const ixBounds = ixScanBounds.slice(offset, offset + orQueryBounds.length); + orQueryBounds.forEach( + exBound => assert(ixBounds.some(ixBound => !bsonWoCompare(ixBound, exBound)))); + } - // Verify that each of the IXSCANs have the expected bounds and $_path key. - for (let ixScan of winningIxScan.concat(rejectedIxScans)) { - // {$_path: ["['path.to.field', 'path.to.field']"], path.to.field: [[bounds]]} - const ixScanPath = JSON.parse(ixScan.indexBounds.$_path[0])[0]; - assert.eq(ixScan.indexBounds[ixScanPath], op.bounds); - assert(expectedPaths.includes(ixScanPath)); - } + // Verify that the results obtained from the $** index are identical to a COLLSCAN. + assertResultsEq(coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint(keyPattern), + coll.find({$or: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1})); + + // Perform an $and for this operation across all indexed fields; for instance: + // {$and: [{a: {$gte: 50}}, {'b.c': {$gte: 50}}, {'b.d.e': {$gte: 50}}]}. + const explainedAnd = coll.find({$and: multiFieldPreds}).explain(); + const winningIxScan = getPlanStages(explainedAnd.queryPlanner.winningPlan, "IXSCAN"); + + // Extract information about the rejected plans. We should have one IXSCAN for each $** + // candidate that wasn't the winner. Before SERVER-36521 banned them for $** indexes, a + // number of AND_SORTED plans would also be generated here; we search for these in order + // to verify that no such plans now exist. + const rejectedPlans = getRejectedPlans(explainedAnd); + let rejectedIxScans = [], rejectedAndSorted = []; + for (let rejectedPlan of rejectedPlans) { + rejectedAndSorted = rejectedAndSorted.concat(getPlanStages(rejectedPlan, "AND_SORTED")); + rejectedIxScans = rejectedIxScans.concat(getPlanStages(rejectedPlan, "IXSCAN")); + } - // Verify that the results obtained from the $** index are identical to a COLLSCAN. - assertResultsEq(coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint(keyPattern), - coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1})); + // Confirm that no AND_SORTED plans were generated. + assert.eq(rejectedAndSorted.length, 0); + + // We should find that one of the available $** subindexes has been chosen as the + // winner, and all other candidate $** indexes are present in 'rejectedPlans'. 
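+        // (Each indexed path yields one candidate $** sub-index per shard; one wins and the
+        // remaining expectedPaths.length - 1 per shard appear among the rejected plans.)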
+ assert.eq(winningIxScan.length, numShards); + assert.eq(rejectedIxScans.length, numShards * (expectedPaths.length - 1)); + + // Verify that each of the IXSCANs have the expected bounds and $_path key. + for (let ixScan of winningIxScan.concat(rejectedIxScans)) { + // {$_path: ["['path.to.field', 'path.to.field']"], path.to.field: [[bounds]]} + const ixScanPath = JSON.parse(ixScan.indexBounds.$_path[0])[0]; + assert.eq(ixScan.indexBounds[ixScanPath], op.bounds); + assert(expectedPaths.includes(ixScanPath)); } - } - // Test a $** index that indexes the entire document. - runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e', 'b.f']); - - // Test a $** index on a single subtree. - runWildcardIndexTest({'a.$**': 1}, null, ['a']); - runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e', 'b.f']); - runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']); - - // Test a $** index which includes a subset of paths. - runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']); - runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e', 'b.f']); - runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']); - runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']); - - // Test a $** index which excludes a subset of paths. - runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e', 'b.f']); - runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']); - runWildcardIndexTest({'$**': 1}, {'b.d': 0}, ['a', 'b.c', 'b.f']); - runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']); + // Verify that the results obtained from the $** index are identical to a COLLSCAN. + assertResultsEq(coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint(keyPattern), + coll.find({$and: multiFieldPreds}).sort({_id: 1}).hint({$natural: 1})); + } +} + +// Test a $** index that indexes the entire document. +runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e', 'b.f']); + +// Test a $** index on a single subtree. +runWildcardIndexTest({'a.$**': 1}, null, ['a']); +runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e', 'b.f']); +runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']); + +// Test a $** index which includes a subset of paths. +runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']); +runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e', 'b.f']); +runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']); +runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']); + +// Test a $** index which excludes a subset of paths. +runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e', 'b.f']); +runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']); +runWildcardIndexTest({'$**': 1}, {'b.d': 0}, ['a', 'b.c', 'b.f']); +runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']); })(); diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js index 634b7cb0368..9b42413955c 100644 --- a/jstests/core/wildcard_index_cached_plans.js +++ b/jstests/core/wildcard_index_cached_plans.js @@ -15,140 +15,142 @@ * ] */ (function() { - "use strict"; - - load('jstests/libs/analyze_plan.js'); // For getPlanStage(). - load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. - load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos. - - const coll = db.wildcard_cached_plans; - coll.drop(); - - assert.commandWorked(coll.createIndex({"b.$**": 1})); - assert.commandWorked(coll.createIndex({"a": 1})); - - // In order for the plan cache to be used, there must be more than one plan available. 
Insert - // data into the collection such that the b.$** index will be far more selective than the index - // on 'a' for the query {a: 1, b: 1}. - for (let i = 0; i < 1000; i++) { - assert.commandWorked(coll.insert({a: 1})); - } - assert.commandWorked(coll.insert({a: 1, b: 1})); - - function getCacheEntryForQuery(query) { - const aggRes = - FixtureHelpers.getPrimaryForNodeHostingDatabase(db) - .getCollection(coll.getFullName()) - .aggregate([ - {$planCacheStats: {}}, - {$match: {createdFromQuery: {query: query, sort: {}, projection: {}}}} - ]) - .toArray(); - assert.lte(aggRes.length, 1); - if (aggRes.length > 0) { - return aggRes[0]; - } - return null; +"use strict"; + +load('jstests/libs/analyze_plan.js'); // For getPlanStage(). +load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. +load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos. + +const coll = db.wildcard_cached_plans; +coll.drop(); + +assert.commandWorked(coll.createIndex({"b.$**": 1})); +assert.commandWorked(coll.createIndex({"a": 1})); + +// In order for the plan cache to be used, there must be more than one plan available. Insert +// data into the collection such that the b.$** index will be far more selective than the index +// on 'a' for the query {a: 1, b: 1}. +for (let i = 0; i < 1000; i++) { + assert.commandWorked(coll.insert({a: 1})); +} +assert.commandWorked(coll.insert({a: 1, b: 1})); + +function getCacheEntryForQuery(query) { + const aggRes = FixtureHelpers.getPrimaryForNodeHostingDatabase(db) + .getCollection(coll.getFullName()) + .aggregate([ + {$planCacheStats: {}}, + {$match: {createdFromQuery: {query: query, sort: {}, projection: {}}}} + ]) + .toArray(); + assert.lte(aggRes.length, 1); + if (aggRes.length > 0) { + return aggRes[0]; } - - function getPlanCacheKeyFromExplain(explainRes) { - const hash = FixtureHelpers.isMongos(db) - ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey - : explainRes.queryPlanner.planCacheKey; - assert.eq(typeof(hash), "string"); - return hash; - } - - function getPlanCacheKey(query) { - return getPlanCacheKeyFromExplain( - assert.commandWorked(coll.explain().find(query).finish())); - } - - const query = {a: 1, b: 1}; - - // The plan cache should be empty. - assert.eq(getCacheEntryForQuery(query), null); - - // Run the query twice, once to create the cache entry, and again to make the cache entry - // active. - for (let i = 0; i < 2; i++) { - assert.eq(coll.find(query).itcount(), 1); - } - - // The plan cache should no longer be empty. Check that the chosen plan uses the b.$** index. - const cacheEntry = getCacheEntryForQuery(query); - assert.neq(cacheEntry, null); - assert.eq(cacheEntry.isActive, true); - // Should be at least two plans: one using the {a: 1} index and the other using the b.$** index. - assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans)); - const plan = cacheEntry.creationExecStats[0].executionStages; - const ixScanStage = getPlanStage(plan, "IXSCAN"); - assert.neq(ixScanStage, null, () => tojson(plan)); - assert.eq(ixScanStage.keyPattern, {"$_path": 1, "b": 1}, () => tojson(plan)); - - // Run the query again. This time it should use the cached plan. We should get the same result - // as earlier. + return null; +} + +function getPlanCacheKeyFromExplain(explainRes) { + const hash = FixtureHelpers.isMongos(db) + ? 
explainRes.queryPlanner.winningPlan.shards[0].planCacheKey + : explainRes.queryPlanner.planCacheKey; + assert.eq(typeof (hash), "string"); + return hash; +} + +function getPlanCacheKey(query) { + return getPlanCacheKeyFromExplain(assert.commandWorked(coll.explain().find(query).finish())); +} + +const query = { + a: 1, + b: 1 +}; + +// The plan cache should be empty. +assert.eq(getCacheEntryForQuery(query), null); + +// Run the query twice, once to create the cache entry, and again to make the cache entry +// active. +for (let i = 0; i < 2; i++) { assert.eq(coll.find(query).itcount(), 1); +} + +// The plan cache should no longer be empty. Check that the chosen plan uses the b.$** index. +const cacheEntry = getCacheEntryForQuery(query); +assert.neq(cacheEntry, null); +assert.eq(cacheEntry.isActive, true); +// Should be at least two plans: one using the {a: 1} index and the other using the b.$** index. +assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans)); +const plan = cacheEntry.creationExecStats[0].executionStages; +const ixScanStage = getPlanStage(plan, "IXSCAN"); +assert.neq(ixScanStage, null, () => tojson(plan)); +assert.eq(ixScanStage.keyPattern, {"$_path": 1, "b": 1}, () => tojson(plan)); + +// Run the query again. This time it should use the cached plan. We should get the same result +// as earlier. +assert.eq(coll.find(query).itcount(), 1); + +// Now run a query where b is null. This should have a different shape key from the previous +// query since $** indexes are sparse. +const queryWithBNull = { + a: 1, + b: null +}; +for (let i = 0; i < 2; i++) { + assert.eq(coll.find({a: 1, b: null}).itcount(), 1000); +} +assert.neq(getPlanCacheKey(queryWithBNull), getPlanCacheKey(query)); + +// There should only have been one solution for the above query, so it would not get cached. +assert.eq(getCacheEntryForQuery({a: 1, b: null}), null); + +// Check that indexability discriminators work with collations. +(function() { +// Create wildcard index with a collation. +assertDropAndRecreateCollection(db, coll.getName(), {collation: {locale: "en_US", strength: 1}}); +assert.commandWorked(coll.createIndex({"b.$**": 1})); + +// Run a query which uses a different collation from that of the index, but does not use +// string bounds. +const queryWithoutStringExplain = + coll.explain().find({a: 5, b: 5}).collation({locale: "fr"}).finish(); +let ixScans = getPlanStages(queryWithoutStringExplain.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); +assert.eq(ixScans[0].keyPattern, {$_path: 1, b: 1}); + +// Run a query which uses a different collation from that of the index and does have string +// bounds. +const queryWithStringExplain = + coll.explain().find({a: 5, b: "a string"}).collation({locale: "fr"}).finish(); +ixScans = getPlanStages(queryWithStringExplain.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(ixScans.length, 0); + +// Check that the shapes are different since the query which matches on a string will not +// be eligible to use the b.$** index (since the index has a different collation). +assert.neq(getPlanCacheKeyFromExplain(queryWithoutStringExplain), + getPlanCacheKeyFromExplain(queryWithStringExplain)); +})(); - // Now run a query where b is null. This should have a different shape key from the previous - // query since $** indexes are sparse. 
- const queryWithBNull = {a: 1, b: null}; - for (let i = 0; i < 2; i++) { - assert.eq(coll.find({a: 1, b: null}).itcount(), 1000); - } - assert.neq(getPlanCacheKey(queryWithBNull), getPlanCacheKey(query)); - - // There should only have been one solution for the above query, so it would not get cached. - assert.eq(getCacheEntryForQuery({a: 1, b: null}), null); - - // Check that indexability discriminators work with collations. - (function() { - // Create wildcard index with a collation. - assertDropAndRecreateCollection( - db, coll.getName(), {collation: {locale: "en_US", strength: 1}}); - assert.commandWorked(coll.createIndex({"b.$**": 1})); - - // Run a query which uses a different collation from that of the index, but does not use - // string bounds. - const queryWithoutStringExplain = - coll.explain().find({a: 5, b: 5}).collation({locale: "fr"}).finish(); - let ixScans = getPlanStages(queryWithoutStringExplain.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.eq(ixScans[0].keyPattern, {$_path: 1, b: 1}); - - // Run a query which uses a different collation from that of the index and does have string - // bounds. - const queryWithStringExplain = - coll.explain().find({a: 5, b: "a string"}).collation({locale: "fr"}).finish(); - ixScans = getPlanStages(queryWithStringExplain.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(ixScans.length, 0); - - // Check that the shapes are different since the query which matches on a string will not - // be eligible to use the b.$** index (since the index has a different collation). - assert.neq(getPlanCacheKeyFromExplain(queryWithoutStringExplain), - getPlanCacheKeyFromExplain(queryWithStringExplain)); - })(); - - // Check that indexability discriminators work with partial wildcard indexes. - (function() { - assertDropAndRecreateCollection(db, coll.getName()); - assert.commandWorked( - coll.createIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 5}}})); - - // Run a query for a value included by the partial filter expression. - const queryIndexedExplain = coll.find({a: 4}).explain(); - let ixScans = getPlanStages(queryIndexedExplain.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.eq(ixScans[0].keyPattern, {$_path: 1, a: 1}); - - // Run a query which tries to get a value not included by the partial filter expression. - const queryUnindexedExplain = coll.find({a: 100}).explain(); - ixScans = getPlanStages(queryUnindexedExplain.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(ixScans.length, 0); - - // Check that the shapes are different since the query which searches for a value not - // included by the partial filter expression won't be eligible to use the $** index. - assert.neq(getPlanCacheKeyFromExplain(queryIndexedExplain), - getPlanCacheKeyFromExplain(queryUnindexedExplain)); - })(); +// Check that indexability discriminators work with partial wildcard indexes. +(function() { +assertDropAndRecreateCollection(db, coll.getName()); +assert.commandWorked(coll.createIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 5}}})); + +// Run a query for a value included by the partial filter expression. 
+const queryIndexedExplain = coll.find({a: 4}).explain(); +let ixScans = getPlanStages(queryIndexedExplain.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); +assert.eq(ixScans[0].keyPattern, {$_path: 1, a: 1}); + +// Run a query which tries to get a value not included by the partial filter expression. +const queryUnindexedExplain = coll.find({a: 100}).explain(); +ixScans = getPlanStages(queryUnindexedExplain.queryPlanner.winningPlan, "IXSCAN"); +assert.eq(ixScans.length, 0); + +// Check that the shapes are different since the query which searches for a value not +// included by the partial filter expression won't be eligible to use the $** index. +assert.neq(getPlanCacheKeyFromExplain(queryIndexedExplain), + getPlanCacheKeyFromExplain(queryUnindexedExplain)); +})(); })(); diff --git a/jstests/core/wildcard_index_collation.js b/jstests/core/wildcard_index_collation.js index 5e71100c7c2..9ccbc0181dd 100644 --- a/jstests/core/wildcard_index_collation.js +++ b/jstests/core/wildcard_index_collation.js @@ -9,116 +9,116 @@ * requires_non_retryable_writes] */ (function() { - "user strict"; +"user strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For getPlanStages. - load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers. - load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. - load("jstests/libs/fixture_helpers.js"); // For isMongos. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers. +load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. +load("jstests/libs/fixture_helpers.js"); // For isMongos. - const assertArrayEq = (l, r) => assert(arrayEq(l, r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r)); - // Create the collection and assign it a default case-insensitive collation. - const coll = assertDropAndRecreateCollection( - db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}}); +// Create the collection and assign it a default case-insensitive collation. +const coll = assertDropAndRecreateCollection( + db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}}); - // Extracts the winning plan for the given query and projection from the explain output. - const winningPlan = (query, proj) => FixtureHelpers.isMongos(db) - ? coll.find(query, proj).explain().queryPlanner.winningPlan.shards[0].winningPlan - : coll.find(query, proj).explain().queryPlanner.winningPlan; +// Extracts the winning plan for the given query and projection from the explain output. +const winningPlan = (query, proj) => FixtureHelpers.isMongos(db) + ? coll.find(query, proj).explain().queryPlanner.winningPlan.shards[0].winningPlan + : coll.find(query, proj).explain().queryPlanner.winningPlan; - // Runs the given query and confirms that: (1) the $** was used to answer the query, (2) the - // results produced by the $** index match the given 'expectedResults', and (3) the same output - // is produced by a COLLSCAN with the same collation. - function assertWildcardIndexAnswersQuery(query, expectedResults, projection) { - // Verify that the $** index can answer this query. 
- const ixScans = getPlanStages(winningPlan(query, (projection || {_id: 0})), "IXSCAN"); - assert.gt(ixScans.length, 0, tojson(coll.find(query).explain())); - ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path)); +// Runs the given query and confirms that: (1) the $** was used to answer the query, (2) the +// results produced by the $** index match the given 'expectedResults', and (3) the same output +// is produced by a COLLSCAN with the same collation. +function assertWildcardIndexAnswersQuery(query, expectedResults, projection) { + // Verify that the $** index can answer this query. + const ixScans = getPlanStages(winningPlan(query, (projection || {_id: 0})), "IXSCAN"); + assert.gt(ixScans.length, 0, tojson(coll.find(query).explain())); + ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path)); - // Assert that the $** index produces the expected results, and that these are the same - // as those produced by a COLLSCAN with the same collation. - const wildcardResults = coll.find(query, (projection || {_id: 0})).toArray(); - assertArrayEq(wildcardResults, expectedResults); - assertArrayEq(wildcardResults, - coll.find(query, (projection || {_id: 0})) - .collation({locale: "en_US", strength: 1}) - .hint({$natural: 1}) - .toArray()); - } + // Assert that the $** index produces the expected results, and that these are the same + // as those produced by a COLLSCAN with the same collation. + const wildcardResults = coll.find(query, (projection || {_id: 0})).toArray(); + assertArrayEq(wildcardResults, expectedResults); + assertArrayEq(wildcardResults, + coll.find(query, (projection || {_id: 0})) + .collation({locale: "en_US", strength: 1}) + .hint({$natural: 1}) + .toArray()); +} - // Confirms that the index matching the given keyPattern has the specified collation. - function assertIndexHasCollation(keyPattern, collation) { - var indexSpecs = coll.getIndexes(); - var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation); - assert.neq(null, - found, - "Index with key pattern " + tojson(keyPattern) + " and collation " + - tojson(collation) + " not found: " + tojson(indexSpecs)); - } +// Confirms that the index matching the given keyPattern has the specified collation. +function assertIndexHasCollation(keyPattern, collation) { + var indexSpecs = coll.getIndexes(); + var found = GetIndexHelpers.findByKeyPattern(indexSpecs, keyPattern, collation); + assert.neq(null, + found, + "Index with key pattern " + tojson(keyPattern) + " and collation " + + tojson(collation) + " not found: " + tojson(indexSpecs)); +} - // Confirm that the $** index inherits the collection's default collation. - assert.commandWorked(coll.createIndex({"$**": 1})); - assertIndexHasCollation({"$**": 1}, { - locale: "en_US", - caseLevel: false, - caseFirst: "off", - strength: 1, - numericOrdering: false, - alternate: "non-ignorable", - maxVariable: "punct", - normalization: false, - backwards: false, - version: "57.1", - }); +// Confirm that the $** index inherits the collection's default collation. +assert.commandWorked(coll.createIndex({"$**": 1})); +assertIndexHasCollation({"$**": 1}, { + locale: "en_US", + caseLevel: false, + caseFirst: "off", + strength: 1, + numericOrdering: false, + alternate: "non-ignorable", + maxVariable: "punct", + normalization: false, + backwards: false, + version: "57.1", +}); - // Insert a series of documents whose fieldnames and values differ only by case. 
- assert.commandWorked(coll.insert({a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5})); - assert.commandWorked(coll.insert({a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5})); - assert.commandWorked(coll.insert({A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5})); - assert.commandWorked(coll.insert({A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5})); +// Insert a series of documents whose fieldnames and values differ only by case. +assert.commandWorked(coll.insert({a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5})); +assert.commandWorked(coll.insert({a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5})); +assert.commandWorked(coll.insert({A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5})); +assert.commandWorked(coll.insert({A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5})); - // Confirm that only the document's values adhere to the case-insensitive collation. The field - // paths, which are also present in the $** index keys, are evaluated using simple binary - // comparison; so for instance, path "a.b" does *not* match path "A.B". - assertWildcardIndexAnswersQuery({"a.b": "string"}, [ - {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}, - {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5} - ]); - assertWildcardIndexAnswersQuery({"A.B": "string"}, [ - {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}, - {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5} - ]); +// Confirm that only the document's values adhere to the case-insensitive collation. The field +// paths, which are also present in the $** index keys, are evaluated using simple binary +// comparison; so for instance, path "a.b" does *not* match path "A.B". +assertWildcardIndexAnswersQuery({"a.b": "string"}, [ + {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}, + {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5} +]); +assertWildcardIndexAnswersQuery({"A.B": "string"}, [ + {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}, + {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5} +]); - // All documents in the collection are returned if we query over both upper- and lower-case - // fieldnames, or when the fieldname has a consistent case across all documents. - const allDocs = coll.find({}, {_id: 0}).toArray(); - assertWildcardIndexAnswersQuery({$or: [{"a.c": "string"}, {"A.C": "string"}]}, allDocs); - assertWildcardIndexAnswersQuery({d: "string"}, allDocs); +// All documents in the collection are returned if we query over both upper- and lower-case +// fieldnames, or when the fieldname has a consistent case across all documents. +const allDocs = coll.find({}, {_id: 0}).toArray(); +assertWildcardIndexAnswersQuery({$or: [{"a.c": "string"}, {"A.C": "string"}]}, allDocs); +assertWildcardIndexAnswersQuery({d: "string"}, allDocs); - // Confirm that the $** index also differentiates between upper and lower fieldname case when - // querying fields which do not contain string values. - assertWildcardIndexAnswersQuery({e: 5}, [ - {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}, - {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5} - ]); - assertWildcardIndexAnswersQuery({E: 5}, [ - {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}, - {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5} - ]); +// Confirm that the $** index also differentiates between upper and lower fieldname case when +// querying fields which do not contain string values. 
+assertWildcardIndexAnswersQuery({e: 5}, [ + {a: {b: "string", c: "STRING"}, d: "sTrInG", e: 5}, + {a: {b: "STRING", c: "string"}, d: "StRiNg", e: 5} +]); +assertWildcardIndexAnswersQuery({E: 5}, [ + {A: {B: "string", C: "STRING"}, d: "sTrInG", E: 5}, + {A: {B: "STRING", C: "string"}, d: "StRiNg", E: 5} +]); - // Confirm that the $** index produces a covered plan for a query on non-string, non-object, - // non-array values. - assert(isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1}))); - assert(isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, E: 1}))); +// Confirm that the $** index produces a covered plan for a query on non-string, non-object, +// non-array values. +assert(isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1}))); +assert(isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, E: 1}))); - // Confirm that the $** index differentiates fieldname case when attempting to cover. - assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, E: 1}))); - assert(!isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, e: 1}))); +// Confirm that the $** index differentiates fieldname case when attempting to cover. +assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, E: 1}))); +assert(!isIndexOnly(coll.getDB(), winningPlan({E: 5}, {_id: 0, e: 1}))); - // Confirm that attempting to project the virtual $_path field which is present in $** index - // keys produces a non-covered solution, which nonetheless returns the correct results. - assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1, $_path: 1}))); - assertWildcardIndexAnswersQuery({e: 5}, [{e: 5}, {e: 5}], {_id: 0, e: 1, $_path: 1}); +// Confirm that attempting to project the virtual $_path field which is present in $** index +// keys produces a non-covered solution, which nonetheless returns the correct results. +assert(!isIndexOnly(coll.getDB(), winningPlan({e: 5}, {_id: 0, e: 1, $_path: 1}))); +assertWildcardIndexAnswersQuery({e: 5}, [{e: 5}, {e: 5}], {_id: 0, e: 1, $_path: 1}); })();
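The collation test above boils down to one property of $** keys: the value half of each {$_path, value} entry honours the collection collation, while the $_path half is compared byte-for-byte (as the test's own comments note). A minimal shell sketch of that behaviour, using an illustrative collection named 'demo' rather than any of the test collections:

// Sketch only: 'demo' and its contents are assumptions, not part of these tests.
db.demo.drop();
db.createCollection("demo", {collation: {locale: "en_US", strength: 1}});
db.demo.createIndex({"$**": 1});  // inherits the case-insensitive default collation
db.demo.insert({a: {b: "string"}});
db.demo.insert({A: {B: "STRING"}});
// Values match case-insensitively under the collation...
assert.eq(1, db.demo.find({"a.b": "STRING"}).hint({"$**": 1}).itcount());
// ...but each query only reaches the binary-identical field path.
assert.eq(1, db.demo.find({"A.B": "string"}).hint({"$**": 1}).itcount());
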
\ No newline at end of file diff --git a/jstests/core/wildcard_index_count.js b/jstests/core/wildcard_index_count.js index 7b684b29f3c..6c9fa8f05a3 100644 --- a/jstests/core/wildcard_index_count.js +++ b/jstests/core/wildcard_index_count.js @@ -6,90 +6,90 @@ // for retrying on interrupt is not prepared to handle aggregation explain. // @tags: [assumes_unsharded_collection, does_not_support_stepdowns] (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); +load("jstests/libs/analyze_plan.js"); - const coll = db.wildcard_index_count; - coll.drop(); +const coll = db.wildcard_index_count; +coll.drop(); - assert.commandWorked(coll.insert([ - {a: 3}, - {a: null}, - {a: [-1, 0]}, - {a: [4, -3, 5]}, - {}, - {a: {b: 4}}, - {a: []}, - {a: [[], {}]}, - {a: {}}, - ])); - assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.insert([ + {a: 3}, + {a: null}, + {a: [-1, 0]}, + {a: [4, -3, 5]}, + {}, + {a: {b: 4}}, + {a: []}, + {a: [[], {}]}, + {a: {}}, +])); +assert.commandWorked(coll.createIndex({"$**": 1})); - assert.eq(2, coll.count({a: {$gt: 0}})); - assert.eq(2, coll.find({a: {$gt: 0}}).itcount()); - assert.eq(2, coll.aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]).next().count); +assert.eq(2, coll.count({a: {$gt: 0}})); +assert.eq(2, coll.find({a: {$gt: 0}}).itcount()); +assert.eq(2, coll.aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]).next().count); - // Verify that this query uses a COUNT_SCAN. - let explain = coll.explain().count({a: {$gt: 0}}); - let countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); - assert.neq(null, countScan, explain); - assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan); +// Verify that this query uses a COUNT_SCAN. +let explain = coll.explain().count({a: {$gt: 0}}); +let countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); +assert.neq(null, countScan, explain); +assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan); - // Query should also COUNT_SCAN when expressed as an aggregation. - explain = coll.explain().aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]); - countScan = getAggPlanStage(explain, "COUNT_SCAN"); - assert.neq(null, countScan, explain); - assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan); +// Query should also COUNT_SCAN when expressed as an aggregation. +explain = coll.explain().aggregate([{$match: {a: {$gt: 0}}}, {$count: "count"}]); +countScan = getAggPlanStage(explain, "COUNT_SCAN"); +assert.neq(null, countScan, explain); +assert.eq({$_path: 1, a: 1}, countScan.keyPattern, countScan); - // $count of entire collection does not COUNT_SCAN. - assert.eq(9, coll.find().itcount()); - assert.eq(9, coll.aggregate([{$count: "count"}]).next().count); - explain = coll.explain().aggregate([{$count: "count"}]); - countScan = getAggPlanStage(explain, "COUNT_SCAN"); - assert.eq(null, countScan, explain); +// $count of entire collection does not COUNT_SCAN. +assert.eq(9, coll.find().itcount()); +assert.eq(9, coll.aggregate([{$count: "count"}]).next().count); +explain = coll.explain().aggregate([{$count: "count"}]); +countScan = getAggPlanStage(explain, "COUNT_SCAN"); +assert.eq(null, countScan, explain); - // When the count consists of multiple intervals, we cannot use COUNT_SCAN. 
- assert.eq(2, coll.count({a: {$in: [3, 4]}})); - assert.eq(2, coll.find({a: {$in: [3, 4]}}).itcount()); - assert.eq(2, coll.aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]).next().count); - explain = coll.explain().aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]); - countScan = getAggPlanStage(explain, "COUNT_SCAN"); - assert.eq(null, countScan, explain); - let ixscan = getAggPlanStage(explain, "IXSCAN"); - assert.neq(null, ixscan, explain); - assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan); +// When the count consists of multiple intervals, we cannot use COUNT_SCAN. +assert.eq(2, coll.count({a: {$in: [3, 4]}})); +assert.eq(2, coll.find({a: {$in: [3, 4]}}).itcount()); +assert.eq(2, coll.aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]).next().count); +explain = coll.explain().aggregate([{$match: {a: {$in: [3, 4]}}}, {$count: "count"}]); +countScan = getAggPlanStage(explain, "COUNT_SCAN"); +assert.eq(null, countScan, explain); +let ixscan = getAggPlanStage(explain, "IXSCAN"); +assert.neq(null, ixscan, explain); +assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan); - // Count with an equality match on an empty array cannot use COUNT_SCAN. - assert.eq(2, coll.count({a: {$eq: []}})); - assert.eq(2, coll.find({a: {$eq: []}}).itcount()); - assert.eq(2, coll.aggregate([{$match: {a: {$eq: []}}}, {$count: "count"}]).next().count); - explain = coll.explain().count({a: {$eq: []}}); - countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); - assert.eq(null, countScan, explain); - ixscan = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); - assert.neq(null, ixscan, explain); - assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan); +// Count with an equality match on an empty array cannot use COUNT_SCAN. +assert.eq(2, coll.count({a: {$eq: []}})); +assert.eq(2, coll.find({a: {$eq: []}}).itcount()); +assert.eq(2, coll.aggregate([{$match: {a: {$eq: []}}}, {$count: "count"}]).next().count); +explain = coll.explain().count({a: {$eq: []}}); +countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); +assert.eq(null, countScan, explain); +ixscan = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); +assert.neq(null, ixscan, explain); +assert.eq({$_path: 1, a: 1}, ixscan.keyPattern, ixscan); - // Count with an equality match on an empty object can use COUNT_SCAN. - assert.eq(2, coll.count({a: {$eq: {}}})); - assert.eq(2, coll.find({a: {$eq: {}}}).itcount()); - assert.eq(2, coll.aggregate([{$match: {a: {$eq: {}}}}, {$count: "count"}]).next().count); - explain = coll.explain().count({a: {$eq: {}}}); - countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); - assert.eq({$_path: 1, a: 1}, countScan.keyPattern, explain); +// Count with an equality match on an empty object can use COUNT_SCAN. +assert.eq(2, coll.count({a: {$eq: {}}})); +assert.eq(2, coll.find({a: {$eq: {}}}).itcount()); +assert.eq(2, coll.aggregate([{$match: {a: {$eq: {}}}}, {$count: "count"}]).next().count); +explain = coll.explain().count({a: {$eq: {}}}); +countScan = getPlanStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"); +assert.eq({$_path: 1, a: 1}, countScan.keyPattern, explain); - // Count with equality to a non-empty object cannot use the wildcard index. 
- assert.eq(1, coll.count({a: {b: 4}})); - assert.eq(1, coll.find({a: {b: 4}}).itcount()); - assert.eq(1, coll.aggregate([{$match: {a: {b: 4}}}, {$count: "count"}]).next().count); - explain = coll.explain().count({a: {b: 4}}); - assert(isCollscan(db, explain.queryPlanner.winningPlan), explain); +// Count with equality to a non-empty object cannot use the wildcard index. +assert.eq(1, coll.count({a: {b: 4}})); +assert.eq(1, coll.find({a: {b: 4}}).itcount()); +assert.eq(1, coll.aggregate([{$match: {a: {b: 4}}}, {$count: "count"}]).next().count); +explain = coll.explain().count({a: {b: 4}}); +assert(isCollscan(db, explain.queryPlanner.winningPlan), explain); - // Count with equality to a non-empty array cannot use the wildcard index. - assert.eq(1, coll.count({a: [-1, 0]})); - assert.eq(1, coll.find({a: [-1, 0]}).itcount()); - assert.eq(1, coll.aggregate([{$match: {a: [-1, 0]}}, {$count: "count"}]).next().count); - explain = coll.explain().count({a: [-1, 0]}); - assert(isCollscan(db, explain.queryPlanner.winningPlan), explain); +// Count with equality to a non-empty array cannot use the wildcard index. +assert.eq(1, coll.count({a: [-1, 0]})); +assert.eq(1, coll.find({a: [-1, 0]}).itcount()); +assert.eq(1, coll.aggregate([{$match: {a: [-1, 0]}}, {$count: "count"}]).next().count); +explain = coll.explain().count({a: [-1, 0]}); +assert(isCollscan(db, explain.queryPlanner.winningPlan), explain); }()); diff --git a/jstests/core/wildcard_index_covered_queries.js b/jstests/core/wildcard_index_covered_queries.js index df6142e859e..1e4451e8710 100644 --- a/jstests/core/wildcard_index_covered_queries.js +++ b/jstests/core/wildcard_index_covered_queries.js @@ -8,76 +8,76 @@ * @tags: [assumes_unsharded_collection, does_not_support_stepdowns] */ (function() { - "use strict"; - - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly. - - const assertArrayEq = (l, r) => assert(arrayEq(l, r)); - - const coll = db.wildcard_covered_query; - coll.drop(); - - // Confirms that the $** index can answer the given query and projection, that it produces a - // covered solution, and that the results are identical to those obtained by a COLLSCAN. If - // 'shouldFailToCover' is true, inverts the assertion and confirms that the given query and - // projection do *not* produce a covered plan. - function assertWildcardProvidesCoveredSolution(query, proj, shouldFailToCover = false) { - // Obtain the explain output for the given query and projection. We run the explain with - // 'executionStats' so that we can subsequently validate the number of documents examined. - const explainOut = assert.commandWorked(coll.find(query, proj).explain("executionStats")); - const winningPlan = explainOut.queryPlanner.winningPlan; - - // Verify that the $** index provided the winning solution for this query. - const ixScans = getPlanStages(winningPlan, "IXSCAN"); - assert.gt(ixScans.length, 0, tojson(explainOut)); - ixScans.forEach((ixScan) => assert(ixScan.keyPattern.hasOwnProperty("$_path"))); - - // Verify that the solution is covered, and that no documents were examined. If the argument - // 'shouldFailToCover' is true, invert the validation to confirm that it is NOT covered. - assert.eq(!!explainOut.executionStats.totalDocsExamined, shouldFailToCover); - assert.eq(isIndexOnly(coll.getDB(), winningPlan), !shouldFailToCover); - - // Verify that the query covered by the $** index produces the same results as a COLLSCAN. 
- assertArrayEq(coll.find(query, proj).toArray(), - coll.find(query, proj).hint({$natural: 1}).toArray()); - } - - // Create a new collection and build a $** index on it. - const bulk = coll.initializeUnorderedBulkOp(); - for (let i = 0; i < 200; i++) { - bulk.insert({a: {b: i, c: `${(i+1)}`}, d: (i + 2)}); - } - assert.commandWorked(bulk.execute()); - assert.commandWorked(coll.createIndex({"$**": 1})); - - // Verify that the $** index can cover an exact match on an integer value. - assertWildcardProvidesCoveredSolution({"a.b": 10}, {_id: 0, "a.b": 1}); - - // Verify that the $** index can cover an exact match on a string value. - assertWildcardProvidesCoveredSolution({"a.c": "10"}, {_id: 0, "a.c": 1}); - - // Verify that the $** index can cover a range query for integer values. - assertWildcardProvidesCoveredSolution({"a.b": {$gt: 10, $lt: 99}}, {_id: 0, "a.b": 1}); - - // Verify that the $** index can cover a range query for string values. - assertWildcardProvidesCoveredSolution({"a.c": {$gt: "10", $lt: "99"}}, {_id: 0, "a.c": 1}); - - // Verify that the $** index can cover an $in query for integer values. - assertWildcardProvidesCoveredSolution({"a.b": {$in: [0, 50, 100, 150]}}, {_id: 0, "a.b": 1}); - - // Verify that the $** index can cover an $in query for string values. - assertWildcardProvidesCoveredSolution({"a.c": {$in: ["0", "50", "100", "150"]}}, - {_id: 0, "a.c": 1}); - - // Verify that attempting to project the virtual $_path field from the $** keyPattern will fail - // to do so and will instead produce a non-covered query. However, this query will nonetheless - // output the correct results. - const shouldFailToCover = true; - assertWildcardProvidesCoveredSolution( - {d: {$in: [0, 25, 50, 75, 100]}}, {_id: 0, d: 1, $_path: 1}, shouldFailToCover); - - // Verify that predicates which produce inexact-fetch bounds are not covered by a $** index. - assertWildcardProvidesCoveredSolution( - {d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover); +"use strict"; + +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly. + +const assertArrayEq = (l, r) => assert(arrayEq(l, r)); + +const coll = db.wildcard_covered_query; +coll.drop(); + +// Confirms that the $** index can answer the given query and projection, that it produces a +// covered solution, and that the results are identical to those obtained by a COLLSCAN. If +// 'shouldFailToCover' is true, inverts the assertion and confirms that the given query and +// projection do *not* produce a covered plan. +function assertWildcardProvidesCoveredSolution(query, proj, shouldFailToCover = false) { + // Obtain the explain output for the given query and projection. We run the explain with + // 'executionStats' so that we can subsequently validate the number of documents examined. + const explainOut = assert.commandWorked(coll.find(query, proj).explain("executionStats")); + const winningPlan = explainOut.queryPlanner.winningPlan; + + // Verify that the $** index provided the winning solution for this query. + const ixScans = getPlanStages(winningPlan, "IXSCAN"); + assert.gt(ixScans.length, 0, tojson(explainOut)); + ixScans.forEach((ixScan) => assert(ixScan.keyPattern.hasOwnProperty("$_path"))); + + // Verify that the solution is covered, and that no documents were examined. If the argument + // 'shouldFailToCover' is true, invert the validation to confirm that it is NOT covered. 
+ assert.eq(!!explainOut.executionStats.totalDocsExamined, shouldFailToCover); + assert.eq(isIndexOnly(coll.getDB(), winningPlan), !shouldFailToCover); + + // Verify that the query covered by the $** index produces the same results as a COLLSCAN. + assertArrayEq(coll.find(query, proj).toArray(), + coll.find(query, proj).hint({$natural: 1}).toArray()); +} + +// Create a new collection and build a $** index on it. +const bulk = coll.initializeUnorderedBulkOp(); +for (let i = 0; i < 200; i++) { + bulk.insert({a: {b: i, c: `${(i + 1)}`}, d: (i + 2)}); +} +assert.commandWorked(bulk.execute()); +assert.commandWorked(coll.createIndex({"$**": 1})); + +// Verify that the $** index can cover an exact match on an integer value. +assertWildcardProvidesCoveredSolution({"a.b": 10}, {_id: 0, "a.b": 1}); + +// Verify that the $** index can cover an exact match on a string value. +assertWildcardProvidesCoveredSolution({"a.c": "10"}, {_id: 0, "a.c": 1}); + +// Verify that the $** index can cover a range query for integer values. +assertWildcardProvidesCoveredSolution({"a.b": {$gt: 10, $lt: 99}}, {_id: 0, "a.b": 1}); + +// Verify that the $** index can cover a range query for string values. +assertWildcardProvidesCoveredSolution({"a.c": {$gt: "10", $lt: "99"}}, {_id: 0, "a.c": 1}); + +// Verify that the $** index can cover an $in query for integer values. +assertWildcardProvidesCoveredSolution({"a.b": {$in: [0, 50, 100, 150]}}, {_id: 0, "a.b": 1}); + +// Verify that the $** index can cover an $in query for string values. +assertWildcardProvidesCoveredSolution({"a.c": {$in: ["0", "50", "100", "150"]}}, + {_id: 0, "a.c": 1}); + +// Verify that attempting to project the virtual $_path field from the $** keyPattern will fail +// to do so and will instead produce a non-covered query. However, this query will nonetheless +// output the correct results. +const shouldFailToCover = true; +assertWildcardProvidesCoveredSolution( + {d: {$in: [0, 25, 50, 75, 100]}}, {_id: 0, d: 1, $_path: 1}, shouldFailToCover); + +// Verify that predicates which produce inexact-fetch bounds are not covered by a $** index. +assertWildcardProvidesCoveredSolution( + {d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover); })();
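Coverage in the test above follows from the shape of wildcard keys: since each entry is {$_path: 1, <field>: 1}, a projection limited to the queried (non-multikey) field can be served from the index keys alone. A rough sketch of verifying that through executionStats, assuming an illustrative collection named 'demo':

// Sketch only: mirrors the intent of isIndexOnly() by checking docs examined.
db.demo.drop();
db.demo.createIndex({"$**": 1});
db.demo.insert({a: {b: 10, c: "10"}, d: 12});
const exp = db.demo.find({"a.b": 10}, {_id: 0, "a.b": 1}).explain("executionStats");
// A covered plan never fetches the documents themselves.
assert.eq(0, exp.executionStats.totalDocsExamined);
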
\ No newline at end of file diff --git a/jstests/core/wildcard_index_dedup.js b/jstests/core/wildcard_index_dedup.js index 4fd5bf5df8e..093d3e9d219 100644 --- a/jstests/core/wildcard_index_dedup.js +++ b/jstests/core/wildcard_index_dedup.js @@ -5,24 +5,24 @@ * scanned and return only a single object. */ (function() { - "use strict"; +"use strict"; - const coll = db.wildcard_index_dedup; - coll.drop(); +const coll = db.wildcard_index_dedup; +coll.drop(); - assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.createIndex({"$**": 1})); - assert.commandWorked(coll.insert({a: {b: 1, c: {f: 1, g: 1}}, d: {e: [1, 2, 3]}})); +assert.commandWorked(coll.insert({a: {b: 1, c: {f: 1, g: 1}}, d: {e: [1, 2, 3]}})); - // An $exists that matches multiple $** index paths from nested objects does not return - // duplicates of the same object. - assert.eq(1, coll.find({a: {$exists: true}}).hint({"$**": 1}).itcount()); +// An $exists that matches multiple $** index paths from nested objects does not return +// duplicates of the same object. +assert.eq(1, coll.find({a: {$exists: true}}).hint({"$**": 1}).itcount()); - // An $exists that matches multiple $** index paths from nested array does not return - // duplicates of the same object. - assert.eq(1, coll.find({d: {$exists: true}}).hint({"$**": 1}).itcount()); +// An $exists that matches multiple $** index paths from nested array does not return +// duplicates of the same object. +assert.eq(1, coll.find({d: {$exists: true}}).hint({"$**": 1}).itcount()); - // An $exists with dotted path that matches multiple $** index paths from nested objects - // does not return duplicates of the same object. - assert.eq(1, coll.find({"a.c": {$exists: true}}).hint({"$**": 1}).itcount()); +// An $exists with dotted path that matches multiple $** index paths from nested objects +// does not return duplicates of the same object. +assert.eq(1, coll.find({"a.c": {$exists: true}}).hint({"$**": 1}).itcount()); })();
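The dedup test works because a single document contributes one wildcard key per indexed path, so a broad predicate such as $exists can touch several keys that all point at the same record. A sketch of observing this through executionStats, with 'demo' as an assumed collection name:

// Sketch only: same document shape as the test, different collection name.
db.demo.drop();
db.demo.createIndex({"$**": 1});
db.demo.insert({a: {b: 1, c: {f: 1, g: 1}}, d: {e: [1, 2, 3]}});
const exp = db.demo.find({a: {$exists: true}}).hint({"$**": 1}).explain("executionStats");
print("keys examined: " + exp.executionStats.totalKeysExamined);  // may be > 1
assert.eq(1, exp.executionStats.nReturned);  // the object is still returned once
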
\ No newline at end of file diff --git a/jstests/core/wildcard_index_distinct_scan.js b/jstests/core/wildcard_index_distinct_scan.js index df831cbc5c9..f1e0fa67f3f 100644 --- a/jstests/core/wildcard_index_distinct_scan.js +++ b/jstests/core/wildcard_index_distinct_scan.js @@ -2,197 +2,197 @@ * Tests that a $** index can provide a DISTINCT_SCAN or indexed solution where appropriate. */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages. - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - const coll = db.all_paths_distinct_scan; - coll.drop(); +const coll = db.all_paths_distinct_scan; +coll.drop(); + +// Records whether the field which we are distinct-ing over is multikey. +let distinctFieldIsMultikey = false; - // Records whether the field which we are distinct-ing over is multikey. - let distinctFieldIsMultikey = false; - - // Insert a set of documents into the collection. The 'listOfValues' argument contains values of - // various types, and we insert numerous documents containing each of the values. This allows us - // to confirm that 'distinct' with a wildcard index (1) can return values of any type, (2) will - // only return the set of unique values, and (3) handles multikey values appropriately in cases - // where 'listOfValues' includes an array. - function insertTestData(fieldName, listOfValues) { - distinctFieldIsMultikey = listOfValues.some((val) => Array.isArray(val)); - const bulk = coll.initializeUnorderedBulkOp(); - coll.drop(); - for (let i = 0; i < 200; i++) { - const didx = (i % listOfValues.length); - bulk.insert({[fieldName]: listOfValues[didx], b: didx, c: (-i)}); - } - assert.commandWorked(bulk.execute()); +// Insert a set of documents into the collection. The 'listOfValues' argument contains values of +// various types, and we insert numerous documents containing each of the values. This allows us +// to confirm that 'distinct' with a wildcard index (1) can return values of any type, (2) will +// only return the set of unique values, and (3) handles multikey values appropriately in cases +// where 'listOfValues' includes an array. +function insertTestData(fieldName, listOfValues) { + distinctFieldIsMultikey = listOfValues.some((val) => Array.isArray(val)); + const bulk = coll.initializeUnorderedBulkOp(); + coll.drop(); + for (let i = 0; i < 200; i++) { + const didx = (i % listOfValues.length); + bulk.insert({[fieldName]: listOfValues[didx], b: didx, c: (-i)}); } + assert.commandWorked(bulk.execute()); +} - /** - * Runs a single wildcard distinct scan test. If 'expectedPath' is non-null, verifies that there - * is an indexed solution that uses the $** index with the given path string. If 'expectedPath' - * is null, verifies that no indexed solution was found. - */ - function assertWildcardDistinctScan( - {distinctKey, query, pathProjection, expectedScanType, expectedResults, expectedPath}) { - // Drop all indexes before running the test. This allows us to perform the distinct with a - // COLLSCAN at first, to confirm that the results are as expected. - assert.commandWorked(coll.dropIndexes()); - - // Confirm that the distinct runs with a COLLSCAN. 
- let winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan; +/** + * Runs a single wildcard distinct scan test. If 'expectedPath' is non-null, verifies that there + * is an indexed solution that uses the $** index with the given path string. If 'expectedPath' + * is null, verifies that no indexed solution was found. + */ +function assertWildcardDistinctScan( + {distinctKey, query, pathProjection, expectedScanType, expectedResults, expectedPath}) { + // Drop all indexes before running the test. This allows us to perform the distinct with a + // COLLSCAN at first, to confirm that the results are as expected. + assert.commandWorked(coll.dropIndexes()); + + // Confirm that the distinct runs with a COLLSCAN. + let winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan; + assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN")); + // Run the distinct and confirm that it produces the expected results. + assertArrayEq(coll.distinct(distinctKey, query), expectedResults); + + // Build a wildcard index on the collection and re-run the test. + const options = (pathProjection ? {wildcardProjection: pathProjection} : {}); + assert.commandWorked(coll.createIndex({"$**": 1}, options)); + + // We expect the following outcomes for a 'distinct' that attempts to use a $** index: + // - No query: COLLSCAN. + // - Query for object value on distinct field: COLLSCAN. + // - Query for non-object value on non-multikey distinct field: DISTINCT_SCAN. + // - Query for non-object value on multikey distinct field: IXSCAN with FETCH. + // - Query for non-object value on field other than the distinct field: IXSCAN with FETCH. + const fetchIsExpected = (expectedScanType !== "DISTINCT_SCAN"); + + // Explain the query, and determine whether an indexed solution is available. If + // 'expectedPath' is null, then we do not expect the $** index to provide a plan. + winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan; + if (!expectedPath) { assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN")); - // Run the distinct and confirm that it produces the expected results. - assertArrayEq(coll.distinct(distinctKey, query), expectedResults); - - // Build a wildcard index on the collection and re-run the test. - const options = (pathProjection ? {wildcardProjection: pathProjection} : {}); - assert.commandWorked(coll.createIndex({"$**": 1}, options)); - - // We expect the following outcomes for a 'distinct' that attempts to use a $** index: - // - No query: COLLSCAN. - // - Query for object value on distinct field: COLLSCAN. - // - Query for non-object value on non-multikey distinct field: DISTINCT_SCAN. - // - Query for non-object value on multikey distinct field: IXSCAN with FETCH. - // - Query for non-object value on field other than the distinct field: IXSCAN with FETCH. - const fetchIsExpected = (expectedScanType !== "DISTINCT_SCAN"); - - // Explain the query, and determine whether an indexed solution is available. If - // 'expectedPath' is null, then we do not expect the $** index to provide a plan. - winningPlan = coll.explain().distinct(distinctKey, query).queryPlanner.winningPlan; - if (!expectedPath) { - assert(planHasStage(coll.getDB(), winningPlan, "COLLSCAN")); - assert.eq(expectedScanType, "COLLSCAN"); - return; - } - - // Confirm that the $** distinct scan produces the expected results. 
- assertArrayEq(coll.distinct(distinctKey, query), expectedResults); - // Confirm that the $** plan adheres to 'fetchIsExpected' and 'expectedScanType'. - assert.eq(planHasStage(coll.getDB(), winningPlan, "FETCH"), fetchIsExpected); - assert(planHasStage(coll.getDB(), winningPlan, expectedScanType)); - assert.docEq({$_path: 1, [expectedPath]: 1}, - getPlanStages(winningPlan, expectedScanType).shift().keyPattern); + assert.eq(expectedScanType, "COLLSCAN"); + return; } - // The set of distinct values that should be produced by each of the test listed below. - const distinctValues = [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}]; - - // Define the set of values that the distinct field may take. The first test case consists - // entirely of non-multikey fields, while the second includes multikey fields. - const testCases = [ - // Non-multikey field values. - { - insertField: "a", - queryField: "a", - fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}] - }, - // Multikey field values. Note that values within arrays are unwrapped by the distinct - // scan, and empty arrays are thus not included. - { - insertField: "a", - queryField: "a", - fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, [], [9, 10], [{e: 11}]] - }, - // Non-multikey dotted field values. - { - insertField: "a", - queryField: "a.x", - fieldValues: [ - {x: 1}, - {x: 2}, - {x: "3"}, - {x: null}, - {x: {c: 5, d: 6}}, - {x: {d: 6, c: 5}}, - {x: {}}, - {x: 9}, - {x: 10}, - {x: {e: 11}} - ] - }, - // Multikey dotted field values. - { - insertField: "a", - queryField: "a.x", - fieldValues: [ - [{x: 1}], - [{x: 2}], - [{x: "3"}], - [{x: null}], - [{x: {c: 5, d: 6}}], - [{x: {d: 6, c: 5}}], - [{x: {}}], - [{x: []}], - [{x: 9}, {x: 10}], - [{x: [{e: 11}]}] - ] - } - ]; - - // Run all combinations of query, no-query, multikey and non-multikey distinct tests. - for (let testCase of testCases) { - // Log the start of the test and create the dataset. - jsTestLog("Test case: " + tojson(testCase)); - insertTestData(testCase.insertField, testCase.fieldValues); - - // Test that a $** index cannot provide an indexed 'distinct' without a query. - assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {}, - expectedScanType: "COLLSCAN", - expectedResults: distinctValues, - expectedPath: null - }); - - // Test that a $** index can provide an indexed 'distinct' for distinct-key queries. - assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {[testCase.queryField]: {$lt: 3}}, - expectedScanType: (distinctFieldIsMultikey ? "IXSCAN" : "DISTINCT_SCAN"), - expectedResults: [1, 2], - expectedPath: testCase.queryField - }); - - // Test that a $** index can provide an indexed 'distinct' for a query on another field. - const offset = Math.floor(testCase.fieldValues.length / 2); - assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {b: {$gte: offset}}, - expectedScanType: "IXSCAN", - expectedResults: distinctValues.slice(offset), - expectedPath: "b" - }); - - // Test that a $** index cannot provide an indexed 'distinct' for object value queries. - assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {[testCase.queryField]: {$gte: {c: 5}}}, - expectedScanType: "COLLSCAN", - expectedResults: [{c: 5, d: 6}, {d: 6, c: 5}, {e: 11}], - expectedPath: null - }); - - // Test that a $** index can provide an indexed 'distinct' for a MinMax query. 
- assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {[testCase.queryField]: {$gte: MinKey, $lte: MaxKey}}, - expectedScanType: "IXSCAN", - expectedResults: distinctValues, - expectedPath: testCase.queryField - }); - - // Test that a $** index cannot provide an indexed 'distinct' for excluded fields. - assertWildcardDistinctScan({ - distinctKey: testCase.queryField, - query: {c: {$lt: 0}}, - pathProjection: {c: 0}, - expectedScanType: "COLLSCAN", - expectedResults: distinctValues, - expectedPath: null - }); + // Confirm that the $** distinct scan produces the expected results. + assertArrayEq(coll.distinct(distinctKey, query), expectedResults); + // Confirm that the $** plan adheres to 'fetchIsExpected' and 'expectedScanType'. + assert.eq(planHasStage(coll.getDB(), winningPlan, "FETCH"), fetchIsExpected); + assert(planHasStage(coll.getDB(), winningPlan, expectedScanType)); + assert.docEq({$_path: 1, [expectedPath]: 1}, + getPlanStages(winningPlan, expectedScanType).shift().keyPattern); +} + +// The set of distinct values that should be produced by each of the test listed below. +const distinctValues = [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}]; + +// Define the set of values that the distinct field may take. The first test case consists +// entirely of non-multikey fields, while the second includes multikey fields. +const testCases = [ + // Non-multikey field values. + { + insertField: "a", + queryField: "a", + fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, 9, 10, {e: 11}] + }, + // Multikey field values. Note that values within arrays are unwrapped by the distinct + // scan, and empty arrays are thus not included. + { + insertField: "a", + queryField: "a", + fieldValues: [1, 2, "3", null, {c: 5, d: 6}, {d: 6, c: 5}, {}, [], [9, 10], [{e: 11}]] + }, + // Non-multikey dotted field values. + { + insertField: "a", + queryField: "a.x", + fieldValues: [ + {x: 1}, + {x: 2}, + {x: "3"}, + {x: null}, + {x: {c: 5, d: 6}}, + {x: {d: 6, c: 5}}, + {x: {}}, + {x: 9}, + {x: 10}, + {x: {e: 11}} + ] + }, + // Multikey dotted field values. + { + insertField: "a", + queryField: "a.x", + fieldValues: [ + [{x: 1}], + [{x: 2}], + [{x: "3"}], + [{x: null}], + [{x: {c: 5, d: 6}}], + [{x: {d: 6, c: 5}}], + [{x: {}}], + [{x: []}], + [{x: 9}, {x: 10}], + [{x: [{e: 11}]}] + ] } +]; + +// Run all combinations of query, no-query, multikey and non-multikey distinct tests. +for (let testCase of testCases) { + // Log the start of the test and create the dataset. + jsTestLog("Test case: " + tojson(testCase)); + insertTestData(testCase.insertField, testCase.fieldValues); + + // Test that a $** index cannot provide an indexed 'distinct' without a query. + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {}, + expectedScanType: "COLLSCAN", + expectedResults: distinctValues, + expectedPath: null + }); + + // Test that a $** index can provide an indexed 'distinct' for distinct-key queries. + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {[testCase.queryField]: {$lt: 3}}, + expectedScanType: (distinctFieldIsMultikey ? "IXSCAN" : "DISTINCT_SCAN"), + expectedResults: [1, 2], + expectedPath: testCase.queryField + }); + + // Test that a $** index can provide an indexed 'distinct' for a query on another field. 
+ const offset = Math.floor(testCase.fieldValues.length / 2); + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {b: {$gte: offset}}, + expectedScanType: "IXSCAN", + expectedResults: distinctValues.slice(offset), + expectedPath: "b" + }); + + // Test that a $** index cannot provide an indexed 'distinct' for object value queries. + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {[testCase.queryField]: {$gte: {c: 5}}}, + expectedScanType: "COLLSCAN", + expectedResults: [{c: 5, d: 6}, {d: 6, c: 5}, {e: 11}], + expectedPath: null + }); + + // Test that a $** index can provide an indexed 'distinct' for a MinMax query. + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {[testCase.queryField]: {$gte: MinKey, $lte: MaxKey}}, + expectedScanType: "IXSCAN", + expectedResults: distinctValues, + expectedPath: testCase.queryField + }); + + // Test that a $** index cannot provide an indexed 'distinct' for excluded fields. + assertWildcardDistinctScan({ + distinctKey: testCase.queryField, + query: {c: {$lt: 0}}, + pathProjection: {c: 0}, + expectedScanType: "COLLSCAN", + expectedResults: distinctValues, + expectedPath: null + }); +} })(); diff --git a/jstests/core/wildcard_index_empty_arrays.js b/jstests/core/wildcard_index_empty_arrays.js index 7b5e763bbad..cfea1495a48 100644 --- a/jstests/core/wildcard_index_empty_arrays.js +++ b/jstests/core/wildcard_index_empty_arrays.js @@ -2,40 +2,42 @@ * Tests that wildcard indexes will correctly match for empty arrays. */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. - const coll = db.wildcard_empty_arrays; - coll.drop(); +const coll = db.wildcard_empty_arrays; +coll.drop(); - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - const indexWildcard = {"$**": 1}; - assert.commandWorked(coll.createIndex(indexWildcard)); +const indexWildcard = { + "$**": 1 +}; +assert.commandWorked(coll.createIndex(indexWildcard)); - assert.commandWorked(coll.insert({a: 1, b: 1, c: [], d: {e: [5, 6]}})); - assert.commandWorked(coll.insert({a: 2, b: 2, c: [1, 2], d: {e: []}})); - assert.commandWorked(coll.insert({a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]})); - assert.commandWorked(coll.insert({a: 2, b: [[]], c: 1, d: 4})); +assert.commandWorked(coll.insert({a: 1, b: 1, c: [], d: {e: [5, 6]}})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: [1, 2], d: {e: []}})); +assert.commandWorked(coll.insert({a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]})); +assert.commandWorked(coll.insert({a: 2, b: [[]], c: 1, d: 4})); - // $** index matches empty array. - assertArrayEq(coll.find({c: []}, {_id: 0}).hint(indexWildcard).toArray(), - [{a: 1, b: 1, c: [], d: {e: [5, 6]}}]); +// $** index matches empty array. +assertArrayEq(coll.find({c: []}, {_id: 0}).hint(indexWildcard).toArray(), + [{a: 1, b: 1, c: [], d: {e: [5, 6]}}]); - // $** index supports equality to array offset. - assertArrayEq(coll.find({"c.0": 1}, {_id: 0}).hint(indexWildcard).toArray(), - [{a: 2, b: 2, c: [1, 2], d: {e: []}}]); +// $** index supports equality to array offset. +assertArrayEq(coll.find({"c.0": 1}, {_id: 0}).hint(indexWildcard).toArray(), + [{a: 2, b: 2, c: [1, 2], d: {e: []}}]); - // $** index matches empty array nested in object. 
- assertArrayEq(coll.find({"d.e": []}, {_id: 0}).hint(indexWildcard).toArray(), - [{a: 2, b: 2, c: [1, 2], d: {e: []}}]); +// $** index matches empty array nested in object. +assertArrayEq(coll.find({"d.e": []}, {_id: 0}).hint(indexWildcard).toArray(), + [{a: 2, b: 2, c: [1, 2], d: {e: []}}]); - // $** index matches empty array nested within an array of objects. - assertArrayEq(coll.find({"f.0.g": []}, {_id: 0}).hint(indexWildcard).toArray(), - [{a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}]); +// $** index matches empty array nested within an array of objects. +assertArrayEq(coll.find({"f.0.g": []}, {_id: 0}).hint(indexWildcard).toArray(), + [{a: 1, b: 2, c: [3, 4], d: {e: [7, 8]}, f: [{g: []}]}]); - // $** index matches empty array nested within an array. - assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexWildcard).toArray(), - [{a: 2, b: [[]], c: 1, d: 4}]); +// $** index matches empty array nested within an array. +assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexWildcard).toArray(), + [{a: 2, b: [[]], c: 1, d: 4}]); })();
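The assertions above reduce to the fact that a hinted $** index can answer equality-to-[] predicates, including on dotted paths. A condensed sketch of the same behaviour against an assumed 'demo' collection:

// Sketch only: reproduces the empty-array matching the test relies on.
db.demo.drop();
db.demo.createIndex({"$**": 1});
db.demo.insert({c: [], d: {e: []}});
db.demo.insert({c: [1, 2], d: {e: [7, 8]}});
assert.eq(1, db.demo.find({c: []}).hint({"$**": 1}).itcount());
assert.eq(1, db.demo.find({"d.e": []}).hint({"$**": 1}).itcount());
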
\ No newline at end of file diff --git a/jstests/core/wildcard_index_equality_to_empty_obj.js b/jstests/core/wildcard_index_equality_to_empty_obj.js index 28e99534147..c6801bcdcb6 100644 --- a/jstests/core/wildcard_index_equality_to_empty_obj.js +++ b/jstests/core/wildcard_index_equality_to_empty_obj.js @@ -2,75 +2,73 @@ * Tests that a $** index can support queries which test for equality to empty nested objects. */ (function() { - "use strict"; +"use strict"; - const coll = db.wildcard_index_equality_to_empty_obj; - coll.drop(); +const coll = db.wildcard_index_equality_to_empty_obj; +coll.drop(); - assert.commandWorked(coll.insert([ - {_id: 0}, - {_id: 1, a: null}, - {_id: 2, a: []}, - {_id: 3, a: {}}, - {_id: 4, a: [{}]}, - {_id: 5, a: [[{}]]}, - {_id: 6, a: [1, 2, {}]}, - {_id: 7, a: {b: 1}}, - {_id: 8, a: 3}, - {_id: 9, a: {b: {}}}, - {_id: 10, a: [0, {b: {}}]}, - ])); +assert.commandWorked(coll.insert([ + {_id: 0}, + {_id: 1, a: null}, + {_id: 2, a: []}, + {_id: 3, a: {}}, + {_id: 4, a: [{}]}, + {_id: 5, a: [[{}]]}, + {_id: 6, a: [1, 2, {}]}, + {_id: 7, a: {b: 1}}, + {_id: 8, a: 3}, + {_id: 9, a: {b: {}}}, + {_id: 10, a: [0, {b: {}}]}, +])); - assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.createIndex({"$**": 1})); - // Test that a comparison to empty object query returns the expected results when the $** index - // is hinted. - let results = coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); - assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]); +// Test that a comparison to empty object query returns the expected results when the $** index +// is hinted. +let results = coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); +assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]); - // Result set should be the same as when hinting a COLLSCAN and with no hint. - assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); - assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).toArray()); +// Result set should be the same as when hinting a COLLSCAN and with no hint. +assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); +assert.eq(results, coll.find({a: {}}, {_id: 1}).sort({_id: 1}).toArray()); - // Repeat the above query, but express it using $lte:{}, which is a synonym for $eq:{}. - results = coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); - assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]); - assert.eq(results, - coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); - assert.eq(results, coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).toArray()); +// Repeat the above query, but express it using $lte:{}, which is a synonym for $eq:{}. +results = coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); +assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}]); +assert.eq(results, + coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); +assert.eq(results, coll.find({a: {$lte: {}}}, {_id: 1}).sort({_id: 1}).toArray()); - // Test that an inequality to empty object query results in an error when the $** index is - // hinted. - assert.throws( - () => coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray()); +// Test that an inequality to empty object query results in an error when the $** index is +// hinted. 
+assert.throws(() => coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray()); - // Test that an inequality to empty object query returns the expected results in the presence of - // the $** index. - results = coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray(); - assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 7}, {_id: 9}, {_id: 10}]); +// Test that an inequality to empty object query returns the expected results in the presence of +// the $** index. +results = coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray(); +assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 7}, {_id: 9}, {_id: 10}]); - // Result set should be the same as when hinting a COLLSCAN and with no hint. - assert.eq(results, - coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); - assert.eq(results, coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray()); +// Result set should be the same as when hinting a COLLSCAN and with no hint. +assert.eq(results, + coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); +assert.eq(results, coll.find({a: {$gte: {}}}, {_id: 1}).sort({_id: 1}).toArray()); - // Test that an $in with an empty object returns the expected results when the $** index is - // hinted. - results = coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); - assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 8}]); +// Test that an $in with an empty object returns the expected results when the $** index is +// hinted. +results = coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); +assert.eq(results, [{_id: 3}, {_id: 4}, {_id: 6}, {_id: 8}]); - // Result set should be the same as when hinting a COLLSCAN and with no hint. - assert.eq( - results, - coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); - assert.eq(results, coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).toArray()); +// Result set should be the same as when hinting a COLLSCAN and with no hint. +assert.eq(results, + coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); +assert.eq(results, coll.find({a: {$in: [3, {}]}}, {_id: 1}).sort({_id: 1}).toArray()); - // Test that a wildcard index can support equality to an empty object on a dotted field. - results = coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); - assert.eq(results, [{_id: 9}, {_id: 10}]); +// Test that a wildcard index can support equality to an empty object on a dotted field. +results = coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({"$**": 1}).toArray(); +assert.eq(results, [{_id: 9}, {_id: 10}]); - // Result set should be the same as when hinting a COLLSCAN and with no hint. - assert.eq(results, - coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); - assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray()); +// Result set should be the same as when hinting a COLLSCAN and with no hint. 
+assert.eq(results, + coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); +assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray()); }()); diff --git a/jstests/core/wildcard_index_filter.js b/jstests/core/wildcard_index_filter.js index 74c81edf462..fc1f1efdc6f 100644 --- a/jstests/core/wildcard_index_filter.js +++ b/jstests/core/wildcard_index_filter.js @@ -6,88 +6,95 @@ * @tags: [does_not_support_stepdowns] */ (function() { - "use strict"; - - load("jstests/libs/analyze_plan.js"); - - const coll = db.wildcard_index_filter; - - // Utility function to list index filters. - function getFilters() { - const res = assert.commandWorked(coll.runCommand('planCacheListFilters')); - assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result'); - return res.filters; +"use strict"; + +load("jstests/libs/analyze_plan.js"); + +const coll = db.wildcard_index_filter; + +// Utility function to list index filters. +function getFilters() { + const res = assert.commandWorked(coll.runCommand('planCacheListFilters')); + assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result'); + return res.filters; +} + +// Sets an index filter given a query shape then confirms that the expected index was used to +// answer a query. +function assertExpectedIndexAnswersQueryWithFilter( + filterQuery, filterIndexes, query, expectedIndexName, hint) { + // Clear existing cache filters. + assert.commandWorked(coll.runCommand('planCacheClearFilters'), 'planCacheClearFilters failed'); + + // Make sure that the filter is set correctly. + assert.commandWorked( + coll.runCommand('planCacheSetFilter', {query: filterQuery, indexes: filterIndexes})); + assert.eq(1, + getFilters().length, + 'no change in query settings after successfully setting index filters'); + + // Check that expectedIndex index was used over another index. + let explain; + if (hint == undefined) { + explain = assert.commandWorked(coll.explain("executionStats").find(query).finish()); + } else { + explain = + assert.commandWorked(coll.explain("executionStats").find(query).hint(hint).finish()); } - // Sets an index filter given a query shape then confirms that the expected index was used to - // answer a query. - function assertExpectedIndexAnswersQueryWithFilter( - filterQuery, filterIndexes, query, expectedIndexName, hint) { - // Clear existing cache filters. - assert.commandWorked(coll.runCommand('planCacheClearFilters'), - 'planCacheClearFilters failed'); - - // Make sure that the filter is set correctly. - assert.commandWorked( - coll.runCommand('planCacheSetFilter', {query: filterQuery, indexes: filterIndexes})); - assert.eq(1, - getFilters().length, - 'no change in query settings after successfully setting index filters'); - - // Check that expectedIndex index was used over another index. 
- let explain; - if (hint == undefined) { - explain = assert.commandWorked(coll.explain("executionStats").find(query).finish()); - } else { - explain = assert.commandWorked( - coll.explain("executionStats").find(query).hint(hint).finish()); - } - - const executionStages = getExecutionStages(explain).shift(); - let planStage = getPlanStage(executionStages, 'IXSCAN'); - assert.neq(null, planStage); - assert.eq(planStage.indexName, expectedIndexName, tojson(planStage)); - } + const executionStages = getExecutionStages(explain).shift(); + let planStage = getPlanStage(executionStages, 'IXSCAN'); + assert.neq(null, planStage); + assert.eq(planStage.indexName, expectedIndexName, tojson(planStage)); +} - const indexWildcard = {"$**": 1}; - const indexA = {"a": 1}; - assert.commandWorked(coll.createIndex(indexWildcard)); - assert.commandWorked(coll.createIndex(indexA)); +const indexWildcard = { + "$**": 1 +}; +const indexA = { + "a": 1 +}; +assert.commandWorked(coll.createIndex(indexWildcard)); +assert.commandWorked(coll.createIndex(indexA)); - assert.commandWorked(coll.insert({a: "a"})); +assert.commandWorked(coll.insert({a: "a"})); - // Filtering on $** index. $** index is used over another index. - assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1"); +// Filtering on $** index. $** index is used over another index. +assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1"); - // Filtering on regular index. $** index is not used over another index. - assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1"); +// Filtering on regular index. $** index is not used over another index. +assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1"); - assert.commandWorked(coll.insert({a: "a", b: "b"})); +assert.commandWorked(coll.insert({a: "a", b: "b"})); - const indexAB = {"a": 1, "b": 1}; - assert.commandWorked(coll.createIndex(indexAB)); +const indexAB = { + "a": 1, + "b": 1 +}; +assert.commandWorked(coll.createIndex(indexAB)); - // Filtering on $** index. $** index is used over another index for compound query. - assertExpectedIndexAnswersQueryWithFilter( - {a: "a", b: "b"}, [indexWildcard], {a: "a", b: "b"}, "$**_1"); +// Filtering on $** index. $** index is used over another index for compound query. +assertExpectedIndexAnswersQueryWithFilter( + {a: "a", b: "b"}, [indexWildcard], {a: "a", b: "b"}, "$**_1"); - // Filtering on regular compound index. Check that $** index is not used over another index - // for compound query. - assertExpectedIndexAnswersQueryWithFilter( - {a: "a", b: "b"}, [indexAB], {a: "a", b: "b"}, "a_1_b_1"); +// Filtering on regular compound index. Check that $** index is not used over another index +// for compound query. +assertExpectedIndexAnswersQueryWithFilter({a: "a", b: "b"}, [indexAB], {a: "a", b: "b"}, "a_1_b_1"); - // Filtering on $** index while hinting on another index. Index filter is prioritized. - assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1", indexA); +// Filtering on $** index while hinting on another index. Index filter is prioritized. +assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexWildcard], {a: "a"}, "$**_1", indexA); - // Filtering on regular index while hinting on $** index. Index filter is prioritized. - assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1", indexWildcard); +// Filtering on regular index while hinting on $** index. Index filter is prioritized. 
+assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexA], {a: "a"}, "a_1", indexWildcard); - // Index filter for $** index does not apply when query does not match filter query shape. - assertExpectedIndexAnswersQueryWithFilter({b: "b"}, [indexWildcard], {a: "a"}, "a_1", indexA); +// Index filter for $** index does not apply when query does not match filter query shape. +assertExpectedIndexAnswersQueryWithFilter({b: "b"}, [indexWildcard], {a: "a"}, "a_1", indexA); - const indexAWildcard = {"a.$**": 1}; - assert.commandWorked(coll.createIndex(indexAWildcard)); +const indexAWildcard = { + "a.$**": 1 +}; +assert.commandWorked(coll.createIndex(indexAWildcard)); - // Filtering on a path specified $** index. Check that the $** is used over other indices. - assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1"); +// Filtering on a path specified $** index. Check that the $** is used over other indices. +assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1"); })();
\ No newline at end of file diff --git a/jstests/core/wildcard_index_hint.js b/jstests/core/wildcard_index_hint.js index 3f1ac41b42f..f20e2b238c1 100644 --- a/jstests/core/wildcard_index_hint.js +++ b/jstests/core/wildcard_index_hint.js @@ -2,104 +2,96 @@ * Tests that $** indexes obey hinting. */ (function() { - "use strict"; - - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For getPlanStages. - - const coll = db.wildcard_hint; - coll.drop(); - - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - - // Extracts the winning plan for the given query and hint from the explain output. - const winningPlan = (query, hint) => - assert.commandWorked(coll.find(query).hint(hint).explain()).queryPlanner.winningPlan; - - // Runs the given query and confirms that: - // (1) the expected index was used to answer the query, and - // (2) the results produced by the index match the given 'expectedResults'. - function assertExpectedIndexAnswersQueryWithHint( - query, hint, expectedIndexName, expectedResults) { - const ixScans = getPlanStages(winningPlan(query, hint), "IXSCAN"); - assert.gt(ixScans.length, 0, tojson(coll.find(query).hint(hint).explain())); - ixScans.forEach((ixScan) => assert.eq(ixScan.indexName, expectedIndexName)); - - const wildcardResults = coll.find(query, {_id: 0}).hint(hint).toArray(); - assertArrayEq(wildcardResults, expectedResults); - } - - assert.commandWorked(db.createCollection(coll.getName())); - - // Check that error is thrown if the hinted index doesn't exist. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"$**": 1}}), - ErrorCodes.BadValue); - - assert.commandWorked(coll.createIndex({"$**": 1})); - - assert.commandWorked(coll.insert({_id: 10, a: 1, b: 1, c: {d: 1, e: 1}})); - assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 2}})); - assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}})); - assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}})); - assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}})); - - // Hint a $** index without a competing index. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1}, - {"$**": 1}, - "$**_1", - [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); - - assert.commandWorked(coll.createIndex({"a": 1})); - - // Hint a $** index with a competing index. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1}, - {"$**": 1}, - "$**_1", - [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); - - // Hint a $** index with a competing _id index. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1, "_id": 10}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]); - - // Hint a regular index with a competing $** index. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1}, {"a": 1}, "a_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); - - // Query on fields that not all documents in the collection have with $** index hint. - assertExpectedIndexAnswersQueryWithHint( - {"c.d": 1}, - {"$**": 1}, - "$**_1", - [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 2, b: 2, c: {d: 1, e: 2}}]); - - // Adding another wildcard index with a path specified. - assert.commandWorked(coll.createIndex({"c.$**": 1})); - - // Hint on path that is not in query argument. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"c.$**": 1}}), - ErrorCodes.BadValue); - - // Hint on a path specified $** index. 
- assertExpectedIndexAnswersQueryWithHint( - {"c.d": 1}, - {"c.$**": 1}, - "c.$**_1", - [{a: 2, b: 2, c: {d: 1, e: 2}}, {a: 1, b: 1, c: {d: 1, e: 1}}]); - - // Min/max with $** index hint. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"b": 1}, min: {"a": 1}, hint: {"$**": 1}}), - 51174); - - // Hint a $** index on a query with compound fields. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1, "c.e": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]); - - // Hint a $** index by name. - assertExpectedIndexAnswersQueryWithHint( - {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); +"use strict"; + +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. + +const coll = db.wildcard_hint; +coll.drop(); + +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); + +// Extracts the winning plan for the given query and hint from the explain output. +const winningPlan = (query, hint) => + assert.commandWorked(coll.find(query).hint(hint).explain()).queryPlanner.winningPlan; + +// Runs the given query and confirms that: +// (1) the expected index was used to answer the query, and +// (2) the results produced by the index match the given 'expectedResults'. +function assertExpectedIndexAnswersQueryWithHint(query, hint, expectedIndexName, expectedResults) { + const ixScans = getPlanStages(winningPlan(query, hint), "IXSCAN"); + assert.gt(ixScans.length, 0, tojson(coll.find(query).hint(hint).explain())); + ixScans.forEach((ixScan) => assert.eq(ixScan.indexName, expectedIndexName)); + + const wildcardResults = coll.find(query, {_id: 0}).hint(hint).toArray(); + assertArrayEq(wildcardResults, expectedResults); +} + +assert.commandWorked(db.createCollection(coll.getName())); + +// Check that error is thrown if the hinted index doesn't exist. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"$**": 1}}), ErrorCodes.BadValue); + +assert.commandWorked(coll.createIndex({"$**": 1})); + +assert.commandWorked(coll.insert({_id: 10, a: 1, b: 1, c: {d: 1, e: 1}})); +assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 2}})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}})); +assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}})); + +// Hint a $** index without a competing index. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); + +assert.commandWorked(coll.createIndex({"a": 1})); + +// Hint a $** index with a competing index. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); + +// Hint a $** index with a competing _id index. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1, "_id": 10}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]); + +// Hint a regular index with a competing $** index. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1}, {"a": 1}, "a_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); + +// Query on fields that not all documents in the collection have with $** index hint. +assertExpectedIndexAnswersQueryWithHint( + {"c.d": 1}, + {"$**": 1}, + "$**_1", + [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 2, b: 2, c: {d: 1, e: 2}}]); + +// Adding another wildcard index with a path specified. 
+assert.commandWorked(coll.createIndex({"c.$**": 1})); + +// Hint on path that is not in query argument. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 1}, hint: {"c.$**": 1}}), + ErrorCodes.BadValue); + +// Hint on a path specified $** index. +assertExpectedIndexAnswersQueryWithHint( + {"c.d": 1}, + {"c.$**": 1}, + "c.$**_1", + [{a: 2, b: 2, c: {d: 1, e: 2}}, {a: 1, b: 1, c: {d: 1, e: 1}}]); + +// Min/max with $** index hint. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"b": 1}, min: {"a": 1}, hint: {"$**": 1}}), + 51174); + +// Hint a $** index on a query with compound fields. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1, "c.e": 1}, {"$**": 1}, "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}]); + +// Hint a $** index by name. +assertExpectedIndexAnswersQueryWithHint( + {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); })(); diff --git a/jstests/core/wildcard_index_minmax.js b/jstests/core/wildcard_index_minmax.js index b13d2c81b94..f7baf8a0713 100644 --- a/jstests/core/wildcard_index_minmax.js +++ b/jstests/core/wildcard_index_minmax.js @@ -2,77 +2,73 @@ * Tests that min/max is not supported for wildcard index. */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. - const coll = db.wildcard_index_minmax; - coll.drop(); +const coll = db.wildcard_index_minmax; +coll.drop(); - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - assert.commandWorked(coll.insert({a: 1, b: 1})); - assert.commandWorked(coll.insert({a: 1, b: 2})); - assert.commandWorked(coll.insert({a: 2, b: 1})); - assert.commandWorked(coll.insert({a: 2, b: 2})); +assert.commandWorked(coll.insert({a: 1, b: 1})); +assert.commandWorked(coll.insert({a: 1, b: 2})); +assert.commandWorked(coll.insert({a: 2, b: 1})); +assert.commandWorked(coll.insert({a: 2, b: 2})); - assert.commandWorked(coll.createIndex({"$**": 1})); - assert.commandWorked(coll.createIndex({"a": 1})); +assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.createIndex({"a": 1})); - // Throws error for $** index min. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), min: {"a": 0.5}, hint: {"$**": 1}}), 51174); +// Throws error for $** index min. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), min: {"a": 0.5}, hint: {"$**": 1}}), 51174); - // Throws error for $** index max. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), max: {"a": 1.5}, hint: {"$**": 1}}), 51174); +// Throws error for $** index max. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), max: {"a": 1.5}, hint: {"$**": 1}}), 51174); - // Throws error for $** index min/max. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), min: {"a": 0.5}, max: {"a": 1.5}, hint: {"$**": 1}}), - 51174); +// Throws error for $** index min/max. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), min: {"a": 0.5}, max: {"a": 1.5}, hint: {"$**": 1}}), + 51174); - // Throws error for $** index min with filter of a different value. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 2}, min: {"a": 1}, hint: {"$**": 1}}), - 51174); +// Throws error for $** index min with filter of a different value. 
+assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 2}, min: {"a": 1}, hint: {"$**": 1}}), + 51174); - // Throws error for $** index max with filter of a different value. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1.5}, hint: {"$**": 1}}), - 51174); +// Throws error for $** index max with filter of a different value. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1.5}, hint: {"$**": 1}}), + 51174); - // Throws error for $** index min and max with filter of a different value. - assert.commandFailedWithCode(db.runCommand({ - find: coll.getName(), - filter: {"a": 1}, - min: {"a": 0.5}, - max: {"a": 1.5}, - hint: {"$**": 1} - }), - 51174); +// Throws error for $** index min and max with filter of a different value. +assert.commandFailedWithCode(db.runCommand({ + find: coll.getName(), + filter: {"a": 1}, + min: {"a": 0.5}, + max: {"a": 1.5}, + hint: {"$**": 1} +}), + 51174); - // Throws error for $** index min with filter of the same value. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, hint: {"$**": 1}}), - 51174); +// Throws error for $** index min with filter of the same value. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, hint: {"$**": 1}}), + 51174); - // Throws error for $** index max with filter of the same value. - assert.commandFailedWithCode( - db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}), - 51174); +// Throws error for $** index max with filter of the same value. +assert.commandFailedWithCode( + db.runCommand({find: coll.getName(), filter: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}), + 51174); - // Throws error for $** index min and max with filter of the same value. - assert.commandFailedWithCode(db.runCommand({ - find: coll.getName(), - filter: {"a": 1}, - min: {"a": 1}, - max: {"a": 1}, - hint: {"$**": 1} - }), - 51174); +// Throws error for $** index min and max with filter of the same value. +assert.commandFailedWithCode( + db.runCommand( + {find: coll.getName(), filter: {"a": 1}, min: {"a": 1}, max: {"a": 1}, hint: {"$**": 1}}), + 51174); - // $** index does not interfere with valid min/max. - assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(), - [{a: 1, b: 1}, {a: 1, b: 2}]); +// $** index does not interfere with valid min/max. +assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(), + [{a: 1, b: 1}, {a: 1, b: 2}]); })(); diff --git a/jstests/core/wildcard_index_multikey.js b/jstests/core/wildcard_index_multikey.js index 039c5176ff4..ce6a7151ad1 100644 --- a/jstests/core/wildcard_index_multikey.js +++ b/jstests/core/wildcard_index_multikey.js @@ -3,265 +3,266 @@ * @tags: [assumes_balancer_off] */ (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. - load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. 
- const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - const coll = db.wildcard_multikey_index; - coll.drop(); +const coll = db.wildcard_multikey_index; +coll.drop(); - // Template document which defines the 'schema' of the documents in the test collection. - const templateDoc = {a: [], b: {c: [], d: [{e: 0}]}}; - const pathList = ["a", "b.c", "b.d.e"]; +// Template document which defines the 'schema' of the documents in the test collection. +const templateDoc = { + a: [], + b: {c: [], d: [{e: 0}]} +}; +const pathList = ["a", "b.c", "b.d.e"]; - // Insert a set of documents into the collection, based on the template document and populated - // with an increasing sequence of values. This is to ensure that the range of values present for - // each field in the dataset is not entirely homogeneous. - for (let i = 0; i < 50; i++) { - (function populateDoc(doc, value) { - for (let key in doc) { - if (typeof doc[key] === "object") { - if (Array.isArray(doc[key])) { - if (typeof doc[key][0] === "object") { - value = populateDoc(doc[key][0], value); - } else { - doc[key] = [++value, ++value]; - } +// Insert a set of documents into the collection, based on the template document and populated +// with an increasing sequence of values. This is to ensure that the range of values present for +// each field in the dataset is not entirely homogeneous. +for (let i = 0; i < 50; i++) { + (function populateDoc(doc, value) { + for (let key in doc) { + if (typeof doc[key] === "object") { + if (Array.isArray(doc[key])) { + if (typeof doc[key][0] === "object") { + value = populateDoc(doc[key][0], value); } else { - value = populateDoc(doc[key], value); + doc[key] = [++value, ++value]; } } else { - doc[key] = ++value; + value = populateDoc(doc[key], value); } + } else { + doc[key] = ++value; } - return value; - })(templateDoc, i); - assert.commandWorked(coll.insert(templateDoc)); - } + } + return value; + })(templateDoc, i); + assert.commandWorked(coll.insert(templateDoc)); +} - // Set of operations which will be applied to each field in the index in turn. - const operationList = [ - {expression: {$gte: 10}}, - {expression: {$gt: 10}}, - {expression: {$lt: 40}}, - {expression: {$lte: 40}}, - {expression: {$gt: 10, $lt: 40}}, - {expression: {$eq: 25}}, - {expression: {$in: [5, 15, 35, 40]}}, - {expression: {$elemMatch: {$gte: 10, $lte: 40}}}, - ]; +// Set of operations which will be applied to each field in the index in turn. +const operationList = [ + {expression: {$gte: 10}}, + {expression: {$gt: 10}}, + {expression: {$lt: 40}}, + {expression: {$lte: 40}}, + {expression: {$gt: 10, $lt: 40}}, + {expression: {$eq: 25}}, + {expression: {$in: [5, 15, 35, 40]}}, + {expression: {$elemMatch: {$gte: 10, $lte: 40}}}, +]; - // Given a keyPattern and (optional) pathProjection, this function builds a $** index on the - // collection and then tests each of the match expression in the 'operationList' on each indexed - // field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have - // been indexed based on the spec; this function will confirm that only the appropriate paths - // are present in the $** index. - function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) { - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex( - keyPattern, pathProjection ? 
{wildcardProjection: pathProjection} : {})); - assert(expectedPaths); - // Verify the expected behaviour for every combination of path and operator. - for (let op of operationList) { - for (let path of pathList) { - const query = {[path]: op.expression}; - assertWildcardQuery(query, expectedPaths.includes(path) ? path : null); - } +// Given a keyPattern and (optional) pathProjection, this function builds a $** index on the +// collection and then tests each of the match expression in the 'operationList' on each indexed +// field in turn. The 'expectedPaths' argument lists the set of paths which we expect to have +// been indexed based on the spec; this function will confirm that only the appropriate paths +// are present in the $** index. +function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) { + assert.commandWorked(coll.dropIndexes()); + assert.commandWorked( + coll.createIndex(keyPattern, pathProjection ? {wildcardProjection: pathProjection} : {})); + assert(expectedPaths); + // Verify the expected behaviour for every combination of path and operator. + for (let op of operationList) { + for (let path of pathList) { + const query = {[path]: op.expression}; + assertWildcardQuery(query, expectedPaths.includes(path) ? path : null); } } +} - // Runs a single wildcard query test. If 'expectedPath' is non-null, verifies that there is an - // indexed solution that uses the $** index with the given path string. If 'expectedPath' is - // null, verifies that no indexed solution was found. If 'explainStats' is non-empty, verifies - // that the query's explain output reflects the given stats. - function assertWildcardQuery(query, expectedPath, explainStats = {}) { - // Explain the query, and determine whether an indexed solution is available. - const explainOutput = coll.find(query).explain("executionStats"); - // If we expect the current path to have been excluded based on the $** keyPattern - // or projection, confirm that no indexed solution was found. - if (!expectedPath) { - assert.gt(getPlanStages(explainOutput.queryPlanner.winningPlan, "COLLSCAN").length, 0); - return; - } - // Verify that the winning plan uses the $** index with the expected path. - const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN"); - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1}); - // Verify that the results obtained from the $** index are identical to a COLLSCAN. - assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray()); - // Verify that the explain output reflects the given 'explainStats'. - for (let stat in explainStats) { - assert.eq(explainStats[stat], - stat.split('.').reduce((obj, i) => obj[i], explainOutput), - explainOutput); - } +// Runs a single wildcard query test. If 'expectedPath' is non-null, verifies that there is an +// indexed solution that uses the $** index with the given path string. If 'expectedPath' is +// null, verifies that no indexed solution was found. If 'explainStats' is non-empty, verifies +// that the query's explain output reflects the given stats. +function assertWildcardQuery(query, expectedPath, explainStats = {}) { + // Explain the query, and determine whether an indexed solution is available. + const explainOutput = coll.find(query).explain("executionStats"); + // If we expect the current path to have been excluded based on the $** keyPattern + // or projection, confirm that no indexed solution was found. 
+ if (!expectedPath) { + assert.gt(getPlanStages(explainOutput.queryPlanner.winningPlan, "COLLSCAN").length, 0); + return; } + // Verify that the winning plan uses the $** index with the expected path. + const ixScans = getPlanStages(explainOutput.queryPlanner.winningPlan, "IXSCAN"); + assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); + assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1}); + // Verify that the results obtained from the $** index are identical to a COLLSCAN. + assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray()); + // Verify that the explain output reflects the given 'explainStats'. + for (let stat in explainStats) { + assert.eq(explainStats[stat], + stat.split('.').reduce((obj, i) => obj[i], explainOutput), + explainOutput); + } +} - // Test a $** index that indexes the entire document. - runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e']); - // Test a $** index on a single subtree. - runWildcardIndexTest({'a.$**': 1}, null, ['a']); - runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e']); - runWildcardIndexTest({'b.c.$**': 1}, null, ['b.c']); - runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']); - // Test a $** index which includes a subset of paths. - runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']); - runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e']); - runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']); - runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']); - // Test a $** index which excludes a subset of paths. - runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e']); - runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']); - runWildcardIndexTest({'$**': 1}, {'b.c': 0}, ['a', 'b.d.e']); - runWildcardIndexTest({'$**': 1}, {a: 0, 'b.c': 0}, ['b.d.e']); - - // Sanity check that a few queries which need to be planned specially in the multikey case - // return the correct results. - coll.drop(); - assert.commandWorked(coll.createIndex({"$**": 1})); - assert.commandWorked(coll.insert({a: [-5, 15]})); - assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).itcount()); - assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount()); - assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).itcount()); - assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).hint({$natural: 1}).itcount()); +// Test a $** index that indexes the entire document. +runWildcardIndexTest({'$**': 1}, null, ['a', 'b.c', 'b.d.e']); +// Test a $** index on a single subtree. +runWildcardIndexTest({'a.$**': 1}, null, ['a']); +runWildcardIndexTest({'b.$**': 1}, null, ['b.c', 'b.d.e']); +runWildcardIndexTest({'b.c.$**': 1}, null, ['b.c']); +runWildcardIndexTest({'b.d.$**': 1}, null, ['b.d.e']); +// Test a $** index which includes a subset of paths. +runWildcardIndexTest({'$**': 1}, {a: 1}, ['a']); +runWildcardIndexTest({'$**': 1}, {b: 1}, ['b.c', 'b.d.e']); +runWildcardIndexTest({'$**': 1}, {'b.d': 1}, ['b.d.e']); +runWildcardIndexTest({'$**': 1}, {a: 1, 'b.d': 1}, ['a', 'b.d.e']); +// Test a $** index which excludes a subset of paths. 
+runWildcardIndexTest({'$**': 1}, {a: 0}, ['b.c', 'b.d.e']); +runWildcardIndexTest({'$**': 1}, {b: 0}, ['a']); +runWildcardIndexTest({'$**': 1}, {'b.c': 0}, ['a', 'b.d.e']); +runWildcardIndexTest({'$**': 1}, {a: 0, 'b.c': 0}, ['b.d.e']); - assert.commandWorked(coll.insert({b: {c: {d: [{e: {f: -5}}, {e: {f: 15}}]}}})); - assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).itcount()); - assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount()); - assert.eq(0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).itcount()); - assert.eq(0, - coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}) - .hint({$natural: 1}) - .itcount()); +// Sanity check that a few queries which need to be planned specially in the multikey case +// return the correct results. +coll.drop(); +assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.insert({a: [-5, 15]})); +assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).itcount()); +assert.eq(1, coll.find({a: {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount()); +assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).itcount()); +assert.eq(0, coll.find({a: {$elemMatch: {$gt: 0, $lt: 9}}}).hint({$natural: 1}).itcount()); - // Fieldname-or-array-index query tests. - assert(coll.drop()); - assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.insert({b: {c: {d: [{e: {f: -5}}, {e: {f: 15}}]}}})); +assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).itcount()); +assert.eq(1, coll.find({"b.c.d.e.f": {$gt: 0, $lt: 9}}).hint({$natural: 1}).itcount()); +assert.eq(0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).itcount()); +assert.eq( + 0, coll.find({"b.c.d": {$elemMatch: {"e.f": {$gt: 0, $lt: 9}}}}).hint({$natural: 1}).itcount()); - // Insert some documents that exhibit a mix of numeric fieldnames and array indices. - assert.commandWorked(coll.insert({_id: 1, a: [{b: [{c: 1}]}]})); - assert.commandWorked(coll.insert({_id: 2, a: [{b: [{c: 0}, {c: 1}]}]})); - assert.commandWorked(coll.insert({_id: 3, a: {'0': [{b: {'1': {c: 1}}}, {d: 1}]}})); - assert.commandWorked(coll.insert({_id: 4, a: [{b: [{1: {c: 1}}]}]})); - assert.commandWorked( - coll.insert({_id: 5, a: [{b: [{'1': {c: {'2': {d: [0, 1, 2, 3, {e: 1}]}}}}]}]})); +// Fieldname-or-array-index query tests. +assert(coll.drop()); +assert.commandWorked(coll.createIndex({"$**": 1})); - /* - * Multikey Metadata Keys: - * {'': 1, '': 'a'} - * {'': 1, '': 'a.0'} - * {'': 1, '': 'a.b'} - * {'': 1, '': 'a.b.1.c.2.d'} - * Keys: - * {'': 'a.b.c', '': 1} // _id: 1, a,b multikey - * {'': 'a.b.c', '': 0} // _id: 2, a,b multikey - * {'': 'a.b.c', '': 1} // _id: 2, a,b multikey - * {'': 'a.0.b.1.c', '': 1} // _id: 3, '0, 1' are fieldnames, a.0 multikey - * {'': 'a.0.d', '': 1} // _id: 3, '0' is fieldname, a.0 multikey - * {'': 'a.b.1.c', '': 1} // _id: 4, '1' is fieldname, a,b multikey - * {'': 'a.b.1.c.2.d', '': 0} // _id: 5, a,b,a.b.1.c.2.d multikey, '1' is fieldname - * {'': 'a.b.1.c.2.d', '': 1} // _id: 5 - * {'': 'a.b.1.c.2.d', '': 2} // _id: 5 - * {'': 'a.b.1.c.2.d', '': 3} // _id: 5 - * {'': 'a.b.1.c.2.d.e', '': 1} // _id: 5 - */ +// Insert some documents that exhibit a mix of numeric fieldnames and array indices. 
+assert.commandWorked(coll.insert({_id: 1, a: [{b: [{c: 1}]}]})); +assert.commandWorked(coll.insert({_id: 2, a: [{b: [{c: 0}, {c: 1}]}]})); +assert.commandWorked(coll.insert({_id: 3, a: {'0': [{b: {'1': {c: 1}}}, {d: 1}]}})); +assert.commandWorked(coll.insert({_id: 4, a: [{b: [{1: {c: 1}}]}]})); +assert.commandWorked( + coll.insert({_id: 5, a: [{b: [{'1': {c: {'2': {d: [0, 1, 2, 3, {e: 1}]}}}}]}]})); - // Test that a query with multiple numeric path components returns all relevant documents, - // whether the numeric path component refers to a fieldname or array index in each doc: - // - // _id:1 will be captured by the special fieldname-or-array-index bounds 'a.b.c', but will be - // filtered out by the INEXACT_FETCH since it has no array index or fieldname 'b.1'. - // _id:2 will match both 'a.0' and 'b.1' by array index. - // _id:3 will match both 'a.0' and 'b.1' by fieldname. - // _id:4 will match 'a.0' by array index and 'b.1' by fieldname. - // _id:5 is not captured by the special fieldname-or-array-index bounds. - // - // We examine the solution's 'nReturned' versus 'totalDocsExamined' to confirm this. - // totalDocsExamined: [_id:1, _id:2, _id:3, _id:4], nReturned: [_id:2, _id:3, _id:4] - assertWildcardQuery({'a.0.b.1.c': 1}, - 'a.0.b.1.c', - {'executionStats.nReturned': 3, 'executionStats.totalDocsExamined': 4}); +/* + * Multikey Metadata Keys: + * {'': 1, '': 'a'} + * {'': 1, '': 'a.0'} + * {'': 1, '': 'a.b'} + * {'': 1, '': 'a.b.1.c.2.d'} + * Keys: + * {'': 'a.b.c', '': 1} // _id: 1, a,b multikey + * {'': 'a.b.c', '': 0} // _id: 2, a,b multikey + * {'': 'a.b.c', '': 1} // _id: 2, a,b multikey + * {'': 'a.0.b.1.c', '': 1} // _id: 3, '0, 1' are fieldnames, a.0 multikey + * {'': 'a.0.d', '': 1} // _id: 3, '0' is fieldname, a.0 multikey + * {'': 'a.b.1.c', '': 1} // _id: 4, '1' is fieldname, a,b multikey + * {'': 'a.b.1.c.2.d', '': 0} // _id: 5, a,b,a.b.1.c.2.d multikey, '1' is fieldname + * {'': 'a.b.1.c.2.d', '': 1} // _id: 5 + * {'': 'a.b.1.c.2.d', '': 2} // _id: 5 + * {'': 'a.b.1.c.2.d', '': 3} // _id: 5 + * {'': 'a.b.1.c.2.d.e', '': 1} // _id: 5 + */ - // Test that we can query a specific field of an array whose fieldname is itself numeric. - assertWildcardQuery({'a.0.1.d': 1}, - 'a.0.1.d', - {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1}); +// Test that a query with multiple numeric path components returns all relevant documents, +// whether the numeric path component refers to a fieldname or array index in each doc: +// +// _id:1 will be captured by the special fieldname-or-array-index bounds 'a.b.c', but will be +// filtered out by the INEXACT_FETCH since it has no array index or fieldname 'b.1'. +// _id:2 will match both 'a.0' and 'b.1' by array index. +// _id:3 will match both 'a.0' and 'b.1' by fieldname. +// _id:4 will match 'a.0' by array index and 'b.1' by fieldname. +// _id:5 is not captured by the special fieldname-or-array-index bounds. +// +// We examine the solution's 'nReturned' versus 'totalDocsExamined' to confirm this. +// totalDocsExamined: [_id:1, _id:2, _id:3, _id:4], nReturned: [_id:2, _id:3, _id:4] +assertWildcardQuery({'a.0.b.1.c': 1}, + 'a.0.b.1.c', + {'executionStats.nReturned': 3, 'executionStats.totalDocsExamined': 4}); - // Test that we can query a primitive value at a specific array index. - assertWildcardQuery({'a.0.b.1.c.2.d.3': 3}, - 'a.0.b.1.c.2.d.3', - {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1}); +// Test that we can query a specific field of an array whose fieldname is itself numeric. 
+assertWildcardQuery({'a.0.1.d': 1}, + 'a.0.1.d', + {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1}); - // Test that a $** index can't be used for a query through more than 8 nested array indices. - assert.commandWorked( - coll.insert({_id: 6, a: [{b: [{c: [{d: [{e: [{f: [{g: [{h: [{i: [1]}]}]}]}]}]}]}]}]})); - // We can query up to a depth of 8 arrays via specific indices, but not through 9 or more. - assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i': 1}, - 'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i'); - assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i.0': 1}, null); +// Test that we can query a primitive value at a specific array index. +assertWildcardQuery({'a.0.b.1.c.2.d.3': 3}, + 'a.0.b.1.c.2.d.3', + {'executionStats.nReturned': 1, 'executionStats.totalDocsExamined': 1}); - // Test that fieldname-or-array-index queries do not inappropriately trim predicates; that is, - // all predicates on the field are added to a FETCH filter above the IXSCAN. - assert(coll.drop()); - assert.commandWorked(coll.createIndex({"$**": 1})); +// Test that a $** index can't be used for a query through more than 8 nested array indices. +assert.commandWorked( + coll.insert({_id: 6, a: [{b: [{c: [{d: [{e: [{f: [{g: [{h: [{i: [1]}]}]}]}]}]}]}]}]})); +// We can query up to a depth of 8 arrays via specific indices, but not through 9 or more. +assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i': 1}, 'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i'); +assertWildcardQuery({'a.0.b.0.c.0.d.0.e.0.f.0.g.0.h.0.i.0': 1}, null); - assert.commandWorked(coll.insert({_id: 1, a: [0, 1, 2]})); - assert.commandWorked(coll.insert({_id: 2, a: [1, 2, 3]})); - assert.commandWorked(coll.insert({_id: 3, a: [2, 3, 4], b: [5, 6, 7]})); - assert.commandWorked(coll.insert({_id: 4, a: [3, 4, 5], b: [6, 7, 8], c: {'0': 9}})); - assert.commandWorked(coll.insert({_id: 5, a: [4, 5, 6], b: [7, 8, 9], c: {'0': 10}})); - assert.commandWorked(coll.insert({_id: 6, a: [5, 6, 7], b: [8, 9, 10], c: {'0': 11}})); +// Test that fieldname-or-array-index queries do not inappropriately trim predicates; that is, +// all predicates on the field are added to a FETCH filter above the IXSCAN. +assert(coll.drop()); +assert.commandWorked(coll.createIndex({"$**": 1})); - assertWildcardQuery({"a.0": {$gt: 1, $lt: 4}}, 'a.0', {'executionStats.nReturned': 2}); - assertWildcardQuery({"a.1": {$gte: 1, $lte: 4}}, 'a.1', {'executionStats.nReturned': 4}); - assertWildcardQuery({"b.2": {$in: [5, 9]}}, 'b.2', {'executionStats.nReturned': 1}); - assertWildcardQuery({"c.0": {$in: [10, 11]}}, 'c.0', {'executionStats.nReturned': 2}); +assert.commandWorked(coll.insert({_id: 1, a: [0, 1, 2]})); +assert.commandWorked(coll.insert({_id: 2, a: [1, 2, 3]})); +assert.commandWorked(coll.insert({_id: 3, a: [2, 3, 4], b: [5, 6, 7]})); +assert.commandWorked(coll.insert({_id: 4, a: [3, 4, 5], b: [6, 7, 8], c: {'0': 9}})); +assert.commandWorked(coll.insert({_id: 5, a: [4, 5, 6], b: [7, 8, 9], c: {'0': 10}})); +assert.commandWorked(coll.insert({_id: 6, a: [5, 6, 7], b: [8, 9, 10], c: {'0': 11}})); - // Test that the $** index doesn't trim predicates when planning across multiple nested $and/$or - // expressions on various fieldname-or-array-index paths. - const trimTestQuery = { - $or: [ - {"a.0": {$gte: 0, $lt: 3}, "a.1": {$in: [2, 3, 4]}}, - {"b.1": {$gt: 6, $lte: 9}, "c.0": {$gt: 9, $lt: 12}} - ] - }; - const trimTestExplain = coll.find(trimTestQuery).explain("executionStats"); - // Verify that the expected number of documents were matched, and the $** index was used. 
- // Matched documents: [_id:2, _id:3, _id:5, _id:6] - assert.eq(trimTestExplain.executionStats.nReturned, 4); - const trimTestIxScans = getPlanStages(trimTestExplain.queryPlanner.winningPlan, "IXSCAN"); - for (let ixScan of trimTestIxScans) { - assert.eq(ixScan.keyPattern["$_path"], 1); - } - // Finally, confirm that a collection scan produces the same results. - assertArrayEq(coll.find(trimTestQuery).toArray(), - coll.find(trimTestQuery).hint({$natural: 1}).toArray()); +assertWildcardQuery({"a.0": {$gt: 1, $lt: 4}}, 'a.0', {'executionStats.nReturned': 2}); +assertWildcardQuery({"a.1": {$gte: 1, $lte: 4}}, 'a.1', {'executionStats.nReturned': 4}); +assertWildcardQuery({"b.2": {$in: [5, 9]}}, 'b.2', {'executionStats.nReturned': 1}); +assertWildcardQuery({"c.0": {$in: [10, 11]}}, 'c.0', {'executionStats.nReturned': 2}); - // Verify that no overlapping bounds are generated and all the expected documents are returned - // for fieldname-or-array-index queries. - const existenceQuery = {"a.0.1": {$exists: true}}; - assert.commandWorked(coll.insert({a: [{1: "exists"}, 1]})); - assert.commandWorked(coll.insert({a: {0: {1: "exists"}}})); - assert.commandWorked(coll.insert({a: {0: [2, "exists"]}})); - assert.commandWorked(coll.insert({a: {0: [2, {"object_exists": 1}]}})); - assert.commandWorked(coll.insert({a: {0: [2, ["array_exists"]]}})); - assert.commandWorked(coll.insert({a: {0: [{1: "exists"}]}})); - assert.commandWorked(coll.insert({a: {0: [{1: []}]}})); - assert.commandWorked(coll.insert({a: {0: [{1: {}}]}})); - assert.commandWorked(coll.insert({a: [{0: [{1: ["exists"]}]}]})); - assert.commandWorked(coll.insert({a: [{}, {0: [{1: ["exists"]}]}]})); - assert.commandWorked(coll.insert({a: [{}, {0: [[], {}, {1: ["exists"]}]}]})); +// Test that the $** index doesn't trim predicates when planning across multiple nested $and/$or +// expressions on various fieldname-or-array-index paths. +const trimTestQuery = { + $or: [ + {"a.0": {$gte: 0, $lt: 3}, "a.1": {$in: [2, 3, 4]}}, + {"b.1": {$gt: 6, $lte: 9}, "c.0": {$gt: 9, $lt: 12}} + ] +}; +const trimTestExplain = coll.find(trimTestQuery).explain("executionStats"); +// Verify that the expected number of documents were matched, and the $** index was used. +// Matched documents: [_id:2, _id:3, _id:5, _id:6] +assert.eq(trimTestExplain.executionStats.nReturned, 4); +const trimTestIxScans = getPlanStages(trimTestExplain.queryPlanner.winningPlan, "IXSCAN"); +for (let ixScan of trimTestIxScans) { + assert.eq(ixScan.keyPattern["$_path"], 1); +} +// Finally, confirm that a collection scan produces the same results. +assertArrayEq(coll.find(trimTestQuery).toArray(), + coll.find(trimTestQuery).hint({$natural: 1}).toArray()); - assert.commandWorked(coll.insert({a: {0: ["not_exist"]}})); - assert.commandWorked(coll.insert({a: {"01": ["not_exist"]}})); - assert.commandWorked(coll.insert({a: [{11: "not_exist"}]})); +// Verify that no overlapping bounds are generated and all the expected documents are returned +// for fieldname-or-array-index queries. 
+const existenceQuery = { + "a.0.1": {$exists: true} +}; +assert.commandWorked(coll.insert({a: [{1: "exists"}, 1]})); +assert.commandWorked(coll.insert({a: {0: {1: "exists"}}})); +assert.commandWorked(coll.insert({a: {0: [2, "exists"]}})); +assert.commandWorked(coll.insert({a: {0: [2, {"object_exists": 1}]}})); +assert.commandWorked(coll.insert({a: {0: [2, ["array_exists"]]}})); +assert.commandWorked(coll.insert({a: {0: [{1: "exists"}]}})); +assert.commandWorked(coll.insert({a: {0: [{1: []}]}})); +assert.commandWorked(coll.insert({a: {0: [{1: {}}]}})); +assert.commandWorked(coll.insert({a: [{0: [{1: ["exists"]}]}]})); +assert.commandWorked(coll.insert({a: [{}, {0: [{1: ["exists"]}]}]})); +assert.commandWorked(coll.insert({a: [{}, {0: [[], {}, {1: ["exists"]}]}]})); - assertWildcardQuery(existenceQuery, 'a.0.1', {'executionStats.nReturned': 11}); - // Finally, confirm that a collection scan produces the same results. - assertArrayEq(coll.find(existenceQuery).toArray(), - coll.find(existenceQuery).hint({$natural: 1}).toArray()); +assert.commandWorked(coll.insert({a: {0: ["not_exist"]}})); +assert.commandWorked(coll.insert({a: {"01": ["not_exist"]}})); +assert.commandWorked(coll.insert({a: [{11: "not_exist"}]})); +assertWildcardQuery(existenceQuery, 'a.0.1', {'executionStats.nReturned': 11}); +// Finally, confirm that a collection scan produces the same results. +assertArrayEq(coll.find(existenceQuery).toArray(), + coll.find(existenceQuery).hint({$natural: 1}).toArray()); })(); diff --git a/jstests/core/wildcard_index_nonblocking_sort.js b/jstests/core/wildcard_index_nonblocking_sort.js index c21a0cacdb5..2537906b412 100644 --- a/jstests/core/wildcard_index_nonblocking_sort.js +++ b/jstests/core/wildcard_index_nonblocking_sort.js @@ -1,85 +1,83 @@ // @tags: [assumes_balancer_off] (function() { - "use strict"; +"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). - load("jstests/libs/analyze_plan.js"); // For getPlanStages(). - load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). +load("jstests/aggregation/extras/utils.js"); // For arrayEq(). +load("jstests/libs/analyze_plan.js"); // For getPlanStages(). +load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). 
- const coll = db.wildcard_nonblocking_sort; +const coll = db.wildcard_nonblocking_sort; - assert.commandWorked(coll.createIndex({"$**": 1}, {wildcardProjection: {"excludedField": 0}})); +assert.commandWorked(coll.createIndex({"$**": 1}, {wildcardProjection: {"excludedField": 0}})); - for (let i = 0; i < 50; i++) { - assert.commandWorked(coll.insert({a: i, b: -i, x: [123], excludedField: i})); - } +for (let i = 0; i < 50; i++) { + assert.commandWorked(coll.insert({a: i, b: -i, x: [123], excludedField: i})); +} - function checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection) { - const l = coll.find(query, projection).sort(sort).toArray(); - const r = coll.find(query, projection).sort(sort).hint({$natural: 1}).toArray(); - assert(arrayEq(l, r)); - } +function checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection) { + const l = coll.find(query, projection).sort(sort).toArray(); + const r = coll.find(query, projection).sort(sort).hint({$natural: 1}).toArray(); + assert(arrayEq(l, r)); +} - function checkQueryUsesSortType(query, sort, projection, isBlocking) { - const explain = assert.commandWorked(coll.find(query, projection).sort(sort).explain()); - const plan = explain.queryPlanner.winningPlan; +function checkQueryUsesSortType(query, sort, projection, isBlocking) { + const explain = assert.commandWorked(coll.find(query, projection).sort(sort).explain()); + const plan = explain.queryPlanner.winningPlan; - const ixScans = getPlanStages(plan, "IXSCAN"); - const sorts = getPlanStages(plan, "SORT"); + const ixScans = getPlanStages(plan, "IXSCAN"); + const sorts = getPlanStages(plan, "SORT"); - if (isBlocking) { - assert.eq(sorts.length, FixtureHelpers.numberOfShardsForCollection(coll)); - assert.eq(sorts[0].sortPattern, sort); + if (isBlocking) { + assert.eq(sorts.length, FixtureHelpers.numberOfShardsForCollection(coll)); + assert.eq(sorts[0].sortPattern, sort); - // A blocking sort may or may not use the index, so we don't check the length of - // 'ixScans'. - } else { - assert.eq(sorts.length, 0); - assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); + // A blocking sort may or may not use the index, so we don't check the length of + // 'ixScans'. + } else { + assert.eq(sorts.length, 0); + assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); - const sortKey = Object.keys(sort)[0]; - assert.docEq(ixScans[0].keyPattern, {$_path: 1, [sortKey]: 1}); - } + const sortKey = Object.keys(sort)[0]; + assert.docEq(ixScans[0].keyPattern, {$_path: 1, [sortKey]: 1}); } - - function checkQueryUsesNonBlockingSortAndGetsCorrectResults(query, sort, projection) { - checkQueryUsesSortType(query, sort, projection, false); - checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection); - } - - function checkQueryUsesBlockingSortAndGetsCorrectResults(query, sort, projection) { - checkQueryUsesSortType(query, sort, projection, true); - checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection); - } - - function runSortTests(dir, proj) { - // Test that the $** index can provide a non-blocking sort where appropriate. - checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir}, proj); - checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}, x: 123}, {a: dir}, proj); - - // Test that the $** index can produce a solution with a blocking sort where appropriate. 
- checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: -dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: -dir, b: dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$exists: true}}, {a: dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults({}, {a: dir}, proj); - - // Test sorted queries on a field that is excluded by the $** index's wildcardProjection. - checkQueryUsesBlockingSortAndGetsCorrectResults( - {excludedField: {$gte: 0}}, {excludedField: dir}, proj); - - // Test sorted queries on a multikey field, with and without $elemMatch. - checkQueryUsesBlockingSortAndGetsCorrectResults({x: 123}, {a: dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults( - {x: {$elemMatch: {$eq: 123}}}, {x: dir}, proj); - checkQueryUsesBlockingSortAndGetsCorrectResults( - {x: {$elemMatch: {$eq: 123}}}, {a: dir}, proj); - } - - // Run each test for both ascending and descending sorts, with and without a projection. - for (let dir of[1, -1]) { - for (let proj of[{}, {_id: 0, a: 1}]) { - runSortTests(dir, proj); - } +} + +function checkQueryUsesNonBlockingSortAndGetsCorrectResults(query, sort, projection) { + checkQueryUsesSortType(query, sort, projection, false); + checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection); +} + +function checkQueryUsesBlockingSortAndGetsCorrectResults(query, sort, projection) { + checkQueryUsesSortType(query, sort, projection, true); + checkQueryHasSameResultsWhenUsingIdIndex(query, sort, projection); +} + +function runSortTests(dir, proj) { + // Test that the $** index can provide a non-blocking sort where appropriate. + checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir}, proj); + checkQueryUsesNonBlockingSortAndGetsCorrectResults({a: {$gte: 0}, x: 123}, {a: dir}, proj); + + // Test that the $** index can produce a solution with a blocking sort where appropriate. + checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: dir, b: -dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$gte: 0}}, {a: -dir, b: dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({a: {$exists: true}}, {a: dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({}, {a: dir}, proj); + + // Test sorted queries on a field that is excluded by the $** index's wildcardProjection. + checkQueryUsesBlockingSortAndGetsCorrectResults( + {excludedField: {$gte: 0}}, {excludedField: dir}, proj); + + // Test sorted queries on a multikey field, with and without $elemMatch. + checkQueryUsesBlockingSortAndGetsCorrectResults({x: 123}, {a: dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({x: {$elemMatch: {$eq: 123}}}, {x: dir}, proj); + checkQueryUsesBlockingSortAndGetsCorrectResults({x: {$elemMatch: {$eq: 123}}}, {a: dir}, proj); +} + +// Run each test for both ascending and descending sorts, with and without a projection. 
+for (let dir of [1, -1]) { + for (let proj of [{}, {_id: 0, a: 1}]) { + runSortTests(dir, proj); } +} })(); diff --git a/jstests/core/wildcard_index_partial_index.js b/jstests/core/wildcard_index_partial_index.js index 5961caea87a..fa76746d9f9 100644 --- a/jstests/core/wildcard_index_partial_index.js +++ b/jstests/core/wildcard_index_partial_index.js @@ -2,47 +2,47 @@ * Test that $** indexes work when provided with a partial filter expression. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan. +load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan. - const coll = db.wildcard_partial_index; +const coll = db.wildcard_partial_index; - function testPartialWildcardIndex(indexKeyPattern, indexOptions) { - coll.drop(); +function testPartialWildcardIndex(indexKeyPattern, indexOptions) { + coll.drop(); - assert.commandWorked(coll.createIndex(indexKeyPattern, indexOptions)); - assert.commandWorked(coll.insert({x: 5, a: 2})); // Not in index. - assert.commandWorked(coll.insert({x: 6, a: 1})); // In index. + assert.commandWorked(coll.createIndex(indexKeyPattern, indexOptions)); + assert.commandWorked(coll.insert({x: 5, a: 2})); // Not in index. + assert.commandWorked(coll.insert({x: 6, a: 1})); // In index. - // find() operations that should use the index. - let explain = coll.explain("executionStats").find({x: 6, a: 1}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain("executionStats").find({x: {$gt: 1}, a: 1}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain("executionStats").find({x: 6, a: {$lte: 1}}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isIxscan(db, explain.queryPlanner.winningPlan)); + // find() operations that should use the index. + let explain = coll.explain("executionStats").find({x: 6, a: 1}).finish(); + assert.eq(1, explain.executionStats.nReturned); + assert(isIxscan(db, explain.queryPlanner.winningPlan)); + explain = coll.explain("executionStats").find({x: {$gt: 1}, a: 1}).finish(); + assert.eq(1, explain.executionStats.nReturned); + assert(isIxscan(db, explain.queryPlanner.winningPlan)); + explain = coll.explain("executionStats").find({x: 6, a: {$lte: 1}}).finish(); + assert.eq(1, explain.executionStats.nReturned); + assert(isIxscan(db, explain.queryPlanner.winningPlan)); - // find() operations that should not use the index. - explain = coll.explain("executionStats").find({x: 6, a: {$lt: 1.6}}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); + // find() operations that should not use the index. 
+ explain = coll.explain("executionStats").find({x: 6, a: {$lt: 1.6}}).finish(); + assert.eq(1, explain.executionStats.nReturned); + assert(isCollscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain("executionStats").find({x: 6}).finish(); - assert.eq(1, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); + explain = coll.explain("executionStats").find({x: 6}).finish(); + assert.eq(1, explain.executionStats.nReturned); + assert(isCollscan(db, explain.queryPlanner.winningPlan)); - explain = coll.explain("executionStats").find({a: {$gte: 0}}).finish(); - assert.eq(2, explain.executionStats.nReturned); - assert(isCollscan(db, explain.queryPlanner.winningPlan)); - } + explain = coll.explain("executionStats").find({a: {$gte: 0}}).finish(); + assert.eq(2, explain.executionStats.nReturned); + assert(isCollscan(db, explain.queryPlanner.winningPlan)); +} - // Case where the partial filter expression is on a field in the index. - testPartialWildcardIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}}); +// Case where the partial filter expression is on a field in the index. +testPartialWildcardIndex({"$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}}); - // Case where the partial filter expression is on a field not included in the index. - testPartialWildcardIndex({"x.$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}}); +// Case where the partial filter expression is on a field not included in the index. +testPartialWildcardIndex({"x.$**": 1}, {partialFilterExpression: {a: {$lte: 1.5}}}); })(); diff --git a/jstests/core/wildcard_index_return_key.js b/jstests/core/wildcard_index_return_key.js index ceaf691aad8..53f7da8c09c 100644 --- a/jstests/core/wildcard_index_return_key.js +++ b/jstests/core/wildcard_index_return_key.js @@ -2,58 +2,57 @@ * Tests that $** indexes works with returnKey option. */ (function() { - 'use strict'; +'use strict'; - load("jstests/aggregation/extras/utils.js"); +load("jstests/aggregation/extras/utils.js"); - const coll = db.wildcard_return_key; - coll.drop(); +const coll = db.wildcard_return_key; +coll.drop(); - const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); - const assertArrayNotEq = (l, r) => assert(!arrayEq(l, r), tojson(l) + " == " + tojson(r)); +const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); +const assertArrayNotEq = (l, r) => assert(!arrayEq(l, r), tojson(l) + " == " + tojson(r)); - assert.commandWorked(coll.createIndex({"$**": 1})); +assert.commandWorked(coll.createIndex({"$**": 1})); - assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 1}})); - assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}})); - assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}})); - assert.commandWorked(coll.insert({a: 1, b: 1, c: {e: 2}})); +assert.commandWorked(coll.insert({a: 1, b: 2, c: {d: 2, e: 1}})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: {d: 1, e: 2}})); +assert.commandWorked(coll.insert({a: 2, b: 1, c: {d: 2, e: 2}})); +assert.commandWorked(coll.insert({a: 1, b: 1, c: {e: 2}})); - // $** index return key with one field argument. - assertArrayEq(coll.find({a: 1}).returnKey().toArray(), - [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]); +// $** index return key with one field argument. +assertArrayEq(coll.find({a: 1}).returnKey().toArray(), + [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]); - // $** index return key with dot path argument. 
- assertArrayEq(coll.find({"c.e": 1}).returnKey().toArray(), [{"$_path": "c.e", "c.e": 1}]); +// $** index return key with dot path argument. +assertArrayEq(coll.find({"c.e": 1}).returnKey().toArray(), [{"$_path": "c.e", "c.e": 1}]); - assert.commandWorked(coll.createIndex({"a": 1})); +assert.commandWorked(coll.createIndex({"a": 1})); - // $** index return key with competing regular index. - assertArrayEq(coll.find({a: 1}).hint({"$**": 1}).returnKey().toArray(), - [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]); +// $** index return key with competing regular index. +assertArrayEq(coll.find({a: 1}).hint({"$**": 1}).returnKey().toArray(), + [{"$_path": "a", a: 1}, {"$_path": "a", a: 1}]); - assert.commandWorked(coll.createIndex({"a": 1, "b": 1})); +assert.commandWorked(coll.createIndex({"a": 1, "b": 1})); - // $** index return key with competing compound index. - assertArrayNotEq(coll.find({a: 1, b: 1}).hint({"$**": 1}).returnKey().toArray(), - [{a: 1, b: 1}]); +// $** index return key with competing compound index. +assertArrayNotEq(coll.find({a: 1, b: 1}).hint({"$**": 1}).returnKey().toArray(), [{a: 1, b: 1}]); - assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, f: [1, 2, 3]})); - assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, g: [{h: 1}, {i: 2}]})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, f: [1, 2, 3]})); +assert.commandWorked(coll.insert({a: 2, b: 2, c: {e: 2}, g: [{h: 1}, {i: 2}]})); - // Multikey path $** index return key. - assertArrayEq(coll.find({f: 1}).returnKey().toArray(), [{"$_path": "f", f: 1}]); +// Multikey path $** index return key. +assertArrayEq(coll.find({f: 1}).returnKey().toArray(), [{"$_path": "f", f: 1}]); - // Multikey subobject $** index return key. - assertArrayEq(coll.find({"g.h": 1}).returnKey().toArray(), [{"$_path": "g.h", "g.h": 1}]); +// Multikey subobject $** index return key. +assertArrayEq(coll.find({"g.h": 1}).returnKey().toArray(), [{"$_path": "g.h", "g.h": 1}]); - assert.commandWorked(coll.dropIndexes()); - assert.commandWorked(coll.createIndex({"c.$**": 1})); +assert.commandWorked(coll.dropIndexes()); +assert.commandWorked(coll.createIndex({"c.$**": 1})); - // Path specified $** index return key. - assertArrayEq(coll.find({"c.d": 1}).returnKey().toArray(), [{"$_path": "c.d", "c.d": 1}]); +// Path specified $** index return key. +assertArrayEq(coll.find({"c.d": 1}).returnKey().toArray(), [{"$_path": "c.d", "c.d": 1}]); - // Path specified $** index return key with irrelevant query. We expect this query to be - // answered with a COLLSCAN, in which case returnKey is expected to return empty objects. - assertArrayEq(coll.find({a: 1, b: 1}).returnKey().toArray(), [{}]); +// Path specified $** index return key with irrelevant query. We expect this query to be +// answered with a COLLSCAN, in which case returnKey is expected to return empty objects. +assertArrayEq(coll.find({a: 1, b: 1}).returnKey().toArray(), [{}]); })();
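The returnKey() assertions above hinge on the shape of $** index keys: every wildcard key pairs the virtual "$_path" component with the indexed value, so a query answered by the wildcard index surfaces both in returnKey() output, while a query answered by a collection scan surfaces empty objects. A minimal shell sketch of the same behaviour (illustrative only, not part of the diff; the scratch collection name is hypothetical):

// Illustrative sketch only; 'wildcard_return_key_demo' is a hypothetical scratch collection.
const demo = db.wildcard_return_key_demo;
demo.drop();
assert.commandWorked(demo.createIndex({"$**": 1}));
assert.commandWorked(demo.insert({a: 1, c: {e: 1}}));

// Keys produced by the $** index carry the virtual "$_path" field alongside the indexed value.
printjson(demo.find({"c.e": 1}).returnKey().toArray());
// Expected shape, per the assertions above: [{"$_path": "c.e", "c.e": 1}]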
\ No newline at end of file diff --git a/jstests/core/wildcard_index_type.js b/jstests/core/wildcard_index_type.js index 4e8d5c68939..34831c3f320 100644 --- a/jstests/core/wildcard_index_type.js +++ b/jstests/core/wildcard_index_type.js @@ -2,144 +2,143 @@ * Test $** support for the $type operator. */ (function() { - "use strict"; +"use strict"; - load("jstests/libs/analyze_plan.js"); // For getPlanStages. +load("jstests/libs/analyze_plan.js"); // For getPlanStages. - const coll = db.wildcard_index_type; +const coll = db.wildcard_index_type; +coll.drop(); + +const indexWildcard = { + "$**": 1 +}; + +// Inserts the given document and runs the given query to confirm that: +// (1) query matches the given document if match is true, +// (2) the winning plan does a wildcard index scan, and +// (3) the resulting index bound matches 'expectedBounds' if given. +function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, expectedBounds) { coll.drop(); + assert.commandWorked(coll.createIndex(indexWildcard)); + assert.commandWorked(coll.insert(doc)); + + // Check that a wildcard index scan is being used to answer query. + const explain = coll.explain("executionStats").find(query).finish(); + if (!match) { + assert.eq(0, explain.executionStats.nReturned, explain); + return; + } - const indexWildcard = {"$**": 1}; - - // Inserts the given document and runs the given query to confirm that: - // (1) query matches the given document if match is true, - // (2) the winning plan does a wildcard index scan, and - // (3) the resulting index bound matches 'expectedBounds' if given. - function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, expectedBounds) { - coll.drop(); - assert.commandWorked(coll.createIndex(indexWildcard)); - assert.commandWorked(coll.insert(doc)); - - // Check that a wildcard index scan is being used to answer query. - const explain = coll.explain("executionStats").find(query).finish(); - if (!match) { - assert.eq(0, explain.executionStats.nReturned, explain); - return; - } - - // Check that the query returns the document. - assert.eq(1, explain.executionStats.nReturned, explain); - - // Winning plan uses a wildcard index scan. - const winningPlan = explain.queryPlanner.winningPlan; - const ixScans = getPlanStages(winningPlan, "IXSCAN"); - assert.gt(ixScans.length, 0, explain); - ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path)); - - // Expected bounds were used. - if (expectedBounds !== undefined) { - ixScans.forEach((ixScan) => assert.docEq(ixScan.indexBounds, expectedBounds)); - } + // Check that the query returns the document. + assert.eq(1, explain.executionStats.nReturned, explain); + + // Winning plan uses a wildcard index scan. + const winningPlan = explain.queryPlanner.winningPlan; + const ixScans = getPlanStages(winningPlan, "IXSCAN"); + assert.gt(ixScans.length, 0, explain); + ixScans.forEach((ixScan) => assert(ixScan.keyPattern.$_path)); + + // Expected bounds were used. + if (expectedBounds !== undefined) { + ixScans.forEach((ixScan) => assert.docEq(ixScan.indexBounds, expectedBounds)); + } +} + +// A $type of 'string' will match a string value. +assertExpectedDocAnswersWildcardIndexQuery({a: "a"}, {a: {$type: "string"}}, true); + +// A $type of 'double' will match a double. +assertExpectedDocAnswersWildcardIndexQuery({a: 1.1}, {a: {$type: "double"}}, true); + +// A $type of 'boolean' will match a boolean. 
+assertExpectedDocAnswersWildcardIndexQuery({a: true}, {a: {$type: "bool"}}, true); + +// A $type of 'string' will match a multifield document with a string value. +assertExpectedDocAnswersWildcardIndexQuery({a: "a", b: 1.1, c: true}, {a: {$type: "string"}}, true); + +// A compound $type of 'string' and 'double' will match a multifield document with a string and +// double value. +assertExpectedDocAnswersWildcardIndexQuery( + {a: "a", b: 1.1, c: true}, {a: {$type: "string"}, b: {$type: "double"}}, true); + +// A compound $type of 'string' and 'double' won't match a multifield document with a string but +// no double value. +assertExpectedDocAnswersWildcardIndexQuery( + {a: "a", b: "b", c: true}, {a: {$type: "string"}, b: {$type: "double"}}, false); + +// A $type of 'object' will match a object. +assertExpectedDocAnswersWildcardIndexQuery( + {a: {"": ""}}, + {a: {$type: "object"}}, + true, + {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); + +// A $type of 'object' will match an empty object. +assertExpectedDocAnswersWildcardIndexQuery( + {a: {}}, + {a: {$type: "object"}}, + true, + {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); + +// A $type of 'object' will match a nested object. +assertExpectedDocAnswersWildcardIndexQuery( + {b: {a: {}}}, + {"b.a": {$type: "object"}}, + true, + {$_path: [`["b.a", "b.a"]`, `["b.a.", "b.a/")`], "b.a": [`[MinKey, MaxKey]`]}); + +// A $type of 'array' will match an empty array. +assertExpectedDocAnswersWildcardIndexQuery( + {a: [[]]}, + {a: {$type: "array"}}, + true, + {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); + +// A $type of 'array' will match an array. +assertExpectedDocAnswersWildcardIndexQuery( + {a: [["c"]]}, + {a: {$type: "array"}}, + true, + {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); + +// A $type of 'regex' will match regex. +assertExpectedDocAnswersWildcardIndexQuery({a: /r/}, {a: {$type: "regex"}}, true); + +// A $type of 'null' will match a null value. +assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "null"}}, true); + +// A $type of 'undefined' will match undefined. +assertExpectedDocAnswersWildcardIndexQuery({a: undefined}, {a: {$type: "undefined"}}, true); + +// A $type of 'undefined' won't match a null value. +assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "undefined"}}, false); + +// A $type of 'code' will match a function value. +assertExpectedDocAnswersWildcardIndexQuery({ + a: function() { + var a = 0; } +}, + {a: {$type: "javascript"}}, + true); + +// A $type of 'binData' will match a binData value. +assertExpectedDocAnswersWildcardIndexQuery({a: new BinData(0, "")}, {a: {$type: "binData"}}, true); + +// A $type of 'timestamp' will match an empty timestamp value. +assertExpectedDocAnswersWildcardIndexQuery({a: new Timestamp()}, {a: {$type: "timestamp"}}, true); + +// A $type of 'timestamp' will match a timestamp value. +assertExpectedDocAnswersWildcardIndexQuery( + {a: new Timestamp(0x80008000, 0)}, {a: {$type: "timestamp"}}, true); + +// A $type of 'date' won't match a timestamp value. +assertExpectedDocAnswersWildcardIndexQuery( + {a: new Timestamp(0x80008000, 0)}, {a: {$type: "date"}}, false); + +// A $type of 'date' will match a date value. +assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}}, true); - // A $type of 'string' will match a string value. 
- assertExpectedDocAnswersWildcardIndexQuery({a: "a"}, {a: {$type: "string"}}, true); - - // A $type of 'double' will match a double. - assertExpectedDocAnswersWildcardIndexQuery({a: 1.1}, {a: {$type: "double"}}, true); - - // A $type of 'boolean' will match a boolean. - assertExpectedDocAnswersWildcardIndexQuery({a: true}, {a: {$type: "bool"}}, true); - - // A $type of 'string' will match a multifield document with a string value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: "a", b: 1.1, c: true}, {a: {$type: "string"}}, true); - - // A compound $type of 'string' and 'double' will match a multifield document with a string and - // double value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: "a", b: 1.1, c: true}, {a: {$type: "string"}, b: {$type: "double"}}, true); - - // A compound $type of 'string' and 'double' won't match a multifield document with a string but - // no double value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: "a", b: "b", c: true}, {a: {$type: "string"}, b: {$type: "double"}}, false); - - // A $type of 'object' will match a object. - assertExpectedDocAnswersWildcardIndexQuery( - {a: {"": ""}}, - {a: {$type: "object"}}, - true, - {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); - - // A $type of 'object' will match an empty object. - assertExpectedDocAnswersWildcardIndexQuery( - {a: {}}, - {a: {$type: "object"}}, - true, - {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); - - // A $type of 'object' will match a nested object. - assertExpectedDocAnswersWildcardIndexQuery( - {b: {a: {}}}, - {"b.a": {$type: "object"}}, - true, - {$_path: [`["b.a", "b.a"]`, `["b.a.", "b.a/")`], "b.a": [`[MinKey, MaxKey]`]}); - - // A $type of 'array' will match an empty array. - assertExpectedDocAnswersWildcardIndexQuery( - {a: [[]]}, - {a: {$type: "array"}}, - true, - {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); - - // A $type of 'array' will match an array. - assertExpectedDocAnswersWildcardIndexQuery( - {a: [["c"]]}, - {a: {$type: "array"}}, - true, - {$_path: [`["a", "a"]`, `["a.", "a/")`], a: [`[MinKey, MaxKey]`]}); - - // A $type of 'regex' will match regex. - assertExpectedDocAnswersWildcardIndexQuery({a: /r/}, {a: {$type: "regex"}}, true); - - // A $type of 'null' will match a null value. - assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "null"}}, true); - - // A $type of 'undefined' will match undefined. - assertExpectedDocAnswersWildcardIndexQuery({a: undefined}, {a: {$type: "undefined"}}, true); - - // A $type of 'undefined' won't match a null value. - assertExpectedDocAnswersWildcardIndexQuery({a: null}, {a: {$type: "undefined"}}, false); - - // A $type of 'code' will match a function value. - assertExpectedDocAnswersWildcardIndexQuery({ - a: function() { - var a = 0; - } - }, - {a: {$type: "javascript"}}, - true); - - // A $type of 'binData' will match a binData value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: new BinData(0, "")}, {a: {$type: "binData"}}, true); - - // A $type of 'timestamp' will match an empty timestamp value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: new Timestamp()}, {a: {$type: "timestamp"}}, true); - - // A $type of 'timestamp' will match a timestamp value. - assertExpectedDocAnswersWildcardIndexQuery( - {a: new Timestamp(0x80008000, 0)}, {a: {$type: "timestamp"}}, true); - - // A $type of 'date' won't match a timestamp value. 
- assertExpectedDocAnswersWildcardIndexQuery( - {a: new Timestamp(0x80008000, 0)}, {a: {$type: "date"}}, false); - - // A $type of 'date' will match a date value. - assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}}, true); - - // A $type of 'timestamp' won't match a date value. - assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false); +// A $type of 'timestamp' won't match a date value. +assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false); })(); diff --git a/jstests/core/wildcard_index_validindex.js b/jstests/core/wildcard_index_validindex.js index 647986f55b4..f647bbcc969 100644 --- a/jstests/core/wildcard_index_validindex.js +++ b/jstests/core/wildcard_index_validindex.js @@ -6,145 +6,139 @@ * ] */ (function() { - "use strict"; - - const kCollectionName = "wildcard_validindex"; - const coll = db.getCollection(kCollectionName); - - const kIndexName = "wildcard_validindex"; - - const createIndexHelper = function(key, parameters) { - return db.runCommand( - {createIndexes: kCollectionName, indexes: [Object.assign({key: key}, parameters)]}); - }; - - const createIndexAndVerifyWithDrop = function(key, parameters) { - coll.dropIndexes(); - createIndexHelper(key, parameters); - assert.eq(coll.getIndexes() - .filter((index) => { - return index.name == parameters.name; - }) - .length, - 1); - }; - - // Can create a valid wildcard index. - createIndexAndVerifyWithDrop({"$**": 1}, {name: kIndexName}); - - // Can create a valid wildcard index with subpaths. - createIndexAndVerifyWithDrop({"a.$**": 1}, {name: kIndexName}); - - // Can create a wildcard index with partialFilterExpression. - createIndexAndVerifyWithDrop({"$**": 1}, - {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}}); - - // Can create a wildcard index with foreground & background construction. - createIndexAndVerifyWithDrop({"$**": 1}, {background: false, name: kIndexName}); - createIndexAndVerifyWithDrop({"$**": 1}, {background: true, name: kIndexName}); - - // Can create a wildcard index with index level collation. - createIndexAndVerifyWithDrop({"$**": 1}, {collation: {locale: "fr"}, name: kIndexName}); - - // Can create a wildcard index with an inclusion projection. - createIndexAndVerifyWithDrop({"$**": 1}, - {wildcardProjection: {a: 1, b: 1, c: 1}, name: kIndexName}); - // Can create a wildcard index with an exclusion projection. - createIndexAndVerifyWithDrop({"$**": 1}, - {wildcardProjection: {a: 0, b: 0, c: 0}, name: kIndexName}); - // Can include _id in an exclusion. - createIndexAndVerifyWithDrop( - {"$**": 1}, {wildcardProjection: {_id: 1, a: 0, b: 0, c: 0}, name: kIndexName}); - // Can exclude _id in an exclusion. - createIndexAndVerifyWithDrop( - {"$**": 1}, {wildcardProjection: {_id: 0, a: 1, b: 1, c: 1}, name: kIndexName}); - - // Cannot create a wildcard index with a non-positive numeric key value. 
+"use strict"; + +const kCollectionName = "wildcard_validindex"; +const coll = db.getCollection(kCollectionName); + +const kIndexName = "wildcard_validindex"; + +const createIndexHelper = function(key, parameters) { + return db.runCommand( + {createIndexes: kCollectionName, indexes: [Object.assign({key: key}, parameters)]}); +}; + +const createIndexAndVerifyWithDrop = function(key, parameters) { coll.dropIndexes(); - assert.commandFailedWithCode(coll.createIndex({"$**": 0}), ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": -1}), ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": -2}), ErrorCodes.CannotCreateIndex); - - // Cannot create a wildcard index with sparse option. - assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {sparse: true}), - ErrorCodes.CannotCreateIndex); - - // Cannot create a wildcard index with a v0 or v1 index. - assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 0}), - ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 1}), - ErrorCodes.CannotCreateIndex); - - // Cannot create a unique index. - assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {unique: true}), - ErrorCodes.CannotCreateIndex); - - // Cannot create a hashed wildcard index. - assert.commandFailedWithCode(coll.createIndex({"$**": "hashed"}), ErrorCodes.CannotCreateIndex); - - // Cannot create a TTL wildcard index. - assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {expireAfterSeconds: 3600}), - ErrorCodes.CannotCreateIndex); - - // Cannot create a geoSpatial wildcard index. - assert.commandFailedWithCode(coll.createIndex({"$**": "2dsphere"}), - ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": "2d"}), ErrorCodes.CannotCreateIndex); - - // Cannot create a text wildcard index using single sub-path syntax. - assert.commandFailedWithCode(coll.createIndex({"a.$**": "text"}), ErrorCodes.CannotCreateIndex); - - // Cannot specify plugin by string. - assert.commandFailedWithCode(coll.createIndex({"a": "wildcard"}), ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": "wildcard"}), - ErrorCodes.CannotCreateIndex); - - // Cannot create a compound wildcard index. - assert.commandFailedWithCode(coll.createIndex({"$**": 1, "a": 1}), - ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"a": 1, "$**": 1}), - ErrorCodes.CannotCreateIndex); - - // Cannot create an wildcard index with an invalid spec. - assert.commandFailedWithCode(coll.createIndex({"a.$**.$**": 1}), ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**.$**": 1}), ErrorCodes.CannotCreateIndex); - assert.commandFailedWithCode(coll.createIndex({"$**": "hello"}), ErrorCodes.CannotCreateIndex); - - // Cannot create an wildcard index with mixed inclusion exclusion. - assert.commandFailedWithCode( - createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 0}}), 40178); - // Cannot create an wildcard index with computed fields. - assert.commandFailedWithCode( - createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: "string"}}), - ErrorCodes.FailedToParse); - // Cannot create an wildcard index with an empty projection. - assert.commandFailedWithCode( - createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {}}), - ErrorCodes.FailedToParse); - // Cannot create another index type with "wildcardProjection" projection. 
- assert.commandFailedWithCode( - createIndexHelper({"a": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}), - ErrorCodes.BadValue); - // Cannot create a text index with a "wildcardProjection" projection. - assert.commandFailedWithCode( - createIndexHelper({"$**": "text"}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}), - ErrorCodes.BadValue); - // Cannot create an wildcard index with a non-object "wildcardProjection" projection. - assert.commandFailedWithCode( - createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: "string"}), - ErrorCodes.TypeMismatch); - // Cannot exclude an subfield of _id in an inclusion. - assert.commandFailedWithCode(createIndexHelper({"_id.id": 0, a: 1, b: 1, c: 1}), - ErrorCodes.CannotCreateIndex); - // Cannot include an subfield of _id in an exclusion. - assert.commandFailedWithCode(createIndexHelper({"_id.id": 1, a: 0, b: 0, c: 0}), - ErrorCodes.CannotCreateIndex); - - // Cannot specify both a subpath and a projection. - assert.commandFailedWithCode( - createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {a: 1}}), - ErrorCodes.FailedToParse); - assert.commandFailedWithCode( - createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {b: 0}}), - ErrorCodes.FailedToParse); + createIndexHelper(key, parameters); + assert.eq(coll.getIndexes() + .filter((index) => { + return index.name == parameters.name; + }) + .length, + 1); +}; + +// Can create a valid wildcard index. +createIndexAndVerifyWithDrop({"$**": 1}, {name: kIndexName}); + +// Can create a valid wildcard index with subpaths. +createIndexAndVerifyWithDrop({"a.$**": 1}, {name: kIndexName}); + +// Can create a wildcard index with partialFilterExpression. +createIndexAndVerifyWithDrop({"$**": 1}, + {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}}); + +// Can create a wildcard index with foreground & background construction. +createIndexAndVerifyWithDrop({"$**": 1}, {background: false, name: kIndexName}); +createIndexAndVerifyWithDrop({"$**": 1}, {background: true, name: kIndexName}); + +// Can create a wildcard index with index level collation. +createIndexAndVerifyWithDrop({"$**": 1}, {collation: {locale: "fr"}, name: kIndexName}); + +// Can create a wildcard index with an inclusion projection. +createIndexAndVerifyWithDrop({"$**": 1}, + {wildcardProjection: {a: 1, b: 1, c: 1}, name: kIndexName}); +// Can create a wildcard index with an exclusion projection. +createIndexAndVerifyWithDrop({"$**": 1}, + {wildcardProjection: {a: 0, b: 0, c: 0}, name: kIndexName}); +// Can include _id in an exclusion. +createIndexAndVerifyWithDrop({"$**": 1}, + {wildcardProjection: {_id: 1, a: 0, b: 0, c: 0}, name: kIndexName}); +// Can exclude _id in an exclusion. +createIndexAndVerifyWithDrop({"$**": 1}, + {wildcardProjection: {_id: 0, a: 1, b: 1, c: 1}, name: kIndexName}); + +// Cannot create a wildcard index with a non-positive numeric key value. +coll.dropIndexes(); +assert.commandFailedWithCode(coll.createIndex({"$**": 0}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": -1}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": -2}), ErrorCodes.CannotCreateIndex); + +// Cannot create a wildcard index with sparse option. +assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {sparse: true}), + ErrorCodes.CannotCreateIndex); + +// Cannot create a wildcard index with a v0 or v1 index. 
+assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 0}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {v: 1}), ErrorCodes.CannotCreateIndex); + +// Cannot create a unique index. +assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {unique: true}), + ErrorCodes.CannotCreateIndex); + +// Cannot create a hashed wildcard index. +assert.commandFailedWithCode(coll.createIndex({"$**": "hashed"}), ErrorCodes.CannotCreateIndex); + +// Cannot create a TTL wildcard index. +assert.commandFailedWithCode(coll.createIndex({"$**": 1}, {expireAfterSeconds: 3600}), + ErrorCodes.CannotCreateIndex); + +// Cannot create a geoSpatial wildcard index. +assert.commandFailedWithCode(coll.createIndex({"$**": "2dsphere"}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": "2d"}), ErrorCodes.CannotCreateIndex); + +// Cannot create a text wildcard index using single sub-path syntax. +assert.commandFailedWithCode(coll.createIndex({"a.$**": "text"}), ErrorCodes.CannotCreateIndex); + +// Cannot specify plugin by string. +assert.commandFailedWithCode(coll.createIndex({"a": "wildcard"}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": "wildcard"}), ErrorCodes.CannotCreateIndex); + +// Cannot create a compound wildcard index. +assert.commandFailedWithCode(coll.createIndex({"$**": 1, "a": 1}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"a": 1, "$**": 1}), ErrorCodes.CannotCreateIndex); + +// Cannot create an wildcard index with an invalid spec. +assert.commandFailedWithCode(coll.createIndex({"a.$**.$**": 1}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**.$**": 1}), ErrorCodes.CannotCreateIndex); +assert.commandFailedWithCode(coll.createIndex({"$**": "hello"}), ErrorCodes.CannotCreateIndex); + +// Cannot create an wildcard index with mixed inclusion exclusion. +assert.commandFailedWithCode( + createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 0}}), 40178); +// Cannot create an wildcard index with computed fields. +assert.commandFailedWithCode( + createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: "string"}}), + ErrorCodes.FailedToParse); +// Cannot create an wildcard index with an empty projection. +assert.commandFailedWithCode( + createIndexHelper({"$**": 1}, {name: kIndexName, wildcardProjection: {}}), + ErrorCodes.FailedToParse); +// Cannot create another index type with "wildcardProjection" projection. +assert.commandFailedWithCode( + createIndexHelper({"a": 1}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}), + ErrorCodes.BadValue); +// Cannot create a text index with a "wildcardProjection" projection. +assert.commandFailedWithCode( + createIndexHelper({"$**": "text"}, {name: kIndexName, wildcardProjection: {a: 1, b: 1}}), + ErrorCodes.BadValue); +// Cannot create an wildcard index with a non-object "wildcardProjection" projection. +assert.commandFailedWithCode( + createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: "string"}), + ErrorCodes.TypeMismatch); +// Cannot exclude an subfield of _id in an inclusion. +assert.commandFailedWithCode(createIndexHelper({"_id.id": 0, a: 1, b: 1, c: 1}), + ErrorCodes.CannotCreateIndex); +// Cannot include an subfield of _id in an exclusion. 
+assert.commandFailedWithCode(createIndexHelper({"_id.id": 1, a: 0, b: 0, c: 0}), + ErrorCodes.CannotCreateIndex); + +// Cannot specify both a subpath and a projection. +assert.commandFailedWithCode( + createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {a: 1}}), + ErrorCodes.FailedToParse); +assert.commandFailedWithCode( + createIndexHelper({"a.$**": 1}, {name: kIndexName, wildcardProjection: {b: 0}}), + ErrorCodes.FailedToParse); })(); diff --git a/jstests/core/write_commands_reject_unknown_fields.js b/jstests/core/write_commands_reject_unknown_fields.js index d21cf2ed9f3..a7f834280d1 100644 --- a/jstests/core/write_commands_reject_unknown_fields.js +++ b/jstests/core/write_commands_reject_unknown_fields.js @@ -3,19 +3,19 @@ // SERVER-23129 Write commands should reject unknown fields. This is run in passthrough tests to // ensure that both mongos and mongod reject these commands. (function() { - 'use strict'; +'use strict'; - var coll = db.write_commands_reject_unknown_fields; +var coll = db.write_commands_reject_unknown_fields; - // All commands must reject fields at the top-level. - assert.commandFailed(coll.runCommand('insert', {documents: [{}], asdf: true})); - assert.commandFailed( - coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}}], asdf: true})); - assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0}], asdf: true})); +// All commands must reject fields at the top-level. +assert.commandFailed(coll.runCommand('insert', {documents: [{}], asdf: true})); +assert.commandFailed( + coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}}], asdf: true})); +assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0}], asdf: true})); - // The inner objects in update and delete must also reject unknown fields. Insert isn't included - // because its inner objects are the raw objects to insert and can have any fields. - assert.commandFailed( - coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}, asdf: true}]})); - assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0, asdf: true}]})); +// The inner objects in update and delete must also reject unknown fields. Insert isn't included +// because its inner objects are the raw objects to insert and can have any fields. +assert.commandFailed( + coll.runCommand('update', {updates: [{q: {}, u: {$inc: {a: 1}}, asdf: true}]})); +assert.commandFailed(coll.runCommand('delete', {deletes: [{q: {}, limit: 0, asdf: true}]})); }()); |
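The final test above relies on strict field validation in the write commands: an unknown top-level field, or an unknown field inside an individual update or delete statement, causes the whole command to fail on both mongos and mongod. A minimal shell sketch of the same check (illustrative only, not part of the diff; the scratch collection name is hypothetical):

// Illustrative sketch only; 'write_commands_demo' is a hypothetical scratch collection.
var scratch = db.write_commands_demo;

// A well-formed insert command succeeds.
assert.commandWorked(scratch.runCommand('insert', {documents: [{x: 1}]}));

// The same command with an unrecognized top-level field is rejected.
assert.commandFailed(scratch.runCommand('insert', {documents: [{x: 1}], bogusField: true}));

// Unknown fields inside an individual update statement are rejected as well.
assert.commandFailed(
    scratch.runCommand('update', {updates: [{q: {}, u: {$inc: {x: 1}}, bogusField: true}]}));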