29 files changed, 774 insertions, 271 deletions
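The thread running through the 29 files below is a new bit in MatchExpressionParser's allowed-feature mask: parsing $isolated/$atomic now requires AllowedFeatures::kIsolated, call sites that must keep reading legacy filters (the index catalog at startup, plus update/remove/findAndModify query filters) pass the bit explicitly, and read paths such as find, geoNear, mapReduce, and dbhelpers strip it from kAllowAllSpecialFeatures. The standalone sketch below mirrors that bit arithmetic only; the enum values match the expression_parser.h hunk in this diff, but kAllowAllSpecialFeatures and the helper are simplified stand-ins rather than the real server code.

    // Standalone illustration of the AllowedFeatures bitmask pattern used in
    // this change. Enum values mirror the expression_parser.h hunk; everything
    // else is a toy stand-in, not the actual MongoDB types.
    #include <cassert>

    namespace sketch {
    using AllowedFeatureSet = unsigned long long;

    enum AllowedFeatures : AllowedFeatureSet {
        kText = 1 << 0,
        kGeoNear = 1 << 1,
        kJavascript = 1 << 2,
        kExpr = 1 << 3,
        kJSONSchema = 1 << 4,
        kIsolated = 1 << 5,  // new in this change
    };

    constexpr AllowedFeatureSet kBanAllSpecialFeatures = 0;
    // Stand-in: the real constant lives in expression_parser.h and is not part
    // of the quoted hunks; here it is simply the OR of the bits above.
    constexpr AllowedFeatureSet kAllowAllSpecialFeatures =
        kText | kGeoNear | kJavascript | kExpr | kJSONSchema | kIsolated;

    // Mirrors the new guard added to parseAtomicOrIsolated(): the operator is
    // rejected (QueryFeatureNotAllowed) unless the caller opted in via this bit.
    bool isolatedAllowed(AllowedFeatureSet allowedFeatures) {
        return (allowedFeatures & kIsolated) != 0u;
    }
    }  // namespace sketch

    int main() {
        using namespace sketch;

        // find, geoNear, mapReduce, dbhelpers, etc. now strip the bit from the
        // allow-all mask, so {$isolated: 1} filters fail to parse there.
        assert(!isolatedAllowed(kAllowAllSpecialFeatures & ~kIsolated));

        // The index catalog passes kIsolated alone so partial filter expressions
        // persisted by pre-3.6 binaries still parse at startup.
        assert(isolatedAllowed(kIsolated));

        // Index creation (validateIndexSpec) keeps banning all special features.
        assert(!isolatedAllowed(kBanAllSpecialFeatures));
        return 0;
    }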
diff --git a/jstests/aggregation/bugs/match.js b/jstests/aggregation/bugs/match.js index 0d08c0744b8..8cb4519a861 100644 --- a/jstests/aggregation/bugs/match.js +++ b/jstests/aggregation/bugs/match.js @@ -1,188 +1,167 @@ // Check $match pipeline stage. // - Filtering behavior equivalent to a mongo query. // - $where and geo operators are not allowed -load('jstests/aggregation/extras/utils.js'); - -t = db.jstests_aggregation_match; -t.drop(); - -identityProjection = { - _id: '$_id', - a: '$a' -}; - -/** Assert that an aggregation generated the expected error. */ -function assertError(expectedCode, matchSpec) { - matchStage = {$match: matchSpec}; - // Check where matching is folded in to DocumentSourceCursor. - assertErrorCode(t, [matchStage], expectedCode); - // Check where matching is not folded in to DocumentSourceCursor. - assertErrorCode(t, [{$project: identityProjection}, matchStage], expectedCode); -} - -/** Assert that the contents of two arrays are equal, ignoring element ordering. */ -function assertEqualResultsUnordered(one, two) { - oneStr = one.map(function(x) { - return tojson(x); - }); - twoStr = two.map(function(x) { - return tojson(x); - }); - oneStr.sort(); - twoStr.sort(); - assert.eq(oneStr, twoStr); -} - -/** Assert that an aggregation result is as expected. */ -function assertResults(expectedResults, matchSpec) { - findResults = t.find(matchSpec).toArray(); - if (expectedResults) { - assertEqualResultsUnordered(expectedResults, findResults); +(function() { + "use strict"; + + load('jstests/aggregation/extras/utils.js'); + + const coll = db.jstests_aggregation_match; + coll.drop(); + + const identityProjection = {_id: '$_id', a: '$a'}; + + /** Assert that an aggregation generated the expected error. */ + function assertError(expectedCode, matchSpec) { + const matchStage = {$match: matchSpec}; + // Check where matching is folded in to DocumentSourceCursor. + assertErrorCode(coll, [matchStage], expectedCode); + // Check where matching is not folded in to DocumentSourceCursor. + assertErrorCode(coll, [{$project: identityProjection}, matchStage], expectedCode); } - matchStage = {$match: matchSpec}; - // Check where matching is folded in to DocumentSourceCursor. - assertEqualResultsUnordered(findResults, t.aggregate(matchStage).toArray()); - // Check where matching is not folded in to DocumentSourceCursor. - assertEqualResultsUnordered(findResults, - t.aggregate({$project: identityProjection}, matchStage).toArray()); -} - -// Invalid matcher syntax. -assertError(2, {a: {$mod: [0 /* invalid */, 0]}}); - -// $where not allowed. -assertError(ErrorCodes.BadValue, {$where: 'true'}); - -// Geo not allowed. -assertError(ErrorCodes.BadValue, {$match: {a: {$near: [0, 0]}}}); - -// Update modifier not allowed. -if (0) { // SERVER-6650 - assertError(0, {a: 1, $inc: {b: 1}}); -} - -// Aggregation expression not allowed. -if (0) { // SERVER-6650 - assertError(0, {a: 1, b: {$gt: {$add: [1, 1]}}}); -} - -function checkMatchResults(indexed) { - // No results. - t.remove({}); - assertResults([], {}); - - t.save({_id: 0, a: 1}); - t.save({_id: 1, a: 2}); - t.save({_id: 2, a: 3}); - - // Empty query. - assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {}); - - // Simple queries. 
- assertResults([{_id: 0, a: 1}], {a: 1}); - assertResults([{_id: 1, a: 2}], {a: 2}); - assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}}); - assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}}); - assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}}); - - // Regular expression. - t.remove({}); - t.save({_id: 0, a: 'x'}); - t.save({_id: 1, a: 'yx'}); - assertResults([{_id: 0, a: 'x'}], {a: /^x/}); - assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/}); - - // Dotted field. - t.remove({}); - t.save({_id: 0, a: {b: 4}}); - t.save({_id: 1, a: 2}); - assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4}); - - // Value within an array. - t.remove({}); - t.save({_id: 0, a: [1, 2, 3]}); - t.save({_id: 1, a: [2, 2, 3]}); - t.save({_id: 2, a: [2, 2, 2]}); - assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3}); - - // Missing, null, $exists matching. - t.remove({}); - t.save({_id: 0}); - t.save({_id: 1, a: null}); - if (0) { // SERVER-6571 - t.save({_id: 2, a: undefined}); + + /** Assert that the contents of two arrays are equal, ignoring element ordering. */ + function assertEqualResultsUnordered(one, two) { + let oneStr = one.map(function(x) { + return tojson(x); + }); + let twoStr = two.map(function(x) { + return tojson(x); + }); + oneStr.sort(); + twoStr.sort(); + assert.eq(oneStr, twoStr); } - t.save({_id: 3, a: 0}); - assertResults([{_id: 0}, {_id: 1, a: null}], {a: null}); - assertResults(null, {a: {$exists: true}}); - assertResults(null, {a: {$exists: false}}); - - // $elemMatch - t.remove({}); - t.save({_id: 0, a: [1, 2]}); - t.save({_id: 1, a: [1, 2, 3]}); - assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}}); - - t.remove({}); - t.save({_id: 0, a: [{b: 1}, {c: 2}]}); - t.save({_id: 1, a: [{b: 1, c: 2}]}); - assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}}); - - // $size - t.remove({}); - t.save({}); - t.save({a: null}); - t.save({a: []}); - t.save({a: [1]}); - t.save({a: [1, 2]}); - assertResults(null, {a: {$size: 0}}); - assertResults(null, {a: {$size: 1}}); - assertResults(null, {a: {$size: 2}}); - - // $type - t.remove({}); - t.save({}); - t.save({a: null}); - if (0) { // SERVER-6571 - t.save({a: undefined}); + + /** Assert that an aggregation result is as expected. */ + function assertResults(expectedResults, matchSpec) { + const findResults = coll.find(matchSpec).toArray(); + if (expectedResults) { + assertEqualResultsUnordered(expectedResults, findResults); + } + const matchStage = {$match: matchSpec}; + // Check where matching is folded in to DocumentSourceCursor. + assertEqualResultsUnordered(findResults, coll.aggregate(matchStage).toArray()); + // Check where matching is not folded in to DocumentSourceCursor. + assertEqualResultsUnordered( + findResults, coll.aggregate({$project: identityProjection}, matchStage).toArray()); } - t.save({a: NumberInt(1)}); - t.save({a: NumberLong(2)}); - t.save({a: 66.6}); - t.save({a: 'abc'}); - t.save({a: /xyz/}); - t.save({a: {q: 1}}); - t.save({a: true}); - t.save({a: new Date()}); - t.save({a: new ObjectId()}); - for (type = 1; type <= 18; ++type) { - assertResults(null, {a: {$type: type}}); + + // Invalid matcher syntax. + assertError(2, {a: {$mod: [0 /* invalid */, 0]}}); + + // $where not allowed. + assertError(ErrorCodes.BadValue, {$where: 'true'}); + + // Geo not allowed. + assertError(ErrorCodes.BadValue, {$match: {a: {$near: [0, 0]}}}); + + function checkMatchResults(indexed) { + // No results. 
+ coll.remove({}); + assertResults([], {}); + + assert.writeOK(coll.insert({_id: 0, a: 1})); + assert.writeOK(coll.insert({_id: 1, a: 2})); + assert.writeOK(coll.insert({_id: 2, a: 3})); + + // Empty query. + assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {}); + + // Simple queries. + assertResults([{_id: 0, a: 1}], {a: 1}); + assertResults([{_id: 1, a: 2}], {a: 2}); + assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}}); + assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}}); + assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}}); + + // Regular expression. + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: 'x'})); + assert.writeOK(coll.insert({_id: 1, a: 'yx'})); + assertResults([{_id: 0, a: 'x'}], {a: /^x/}); + assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/}); + + // Dotted field. + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: {b: 4}})); + assert.writeOK(coll.insert({_id: 1, a: 2})); + assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4}); + + // Value within an array. + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3]})); + assert.writeOK(coll.insert({_id: 1, a: [2, 2, 3]})); + assert.writeOK(coll.insert({_id: 2, a: [2, 2, 2]})); + assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3}); + + // Missing, null, $exists matching. + coll.remove({}); + assert.writeOK(coll.insert({_id: 0})); + assert.writeOK(coll.insert({_id: 1, a: null})); + assert.writeOK(coll.insert({_id: 3, a: 0})); + assertResults([{_id: 0}, {_id: 1, a: null}], {a: null}); + assertResults(null, {a: {$exists: true}}); + assertResults(null, {a: {$exists: false}}); + + // $elemMatch + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: [1, 2]})); + assert.writeOK(coll.insert({_id: 1, a: [1, 2, 3]})); + assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}}); + + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: [{b: 1}, {c: 2}]})); + assert.writeOK(coll.insert({_id: 1, a: [{b: 1, c: 2}]})); + assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}}); + + // $size + coll.remove({}); + assert.writeOK(coll.insert({})); + assert.writeOK(coll.insert({a: null})); + assert.writeOK(coll.insert({a: []})); + assert.writeOK(coll.insert({a: [1]})); + assert.writeOK(coll.insert({a: [1, 2]})); + assertResults(null, {a: {$size: 0}}); + assertResults(null, {a: {$size: 1}}); + assertResults(null, {a: {$size: 2}}); + + // $type + coll.remove({}); + assert.writeOK(coll.insert({})); + assert.writeOK(coll.insert({a: null})); + assert.writeOK(coll.insert({a: NumberInt(1)})); + assert.writeOK(coll.insert({a: NumberLong(2)})); + assert.writeOK(coll.insert({a: 66.6})); + assert.writeOK(coll.insert({a: 'abc'})); + assert.writeOK(coll.insert({a: /xyz/})); + assert.writeOK(coll.insert({a: {q: 1}})); + assert.writeOK(coll.insert({a: true})); + assert.writeOK(coll.insert({a: new Date()})); + assert.writeOK(coll.insert({a: new ObjectId()})); + for (let type = 1; type <= 18; ++type) { + assertResults(null, {a: {$type: type}}); + } + + coll.remove({}); + assert.writeOK(coll.insert({_id: 0, a: 1})); + assert.writeOK(coll.insert({_id: 1, a: 2})); + assert.writeOK(coll.insert({_id: 2, a: 3})); + + // $and + assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]}); + assertResults([], {$and: [{a: 1}, {_id: 1}]}); + assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], + {$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]}); + + // $or + assertResults([{_id: 0, a: 1}, {_id: 2, 
a: 3}], {$or: [{_id: 0}, {a: 3}]}); } - // $atomic does not affect results. - t.remove({}); - t.save({_id: 0, a: 1}); - t.save({_id: 1, a: 2}); - t.save({_id: 2, a: 3}); - assertResults([{_id: 0, a: 1}], {a: 1, $atomic: true}); - assertResults([{_id: 1, a: 2}], {a: 2, $atomic: true}); - assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}, $atomic: true}); - assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}, $atomic: true}); - assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}, $atomic: true}); - - // $and - assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]}); - assertResults([], {$and: [{a: 1}, {_id: 1}]}); - assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], - {$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]}); - - // $or - assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {$or: [{_id: 0}, {a: 3}]}); -} - -checkMatchResults(false); -t.ensureIndex({a: 1}); -checkMatchResults(true); -t.ensureIndex({'a.b': 1}); -t.ensureIndex({'a.c': 1}); -checkMatchResults(true); + checkMatchResults(false); + coll.createIndex({a: 1}); + checkMatchResults(true); + coll.createIndex({'a.b': 1}); + coll.createIndex({'a.c': 1}); + checkMatchResults(true); +})(); diff --git a/jstests/core/doc_validation_invalid_validators.js b/jstests/core/doc_validation_invalid_validators.js index df254f333fa..0a923bd8cb8 100644 --- a/jstests/core/doc_validation_invalid_validators.js +++ b/jstests/core/doc_validation_invalid_validators.js @@ -15,6 +15,7 @@ assert.commandFailed(db.createCollection(collName, {validator: 7})); assert.commandFailed(db.createCollection(collName, {validator: "assert"})); assert.commandFailed(db.createCollection(collName, {validator: {$jsonSchema: {invalid: 1}}})); + assert.commandFailed(db.createCollection(collName, {validator: {$isolated: 1}})); // Check some disallowed match statements. assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}})); @@ -54,6 +55,7 @@ db.runCommand({"collMod": collName, "validator": {$expr: {$eq: ["$a", "$$unbound"]}}})); assert.commandFailed( db.runCommand({"collMod": collName, "validator": {$jsonSchema: {invalid: 7}}})); + assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$isolated: 1}})); coll.drop(); diff --git a/jstests/core/isolated.js b/jstests/core/isolated.js new file mode 100644 index 00000000000..9295b9e38b7 --- /dev/null +++ b/jstests/core/isolated.js @@ -0,0 +1,170 @@ +// Tests for the $isolated update/delete operator. +(function() { + "use strict"; + + const coll = db.isolated_test; + coll.drop(); + + const isMaster = db.runCommand("ismaster"); + assert.commandWorked(isMaster); + const isMongos = (isMaster.msg === "isdbgrid"); + + // + // $isolated in aggregate $match stage. + // + + assert.commandFailed(db.runCommand( + {aggregate: coll.getName(), pipeline: [{$match: {$isolated: 1}}], cursor: {}})); + + // + // $isolated in count. + // + + assert.throws(function() { + coll.find({$isolated: 1}).count(); + }); + + // + // $isolated in distinct. + // + + assert.throws(function() { + coll.distinct("a", {$isolated: 1}); + }); + + // + // $isolated in find. + // + + // $isolated is not allowed in a query filter. + assert.throws(function() { + coll.find({$isolated: 1}).itcount(); + }); + + // $isolated is not allowed in find with explain. + assert.throws(function() { + coll.find({$isolated: 1}).explain(); + }); + + // $isolated is not allowed in $elemMatch projection. 
+ assert.writeOK(coll.insert({a: [{b: 5}]})); + assert.throws(function() { + coll.find({}, {a: {$elemMatch: {$isolated: 1}}}).itcount(); + }); + + // + // $isolated in findAndModify. + // + + // $isolated is allowed in the query when upsert=false. + assert.writeOK(coll.insert({_id: 0, a: 5})); + assert.eq( + {_id: 0, a: 5, b: 6}, + coll.findAndModify({query: {_id: 0, $isolated: 1}, update: {$set: {b: 6}}, new: true})); + + // $isolated is allowed in the query when upsert=true. + assert.writeOK(coll.insert({_id: 1, a: 5})); + assert.eq( + {_id: 1, a: 5}, + coll.findAndModify({query: {_id: 1, $isolated: 1}, update: {$set: {b: 6}}, upsert: true})); + + // $isolated is not allowed in $pull filter. + assert.writeOK(coll.insert({_id: 2, a: [{b: 5}]})); + assert.throws(function() { + coll.findAndModify({query: {_id: 2}, update: {$pull: {a: {$isolated: 1}}}}); + }); + + // $isolated is not allowed in arrayFilters. + if (db.getMongo().writeMode() === "commands") { + coll.drop(); + assert.writeOK(coll.insert({_id: 3, a: [{b: 5}]})); + assert.throws(function() { + coll.findAndModify({ + query: {_id: 3}, + update: {$set: {"a.$[i].b": 6}}, + arrayFilters: [{"i.b": 5, $isolated: 1}] + }); + }); + } + + // + // $isolated in geoNear. + // + + assert.writeOK(coll.insert({geo: {type: "Point", coordinates: [0, 0]}, a: 5})); + assert.commandWorked(coll.ensureIndex({geo: "2dsphere"})); + assert.commandFailed(db.runCommand({ + geoNear: coll.getName(), + near: {type: "Point", coordinates: [0, 0]}, + spherical: true, + query: {$isolated: 1} + })); + + // + // $isolated in mapReduce. + // + + assert.writeOK(coll.insert({a: 5})); + assert.throws(function() { + coll.mapReduce( + function() { + emit(this.a, 1); + }, + function(key, values) { + return Array.sum(values); + }, + {out: {inline: 1}, query: {$isolated: 1}}); + }); + + // + // $isolated in remove. + // + + coll.drop(); + assert.writeOK(coll.insert({_id: 0, a: 5})); + let writeRes = coll.remove({_id: 0, $isolated: 1}); + assert.writeOK(writeRes); + assert.eq(1, writeRes.nRemoved); + + // + // $isolated in update. + // + + // $isolated is allowed in the filter of an update command. + assert.writeOK(coll.insert({_id: 0, a: 5})); + assert.writeOK(coll.update({_id: 0, $isolated: 1}, {$set: {b: 6}})); + assert.eq({_id: 0, a: 5, b: 6}, coll.findOne({_id: 0})); + + // $isolated is not allowed in a $pull filter. + coll.drop(); + assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); + assert.writeErrorWithCode(coll.update({_id: 0}, {$pull: {a: {$isolated: 1}}}), + ErrorCodes.QueryFeatureNotAllowed); + + // $isolated is not allowed in arrayFilters. + if (db.getMongo().writeMode() === "commands") { + coll.drop(); + assert.writeOK(coll.insert({_id: 0, a: [{b: 5}]})); + assert.writeErrorWithCode( + coll.update( + {_id: 0}, {$set: {"a.$[i].b": 6}}, {arrayFilters: [{"i.b": 5, $isolated: 1}]}), + ErrorCodes.QueryFeatureNotAllowed); + } + + // + // $isolated in a view. + // + + assert.commandFailed(db.createView("invalid", coll.getName(), [{$match: {$isolated: 1}}])); + assert.commandFailed( + db.createView("invalid", coll.getName(), [{$match: {a: {$gt: 5}, $isolated: 1}}])); + + // + // $isolated in a partial index filter. 
+ // + + assert.commandFailed(coll.createIndex({a: 1}, {partialFilterExpression: {$isolated: 1}})); + assert.commandFailed( + coll.createIndex({a: 1}, {partialFilterExpression: {a: {$lt: 5}, $isolated: 1}})); + +})(); diff --git a/jstests/core/list_databases.js b/jstests/core/list_databases.js index d98fc8f68c4..44a4387e1b7 100644 --- a/jstests/core/list_databases.js +++ b/jstests/core/list_databases.js @@ -89,4 +89,6 @@ listDatabases: 1, filter: {a: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}} })); + + assert.commandFailed(db.adminCommand({listDatabases: 1, filter: {$isolated: 1}})); }()); diff --git a/jstests/core/update_arrayFilters.js b/jstests/core/update_arrayFilters.js index fd6b5d15e4a..c8584db5c1e 100644 --- a/jstests/core/update_arrayFilters.js +++ b/jstests/core/update_arrayFilters.js @@ -27,6 +27,11 @@ assert.writeError(coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: ["bad"]}), ErrorCodes.TypeMismatch); + // $isolated in array filter fails to parse. + assert.writeError( + coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 2, $isolated: true}]}), + ErrorCodes.FailedToParse); + // Bad array filter fails to parse. res = coll.update({_id: 0}, {$set: {"a.$[i]": 5}}, {arrayFilters: [{i: 0, j: 0}]}); assert.writeErrorWithCode(res, ErrorCodes.FailedToParse); diff --git a/jstests/multiVersion/upgrade_with_isolated_persisted_to_disk.js b/jstests/multiVersion/upgrade_with_isolated_persisted_to_disk.js new file mode 100644 index 00000000000..cbbd72fc19c --- /dev/null +++ b/jstests/multiVersion/upgrade_with_isolated_persisted_to_disk.js @@ -0,0 +1,126 @@ +/** + * Test that mongod will behave as expected in the following scenarios dealing with the currently + * (3.6) banned $isolated. + * - A collection validator with $isolated created in pre-3.6 successfully parses in a 3.6 + * mongod. + * - A view with $isolated in the $match stage created in pre-3.6 will fail to parse in a 3.6 + * mongod. + * - A partial index with $isolated in the filter expression created in pre-3.6 is still usable + * in a 3.6 mongod. + * + * @tags: [requires_persistence] + */ + +(function() { + "use strict"; + + const testName = "upgrade_with_isolated_persisted_to_disk"; + let dbpath = MongoRunner.dataPath + testName + "validator"; + + let conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "3.4"}); + assert.neq(null, conn, "mongod was unable to start up"); + let testDB = conn.getDB(testName); + + // + // Test collection validator with $isolated. + // + + const validator = {$isolated: 1, a: "foo"}; + + // Create a collection with the specified validator. + assert.commandWorked(testDB.createCollection("coll", {validator: validator})); + let coll = testDB.coll; + + // The validator should cause this insert to fail. + assert.writeError(coll.insert({a: "bar"}), ErrorCodes.DocumentValidationFailure); + + MongoRunner.stopMongod(conn); + + conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true}); + assert.neq(null, conn, "mongod was unable to start with a validator containing $isolated."); + testDB = conn.getDB(testName); + coll = testDB.coll; + + // The validator should continue to reject this insert despite upgrading to the latest binary + // version. + assert.writeError(coll.insert({a: "bar"}), ErrorCodes.DocumentValidationFailure); + + MongoRunner.stopMongod(conn); + + // + // Test view with $isolated in a $match stage. 
+ // + + dbpath = MongoRunner.dataPath + testName + "view"; + conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "3.4"}); + assert.neq(null, conn, "mongod was unable to start up"); + testDB = conn.getDB(testName); + coll = testDB.coll; + + // Creating a view with $isolated is allowed if binary version is 3.4. + const viewName = "isolated_view"; + assert.writeOK(coll.insert({a: 1})); + assert.commandWorked( + testDB.createView(viewName, coll.getName(), [{$match: {$isolated: 1, a: 1}}])); + assert.eq(1, testDB.getCollection("system.views").find().itcount()); + assert.eq(1, testDB.getCollection(viewName).find().itcount()); + + MongoRunner.stopMongod(conn); + + // Starting up with the latest binary version should pass despite invalid view definition. + conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true}); + assert.neq(null, conn, "mongod was unable to start with a view containing $isolated."); + testDB = conn.getDB(testName); + coll = testDB.coll; + + // While the view exists, it cannot be used as the view definition is no longer valid. + assert.eq(1, testDB.getCollection("system.views").find().itcount()); + assert.throws(() => testDB.getCollection(viewName).find().itcount()); + + // Recovery by collMod is not allowed. + assert.commandFailed(testDB.runCommand({collMod: viewName, pipeline: [{$match: {a: 1}}]})); + + // Explicitly remove the view from the system.views collection. + assert.writeOK(testDB.getCollection("system.views").remove({_id: viewName})); + testDB.getCollection(viewName).drop(); + + // Create a new view with a valid pipeline specification. + assert.commandWorked(testDB.createView(viewName, coll.getName(), [{$match: {a: 1}}])); + assert.eq(1, testDB.getCollection(viewName).find().itcount()); + + MongoRunner.stopMongod(conn); + + // + // Test partial index with $isolated. + // + + dbpath = MongoRunner.dataPath + testName + "partial_index"; + conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "3.4"}); + assert.neq(null, conn, "mongod was unable to start up"); + testDB = conn.getDB(testName); + coll = testDB.coll; + + // Create index with $isolated in the partial filter expression. + assert.writeOK(coll.insert({x: 1, a: 1})); + assert.writeOK(coll.insert({x: 1, a: 5})); + assert.commandWorked( + coll.createIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}, $isolated: 1}})); + + assert.eq(1, coll.find({a: {$lt: 5}}).hint({x: 1}).itcount()); + + // Restart with the latest binary version and verify that the index is still usable. + MongoRunner.stopMongod(conn); + conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: "latest", noCleanData: true}); + assert.neq(null, conn, "mongod was unable to start with a partial index containing $isolated."); + testDB = conn.getDB(testName); + coll = testDB.coll; + + assert.eq(1, coll.find({a: {$lt: 5}}).hint({x: 1}).itcount()); + + // Verify that $isolated is not allowed in a partial filter expression in the latest binary + // version. 
+ assert.commandFailed( + coll.createIndex({x: 1}, {partialFilterExpression: {a: {$gt: 5}, $isolated: 1}})); + + MongoRunner.stopMongod(conn); +}()); diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index 6480a091972..8a92c492aa6 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -63,6 +63,7 @@ env.Library( '$BUILD_DIR/mongo/db/common', '$BUILD_DIR/mongo/db/index/index_descriptor', '$BUILD_DIR/mongo/db/index_names', + '$BUILD_DIR/mongo/db/matcher/expressions', '$BUILD_DIR/mongo/db/query/collation/collator_factory_interface', '$BUILD_DIR/mongo/util/fail_point', ], @@ -76,6 +77,7 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/matcher/expressions', 'index_key_validate', ], ) diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 47ee32c4158..1ccf0f50b6b 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -1046,7 +1046,7 @@ auto mongo::userCreateNSImpl(OperationContext* opCtx, // enough, so we rewrite it here. return {ErrorCodes::QueryFeatureNotAllowed, str::stream() << "The featureCompatibilityVersion must be 3.6 to create a " - "collection validator using 3.6 query featuress. See " + "collection validator using 3.6 query features. See " << feature_compatibility_version::kDochubLink << "."}; } else { diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp index afa073f7ce0..fdaee922b71 100644 --- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp @@ -129,9 +129,16 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(IndexCatalogEntry* const this_, BSONObj filter = filterElement.Obj(); boost::intrusive_ptr<ExpressionContext> expCtx( new ExpressionContext(opCtx, _collator.get())); + + // Parsing the partial filter expression is not expected to fail here since the + // expression would have been successfully parsed upstream during index creation. However, + // filters that were allowed in partial filter expressions prior to 3.6 may be present in + // the index catalog and must also successfully parse. StatusWithMatchExpression statusWithMatcher = - MatchExpressionParser::parse(filter, std::move(expCtx)); - // this should be checked in create, so can blow up here + MatchExpressionParser::parse(filter, + std::move(expCtx), + ExtensionsCallbackNoop(), + MatchExpressionParser::AllowedFeatures::kIsolated); invariantOK(statusWithMatcher.getStatus()); _filterExpression = std::move(statusWithMatcher.getValue()); LOG(2) << "have filter expression for " << _ns << " " << _descriptor->indexName() << " " diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 578291a06e9..071a134963a 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -667,11 +667,17 @@ Status IndexCatalogImpl::_isSpecOk(OperationContext* opCtx, const BSONObj& spec) // The collator must outlive the constructed MatchExpression. boost::intrusive_ptr<ExpressionContext> expCtx( new ExpressionContext(opCtx, collator.get())); + + // Parsing the partial filter expression is not expected to fail here since the + // expression would have been successfully parsed upstream during index creation. 
However, + // filters that were allowed in partial filter expressions prior to 3.6 may be present in + // the index catalog and must also successfully parse (e.g., partial index filters with the + // $isolated/$atomic option). StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterElement.Obj(), std::move(expCtx), ExtensionsCallbackNoop(), - MatchExpressionParser::kBanAllSpecialFeatures); + MatchExpressionParser::kIsolated); if (!statusWithMatcher.isOK()) { return statusWithMatcher.getStatus(); } diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp index 0a8c30263b5..ca5ac2aaea5 100644 --- a/src/mongo/db/catalog/index_key_validate.cpp +++ b/src/mongo/db/catalog/index_key_validate.cpp @@ -41,6 +41,7 @@ #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" #include "mongo/db/jsobj.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/service_context.h" @@ -209,6 +210,7 @@ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion inde } StatusWith<BSONObj> validateIndexSpec( + OperationContext* opCtx, const BSONObj& indexSpec, const NamespaceString& expectedNamespace, const ServerGlobalParams::FeatureCompatibility& featureCompatibility) { @@ -333,6 +335,34 @@ StatusWith<BSONObj> validateIndexSpec( } hasCollationField = true; + } else if (IndexDescriptor::kPartialFilterExprFieldName == indexSpecElemFieldName) { + if (indexSpecElem.type() != BSONType::Object) { + return {ErrorCodes::TypeMismatch, + str::stream() << "The field '" + << IndexDescriptor::kPartialFilterExprFieldName + << "' must be an object, but got " + << typeName(indexSpecElem.type())}; + } + + // Just use the simple collator, even though the index may have a separate collation + // specified or may inherit the default collation from the collection. It's legal to + // parse with the wrong collation, since the collation can be set on a MatchExpression + // after the fact. Here, we don't bother checking the collation after the fact, since + // this invocation of the parser is just for validity checking. + auto simpleCollator = nullptr; + boost::intrusive_ptr<ExpressionContext> expCtx( + new ExpressionContext(opCtx, simpleCollator)); + + // Special match expression features (e.g. $jsonSchema, $expr, ...) are not allowed in + // a partialFilterExpression on index creation. + auto statusWithMatcher = + MatchExpressionParser::parse(indexSpecElem.Obj(), + std::move(expCtx), + ExtensionsCallbackNoop(), + MatchExpressionParser::kBanAllSpecialFeatures); + if (!statusWithMatcher.isOK()) { + return statusWithMatcher.getStatus(); + } } else { // We can assume field name is valid at this point. Validation of fieldname is handled // prior to this in validateIndexSpecFieldNames(). diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h index 9ee07df7dc5..d8d1b544041 100644 --- a/src/mongo/db/catalog/index_key_validate.h +++ b/src/mongo/db/catalog/index_key_validate.h @@ -53,6 +53,7 @@ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion inde * status is returned. 
*/ StatusWith<BSONObj> validateIndexSpec( + OperationContext* opCtx, const BSONObj& indexSpec, const NamespaceString& expectedNamespace, const ServerGlobalParams::FeatureCompatibility& featureCompatibility); diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp index ca9179df5c4..3b8634c68ef 100644 --- a/src/mongo/db/catalog/index_spec_validate_test.cpp +++ b/src/mongo/db/catalog/index_spec_validate_test.cpp @@ -50,6 +50,7 @@ using index_key_validate::validateIdIndexSpec; using index_key_validate::validateIndexSpecCollation; const NamespaceString kTestNamespace("test", "index_spec_validate"); +constexpr OperationContext* kDefaultOpCtx = nullptr; /** * Helper function used to return the fields of a BSONObj in a consistent order. @@ -69,19 +70,22 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfKeyPatternIsNotAnObject) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << 1 << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << 1 << "name" << "indexName"), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << "not an object" << "name" << "indexName"), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSONArray() << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSONArray() << "name" << "indexName"), kTestNamespace, featureCompatibility)); @@ -93,12 +97,14 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfFieldRepeatedInKeyPattern) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1 << "field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1 << "field" << 1) << "name" << "indexName"), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1 << "otherField" << -1 << "field" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1 << "otherField" << -1 << "field" << "2dsphere") << "name" << "indexName"), @@ -112,7 +118,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfKeyPatternIsNotPresent) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::FailedToParse, - validateIndexSpec(BSON("name" + validateIndexSpec(kDefaultOpCtx, + BSON("name" << "indexName"), kTestNamespace, featureCompatibility)); @@ -124,7 +131,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNameIsNotAString) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" << 1), + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << 1), kTestNamespace, featureCompatibility)); } @@ -134,9 +142,11 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNameIsNotPresent) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - ASSERT_EQ( - ErrorCodes::FailedToParse, - validateIndexSpec(BSON("key" << BSON("field" << 1)), kTestNamespace, featureCompatibility)); + ASSERT_EQ(ErrorCodes::FailedToParse, + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1)), + kTestNamespace, + featureCompatibility)); } TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsNotAString) { @@ 
-145,14 +155,16 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsNotAString) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << 1), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << BSONObj()), @@ -166,7 +178,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceIsEmptyString) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << ""), @@ -180,7 +193,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceDoesNotMatch) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << "some string"), @@ -190,7 +204,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfNamespaceDoesNotMatch) { // Verify that we reject the index specification when the "ns" field only contains the // collection name. ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << kTestNamespace.coll()), @@ -203,7 +218,8 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 1), @@ -221,7 +237,8 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecWithNamespaceFilledInIfItIsNotPresen sorted(result.getValue())); // Verify that the index specification we returned is still considered valid. 
- ASSERT_OK(validateIndexSpec(result.getValue(), kTestNamespace, featureCompatibility)); + ASSERT_OK( + validateIndexSpec(kDefaultOpCtx, result.getValue(), kTestNamespace, featureCompatibility)); } TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePresent) { @@ -229,7 +246,8 @@ TEST(IndexSpecValidateTest, ReturnsIndexSpecUnchangedIfNamespaceAndVersionArePre featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << kTestNamespace.ns() @@ -255,14 +273,16 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotANumber) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << "not a number"), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << BSONObj()), @@ -276,28 +296,32 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsNotRepresentableAsInt) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2.2), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << std::nan("1")), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << std::numeric_limits<double>::infinity()), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << std::numeric_limits<long long>::max()), @@ -311,7 +335,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsV0) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::CannotCreateIndex, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 0), @@ -325,7 +350,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::CannotCreateIndex, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 3 @@ -336,7 +362,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfVersionIsUnsupported) { featureCompatibility)); ASSERT_EQ(ErrorCodes::CannotCreateIndex, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) 
<< "name" << "indexName" << "v" << -3LL), @@ -349,7 +376,8 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 1), @@ -366,7 +394,8 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionsThatAreAllowedForCreation) { << 1)), sorted(result.getValue())); - result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2LL), @@ -389,7 +418,8 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "ns" << kTestNamespace.ns()), @@ -407,7 +437,8 @@ TEST(IndexSpecValidateTest, DefaultIndexVersionIsV2) { sorted(result.getValue())); // Verify that the index specification we returned is still considered valid. - ASSERT_OK(validateIndexSpec(result.getValue(), kTestNamespace, featureCompatibility)); + ASSERT_OK( + validateIndexSpec(kDefaultOpCtx, result.getValue(), kTestNamespace, featureCompatibility)); } TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) { @@ -415,7 +446,8 @@ TEST(IndexSpecValidateTest, AcceptsIndexVersionV1) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 1), @@ -439,21 +471,24 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsNotAnObject) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "collation" << 1), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "collation" << "not an object"), kTestNamespace, featureCompatibility)); ASSERT_EQ(ErrorCodes::TypeMismatch, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "collation" << BSONArray()), @@ -467,7 +502,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsEmpty) { featureCompatibility.validateFeaturesAsMaster.store(true); ASSERT_EQ(ErrorCodes::BadValue, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "collation" << BSONObj()), @@ -481,7 +517,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfCollationIsPresentAndVersionIsLessTh featureCompatibility.validateFeaturesAsMaster.store(true); 
ASSERT_EQ(ErrorCodes::CannotCreateIndex, - validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "collation" << BSON("locale" @@ -497,7 +534,8 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2 @@ -520,7 +558,8 @@ TEST(IndexSpecValidateTest, AcceptsAnyNonEmptyObjectValueForCollation) { << "simple"))), sorted(result.getValue())); - result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2 @@ -547,7 +586,8 @@ TEST(IndexSpecValidateTest, AcceptsIndexSpecIfCollationIsPresentAndVersionIsEqua featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2 @@ -576,7 +616,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV2) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 2 @@ -592,7 +633,8 @@ TEST(IndexSpecValidateTest, ReturnsAnErrorIfUnknownFieldIsPresentInSpecV1) { featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); featureCompatibility.validateFeaturesAsMaster.store(true); - auto result = validateIndexSpec(BSON("key" << BSON("field" << 1) << "name" + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" << "indexName" << "v" << 1 @@ -824,5 +866,50 @@ TEST(IndexSpecCollationValidateTest, FillsInCollationFieldWithCollectionDefaultI sorted(result.getValue())); } +TEST(IndexSpecPartialFilterTest, FailsIfPartialFilterIsNotAnObject) { + ServerGlobalParams::FeatureCompatibility featureCompatibility; + featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); + featureCompatibility.validateFeaturesAsMaster.store(true); + + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" + << "indexName" + << "partialFilterExpression" + << 1), + kTestNamespace, + featureCompatibility); + ASSERT_EQ(result.getStatus(), ErrorCodes::TypeMismatch); +} + +TEST(IndexSpecPartialFilterTest, FailsIfPartialFilterContainsBannedFeature) { + ServerGlobalParams::FeatureCompatibility featureCompatibility; + featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); + featureCompatibility.validateFeaturesAsMaster.store(true); + + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" + << "indexName" + << "partialFilterExpression" + << BSON("$isolated" << 1)), + kTestNamespace, + featureCompatibility); + ASSERT_EQ(result.getStatus(), 
ErrorCodes::QueryFeatureNotAllowed); +} + +TEST(IndexSpecPartialFilterTest, AcceptsValidPartialFilterExpression) { + ServerGlobalParams::FeatureCompatibility featureCompatibility; + featureCompatibility.setVersion(ServerGlobalParams::FeatureCompatibility::Version::k36); + featureCompatibility.validateFeaturesAsMaster.store(true); + + auto result = validateIndexSpec(kDefaultOpCtx, + BSON("key" << BSON("field" << 1) << "name" + << "indexName" + << "partialFilterExpression" + << BSON("a" << 1)), + kTestNamespace, + featureCompatibility); + ASSERT_OK(result.getStatus()); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp index f34680a7382..01f37ae4ac6 100644 --- a/src/mongo/db/commands/create_indexes.cpp +++ b/src/mongo/db/commands/create_indexes.cpp @@ -73,6 +73,7 @@ const StringData kCommandName = "createIndexes"_sd; * malformed, then an error status is returned. */ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs( + OperationContext* opCtx, const NamespaceString& ns, const BSONObj& cmdObj, const ServerGlobalParams::FeatureCompatibility& featureCompatibility) { @@ -99,7 +100,7 @@ StatusWith<std::vector<BSONObj>> parseAndValidateIndexSpecs( } auto indexSpecStatus = index_key_validate::validateIndexSpec( - indexesElem.Obj(), ns, featureCompatibility); + opCtx, indexesElem.Obj(), ns, featureCompatibility); if (!indexSpecStatus.isOK()) { return indexSpecStatus.getStatus(); } @@ -242,7 +243,7 @@ public: return appendCommandStatus(result, status); auto specsWithStatus = - parseAndValidateIndexSpecs(ns, cmdObj, serverGlobalParams.featureCompatibility); + parseAndValidateIndexSpecs(opCtx, ns, cmdObj, serverGlobalParams.featureCompatibility); if (!specsWithStatus.isOK()) { return appendCommandStatus(result, specsWithStatus.getStatus()); } diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index 15b1841b580..693bb58d5a0 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -514,7 +514,7 @@ public: // Perform index spec validation. idIndexSpec = uassertStatusOK(index_key_validate::validateIndexSpec( - idIndexSpec, ns, serverGlobalParams.featureCompatibility)); + opCtx, idIndexSpec, ns, serverGlobalParams.featureCompatibility)); uassertStatusOK(index_key_validate::validateIdIndexSpec(idIndexSpec)); // Validate or fill in _id index collation. 
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp index 3ef407c1d3f..e4473be4b48 100644 --- a/src/mongo/db/commands/find_cmd.cpp +++ b/src/mongo/db/commands/find_cmd.cpp @@ -163,7 +163,8 @@ public: std::move(qrStatus.getValue()), expCtx, extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); if (!statusWithCQ.isOK()) { return statusWithCQ.getStatus(); } @@ -280,7 +281,8 @@ public: std::move(qr), expCtx, extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); if (!statusWithCQ.isOK()) { return appendCommandStatus(result, statusWithCQ.getStatus()); } diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp index 0df75717f42..0389d550618 100644 --- a/src/mongo/db/commands/geo_near_cmd.cpp +++ b/src/mongo/db/commands/geo_near_cmd.cpp @@ -221,7 +221,8 @@ public: std::move(qr), expCtx, extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); if (!statusWithCQ.isOK()) { errmsg = "Can't parse filter / create query"; return false; diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp index f8882ec158f..c6ff02b154c 100644 --- a/src/mongo/db/commands/mr.cpp +++ b/src/mongo/db/commands/mr.cpp @@ -431,7 +431,7 @@ void State::prepTempCollection() { BSON("key" << BSON("0" << 1) << "ns" << _config.incLong.ns() << "name" << "_temp_0"); auto indexSpec = uassertStatusOK(index_key_validate::validateIndexSpec( - rawIndexSpec, _config.incLong, serverGlobalParams.featureCompatibility)); + _opCtx, rawIndexSpec, _config.incLong, serverGlobalParams.featureCompatibility)); Status status = incColl->getIndexCatalog() ->createIndexOnEmptyCollection(_opCtx, indexSpec) @@ -1122,7 +1122,8 @@ void State::finalReduce(OperationContext* opCtx, CurOp* curOp, ProgressMeterHold std::move(qr), expCtx, extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); verify(statusWithCQ.isOK()); std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); @@ -1488,12 +1489,13 @@ public: const ExtensionsCallbackReal extensionsCallback(opCtx, &config.nss); const boost::intrusive_ptr<ExpressionContext> expCtx; - auto statusWithCQ = - CanonicalQuery::canonicalize(opCtx, - std::move(qr), - expCtx, - extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + auto statusWithCQ = CanonicalQuery::canonicalize( + opCtx, + std::move(qr), + expCtx, + extensionsCallback, + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); if (!statusWithCQ.isOK()) { uasserted(17238, "Can't canonicalize query " + config.filter.toString()); return 0; diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp index e8a80bf2266..b93b9da43e2 100644 --- a/src/mongo/db/commands/plan_cache_commands_test.cpp +++ b/src/mongo/db/commands/plan_cache_commands_test.cpp @@ -231,7 +231,6 @@ TEST(PlanCacheCommandsTest, Canonicalize) { ASSERT_OK(statusWithCQ.getStatus()); unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue()); - // Equivalent query should generate 
same key. statusWithCQ = PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}")); @@ -268,6 +267,24 @@ TEST(PlanCacheCommandsTest, Canonicalize) { ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery)); } +TEST(PlanCacheCommandsTest, PlanCacheIgnoresIsolated) { + PlanCache planCache; + QueryTestServiceContext serviceContext; + auto opCtx = serviceContext.makeOperationContext(); + + // Query with $isolated should generate the same key as a query without $siolated. + auto statusWithCQ = + PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}")); + ASSERT_OK(statusWithCQ.getStatus()); + unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue()); + + statusWithCQ = PlanCacheCommand::canonicalize( + opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, $isolated: 1}")); + ASSERT_OK(statusWithCQ.getStatus()); + unique_ptr<CanonicalQuery> queryWithIsolated = std::move(statusWithCQ.getValue()); + ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*queryWithIsolated)); +} + /** * Tests for planCacheClear (single query shape) */ diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index f3ed7df4f97..158e4b727a6 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -119,7 +119,8 @@ RecordId Helpers::findOne(OperationContext* opCtx, std::move(qr), expCtx, extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + MatchExpressionParser::kAllowAllSpecialFeatures & + ~MatchExpressionParser::AllowedFeatures::kIsolated); massert(17244, "Could not canonicalize " + query.toString(), statusWithCQ.isOK()); unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue()); diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp index 4f3de904667..0cd9ec13a53 100644 --- a/src/mongo/db/matcher/expression_parser.cpp +++ b/src/mongo/db/matcher/expression_parser.cpp @@ -400,8 +400,13 @@ StatusWithMatchExpression parseAtomicOrIsolated( const ExtensionsCallback* extensionsCallback, MatchExpressionParser::AllowedFeatureSet allowedFeatures, DocumentParseLevel currentLevel) { + if ((allowedFeatures & MatchExpressionParser::AllowedFeatures::kIsolated) == 0u) { + return {Status(ErrorCodes::QueryFeatureNotAllowed, + "$isolated ($atomic) is not allowed in this context")}; + } if (currentLevel != DocumentParseLevel::kPredicateTopLevel) { - return {Status(ErrorCodes::FailedToParse, "$atomic/$isolated has to be at the top level")}; + return { + Status(ErrorCodes::FailedToParse, "$isolated ($atomic) has to be at the top level")}; } return {nullptr}; } diff --git a/src/mongo/db/matcher/expression_parser.h b/src/mongo/db/matcher/expression_parser.h index 11c74438e51..f997b4c1a49 100644 --- a/src/mongo/db/matcher/expression_parser.h +++ b/src/mongo/db/matcher/expression_parser.h @@ -96,6 +96,7 @@ public: kJavascript = 1 << 2, kExpr = 1 << 3, kJSONSchema = 1 << 4, + kIsolated = 1 << 5, }; using AllowedFeatureSet = unsigned long long; static constexpr AllowedFeatureSet kBanAllSpecialFeatures = 0; diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp index e25a322248f..669986004f4 100644 --- a/src/mongo/db/matcher/expression_parser_test.cpp +++ b/src/mongo/db/matcher/expression_parser_test.cpp @@ -65,19 +65,54 @@ TEST(MatchExpressionParserTest, Multiple1) { ASSERT(!result.getValue()->matchesBSON(BSON("x" << 5 << "y" << 4))); } -TEST(AtomicMatchExpressionTest, 
AtomicOperator1) { - BSONObj query = BSON("x" << 5 << "$atomic" << BSON("$gt" << 5 << "$lt" << 8)); +TEST(AtomicMatchExpressionTest, AtomicOperatorSuccessfullyParsesWhenFeatureBitIsSet) { + auto query = BSON("x" << 5 << "$atomic" << 1); boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); - StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx); - ASSERT_TRUE(result.isOK()); + auto result = MatchExpressionParser::parse( + query, expCtx, ExtensionsCallbackNoop(), MatchExpressionParser::AllowedFeatures::kIsolated); + ASSERT_OK(result.getStatus()); +} - query = BSON("x" << 5 << "$isolated" << 1); - result = MatchExpressionParser::parse(query, expCtx); - ASSERT_TRUE(result.isOK()); +TEST(AtomicMatchExpressionTest, AtomicOperatorFailsToParseIfNotTopLevel) { + boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); + auto query = BSON("x" << 5 << "y" << BSON("$atomic" << 1)); + auto result = MatchExpressionParser::parse( + query, expCtx, ExtensionsCallbackNoop(), MatchExpressionParser::AllowedFeatures::kIsolated); + ASSERT_NOT_OK(result.getStatus()); + ASSERT_EQ(ErrorCodes::BadValue, result.getStatus()); +} - query = BSON("x" << 5 << "y" << BSON("$isolated" << 1)); - result = MatchExpressionParser::parse(query, expCtx); - ASSERT_FALSE(result.isOK()); +TEST(AtomicMatchExpressionTest, AtomicOperatorFailsToParseIfFeatureBitIsNotSet) { + auto query = BSON("x" << 5 << "$atomic" << 1); + boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); + auto result = MatchExpressionParser::parse(query, expCtx); + ASSERT_EQ(ErrorCodes::QueryFeatureNotAllowed, result.getStatus()); +} + +TEST(IsolatedMatchExpressionTest, IsolatedOperatorSuccessfullyParsesWhenFeatureBitIsSet) { + auto query = BSON("x" << 5 << "$isolated" << 1); + boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest()); + auto result = MatchExpressionParser::parse( + query, expCtx, ExtensionsCallbackNoop(), MatchExpressionParser::AllowedFeatures::kIsolated); + ASSERT_OK(result.getStatus()); +} + +TEST(IsolatedMatchExpressionTest, IsolatedOperatorFailsToParseIfFeatureBitIsNotSet) { + // Query parsing fails if $isolated is not in the allowed feature set. 
diff --git a/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp b/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp
index 13f3d9e1f6c..89ca1824b10 100644
--- a/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp
@@ -231,14 +231,14 @@ TEST(MatchExpressionParserSchemaTest, ObjectMatchCorrectlyParsesNestedObjectMatc
         result.getValue()->matchesBSON(fromjson("{a: [{b: 0}, {b: [{c: 0}, {c: 'string'}]}]}")));
 }
 
-TEST(MatchExpressionParserSchemaTest, ObjectMatchSubExprRejectsTopLevelOperators) {
+TEST(MatchExpressionParserSchemaTest, ObjectMatchSubExprRejectsPathlessOperators) {
     auto query = fromjson(
         "{a: {$_internalSchemaObjectMatch: {"
-        "    $isolated: 1"
+        "    $expr: {$eq: ['$a', 5]}"
         "}}}");
     boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
     auto result = MatchExpressionParser::parse(query, expCtx);
-    ASSERT_EQ(result.getStatus(), ErrorCodes::FailedToParse);
+    ASSERT_EQ(result.getStatus(), ErrorCodes::BadValue);
 }
 
 //
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index ebef44cebc4..a745b06a8ef 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -175,13 +175,18 @@ TEST(CanonicalQueryTest, SortTreeNumChildrenComparison) {
 /**
  * Utility function to create a CanonicalQuery
  */
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
+unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+                                        MatchExpressionParser::AllowedFeatureSet allowedFeatures =
+                                            MatchExpressionParser::kDefaultSpecialFeatures) {
     QueryTestServiceContext serviceContext;
     auto opCtx = serviceContext.makeOperationContext();
     auto qr = stdx::make_unique<QueryRequest>(nss);
     qr->setFilter(fromjson(queryStr));
-    auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
+
+    auto statusWithCQ = CanonicalQuery::canonicalize(
+        opCtx.get(), std::move(qr), nullptr, ExtensionsCallbackNoop(), allowedFeatures);
+
     ASSERT_OK(statusWithCQ.getStatus());
     return std::move(statusWithCQ.getValue());
 }
 
@@ -205,22 +210,30 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
  * Test that CanonicalQuery::isIsolated() returns correctly.
  */
 TEST(CanonicalQueryTest, IsIsolatedReturnsTrueWithIsolated) {
-    unique_ptr<CanonicalQuery> cq = canonicalize("{$isolated: 1, x: 3}");
+    unique_ptr<CanonicalQuery> cq = canonicalize("{$isolated: 1, x: 3}",
+                                                 MatchExpressionParser::kDefaultSpecialFeatures |
+                                                     MatchExpressionParser::kIsolated);
     ASSERT_TRUE(cq->isIsolated());
 }
 
 TEST(CanonicalQueryTest, IsIsolatedReturnsTrueWithAtomic) {
-    unique_ptr<CanonicalQuery> cq = canonicalize("{$atomic: 1, x: 3}");
+    unique_ptr<CanonicalQuery> cq = canonicalize("{$atomic: 1, x: 3}",
+                                                 MatchExpressionParser::kDefaultSpecialFeatures |
+                                                     MatchExpressionParser::kIsolated);
     ASSERT_TRUE(cq->isIsolated());
 }
 
 TEST(CanonicalQueryTest, IsIsolatedReturnsFalseWithIsolated) {
-    unique_ptr<CanonicalQuery> cq = canonicalize("{$isolated: 0, x: 3}");
+    unique_ptr<CanonicalQuery> cq = canonicalize("{$isolated: 0, x: 3}",
+                                                 MatchExpressionParser::kDefaultSpecialFeatures |
+                                                     MatchExpressionParser::kIsolated);
     ASSERT_FALSE(cq->isIsolated());
 }
 
 TEST(CanonicalQueryTest, IsIsolatedReturnsFalseWithAtomic) {
-    unique_ptr<CanonicalQuery> cq = canonicalize("{$atomic: 0, x: 3}");
+    unique_ptr<CanonicalQuery> cq = canonicalize("{$atomic: 0, x: 3}",
+                                                 MatchExpressionParser::kDefaultSpecialFeatures |
+                                                     MatchExpressionParser::kIsolated);
     ASSERT_FALSE(cq->isIsolated());
 }
 
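In canonical_query_test.cpp the helper gains a defaulted allowedFeatures argument, so the isIsolated tests opt in explicitly with kDefaultSpecialFeatures | kIsolated while every other test keeps the stricter default. A small sketch of that opt-in pattern follows; the constant values are invented placeholders, and only the names follow the diff.

    #include <cassert>

    using AllowedFeatureSet = unsigned long long;
    constexpr AllowedFeatureSet kIsolated = 1ULL << 5;                // value taken from expression_parser.h
    constexpr AllowedFeatureSet kDefaultSpecialFeatures = 1ULL << 4;  // placeholder, not the real default set

    // Mirrors the defaulted parameter added to the canonicalize() test helper above.
    bool isolatedAllowed(AllowedFeatureSet allowedFeatures = kDefaultSpecialFeatures) {
        return (allowedFeatures & kIsolated) != 0u;
    }

    int main() {
        assert(!isolatedAllowed());                                    // default set: $isolated rejected
        assert(isolatedAllowed(kDefaultSpecialFeatures | kIsolated));  // explicit opt-in, as in the tests
        return 0;
    }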
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 26a249f7928..e3419187390 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -533,7 +533,8 @@ std::string runQuery(OperationContext* opCtx,
                                      q,
                                      expCtx,
                                      ExtensionsCallbackReal(opCtx, &nss),
-                                     MatchExpressionParser::kAllowAllSpecialFeatures);
+                                     MatchExpressionParser::kAllowAllSpecialFeatures &
+                                         ~MatchExpressionParser::AllowedFeatures::kIsolated);
     if (!statusWithCQ.isOK()) {
         uasserted(17287,
                   str::stream() << "Can't canonicalize query: "
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index e818b7429c4..ad9553046dc 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1248,7 +1248,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorCount(
         collection ? static_cast<const ExtensionsCallback&>(
                          ExtensionsCallbackReal(opCtx, &collection->ns()))
                    : static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()),
-        MatchExpressionParser::kAllowAllSpecialFeatures);
+        MatchExpressionParser::kAllowAllSpecialFeatures &
+            ~MatchExpressionParser::AllowedFeatures::kIsolated);
 
     if (!statusWithCQ.isOK()) {
         return statusWithCQ.getStatus();
@@ -1505,7 +1506,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutorDistinct(
                                      std::move(qr),
                                      expCtx,
                                      extensionsCallback,
-                                     MatchExpressionParser::kAllowAllSpecialFeatures);
+                                     MatchExpressionParser::kAllowAllSpecialFeatures &
+                                         ~MatchExpressionParser::AllowedFeatures::kIsolated);
     if (!statusWithCQ.isOK()) {
         return statusWithCQ.getStatus();
     }
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 915636dd98c..7fbb38648b2 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -206,7 +206,8 @@ StatusWith<ParsedDistinct> ParsedDistinct::parse(OperationContext* opCtx,
                                      std::move(qr),
                                      expCtx,
                                      extensionsCallback,
-                                     MatchExpressionParser::kAllowAllSpecialFeatures);
+                                     MatchExpressionParser::kAllowAllSpecialFeatures &
+                                         ~MatchExpressionParser::AllowedFeatures::kIsolated);
     if (!cq.isOK()) {
         return cq.getStatus();
     }
diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp
index ccc6ccab06c..9a910570f51 100644
--- a/src/mongo/db/system_index.cpp
+++ b/src/mongo/db/system_index.cpp
@@ -99,7 +99,7 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx,
                                               const IndexSpec& spec) {
     try {
         auto indexSpecStatus = index_key_validate::validateIndexSpec(
-            spec.toBSON(), ns, serverGlobalParams.featureCompatibility);
+            opCtx, spec.toBSON(), ns, serverGlobalParams.featureCompatibility);
         BSONObj indexSpec = fassertStatusOK(40452, indexSpecStatus);
 
         log() << "No authorization index detected on " << ns
@@ -198,16 +198,20 @@ void createSystemIndexes(OperationContext* opCtx, Collection* collection) {
     if (ns == AuthorizationManager::usersCollectionNamespace) {
         auto indexSpec = fassertStatusOK(
             40455,
-            index_key_validate::validateIndexSpec(
-                v3SystemUsersIndexSpec.toBSON(), ns, serverGlobalParams.featureCompatibility));
+            index_key_validate::validateIndexSpec(opCtx,
+                                                  v3SystemUsersIndexSpec.toBSON(),
+                                                  ns,
+                                                  serverGlobalParams.featureCompatibility));
 
         fassertStatusOK(
             40456, collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, indexSpec));
     } else if (ns == AuthorizationManager::rolesCollectionNamespace) {
         auto indexSpec = fassertStatusOK(
             40457,
-            index_key_validate::validateIndexSpec(
-                v3SystemRolesIndexSpec.toBSON(), ns, serverGlobalParams.featureCompatibility));
+            index_key_validate::validateIndexSpec(opCtx,
+                                                  v3SystemRolesIndexSpec.toBSON(),
+                                                  ns,
+                                                  serverGlobalParams.featureCompatibility));
 
         fassertStatusOK(
             40458, collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, indexSpec));