summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorArun Banala <arun.banala@10gen.com>2019-10-28 11:39:55 +0000
committerevergreen <evergreen@mongodb.com>2019-10-28 11:39:55 +0000
commitecd0f0f60a1f7eeae77ac13a87dbf7f2f29e2ada (patch)
tree2d8ee6af610b8478b0ea0a6b8110d2af359d51f6
parent8b79b65dc042fc8ebb6f4dda2b9a212707e38985 (diff)
downloadmongo-ecd0f0f60a1f7eeae77ac13a87dbf7f2f29e2ada.tar.gz
SERVER-43911 Permit building compound hashed indexes
-rw-r--r--jstests/core/compound_hashed_index.js48
-rw-r--r--jstests/core/hashindex1.js106
-rw-r--r--jstests/core/index_plugins.js82
-rw-r--r--jstests/core/single_field_hashed_index.js105
-rw-r--r--jstests/multiVersion/compound_hashed_index.js148
-rw-r--r--src/mongo/db/catalog/index_key_validate.cpp20
-rw-r--r--src/mongo/db/catalog/index_key_validate_test.cpp44
-rw-r--r--src/mongo/db/index/expression_keys_private.cpp76
-rw-r--r--src/mongo/db/index/expression_keys_private.h2
-rw-r--r--src/mongo/db/index/expression_params.cpp21
-rw-r--r--src/mongo/db/index/expression_params.h2
-rw-r--r--src/mongo/db/index/hash_access_method.cpp10
-rw-r--r--src/mongo/db/index/hash_access_method.h5
-rw-r--r--src/mongo/db/index/hash_key_generator_test.cpp108
14 files changed, 575 insertions, 202 deletions
diff --git a/jstests/core/compound_hashed_index.js b/jstests/core/compound_hashed_index.js
new file mode 100644
index 00000000000..ac5670d5a83
--- /dev/null
+++ b/jstests/core/compound_hashed_index.js
@@ -0,0 +1,48 @@
+/**
+ * Test to verify the behaviour of compound hashed indexes.
+ */
+(function() {
+"use strict";
+
+const coll = db.compound_hashed_index;
+coll.drop();
+
+for (let i = 0; i < 20; i++) {
+ assert.commandWorked(
+ coll.insert({a: i, b: {subObj: "string_" + (i % 13)}, c: NumberInt(i % 10)}));
+}
+
+// Creation of compound hashed indexes work.
+assert.commandWorked(coll.createIndex({a: 1, b: "hashed", c: 1}));
+assert.commandWorked(coll.createIndex({a: "hashed", c: -1}));
+assert.commandWorked(coll.createIndex({b: "hashed", a: -1, c: 1}));
+
+// None of the index fields can be an array.
+assert.commandFailedWithCode(coll.insert({a: []}), 16766);
+assert.commandFailedWithCode(coll.insert({b: []}), 16766);
+assert.commandFailedWithCode(coll.insert({c: []}), 16766);
+
+// Test that having arrays along the path of the index is not allowed.
+assert.commandWorked(coll.createIndex({"field1.field2.0.field4": "hashed", "field2.0.field4": 1}));
+
+// Hashed field path cannot have arrays.
+assert.commandFailedWithCode(coll.insert({field1: []}), 16766);
+assert.commandFailedWithCode(coll.insert({field1: {field2: []}}), 16766);
+assert.commandFailedWithCode(coll.insert({field1: {field2: {0: []}}}), 16766);
+assert.commandFailedWithCode(coll.insert({field1: [{field2: {0: []}}]}), 16766);
+assert.commandFailedWithCode(coll.insert({field1: {field2: {0: {field4: []}}}}), 16766);
+
+// Range field path cannot have arrays.
+assert.commandFailedWithCode(coll.insert({field2: []}), 16766);
+assert.commandFailedWithCode(coll.insert({field2: {0: [0]}}), 16766);
+
+// Verify that updates gets rejected when a document is modified to contain array along the path.
+assert.commandFailedWithCode(coll.update({}, {field2: []}), 16766);
+assert.commandFailedWithCode(coll.update({}, {field2: {0: {field4: []}}}), 16766);
+assert.commandFailedWithCode(coll.update({}, {field1: []}), 16766);
+
+// Verify inserts and updates work when there are no arrays along path.
+assert.commandWorked(coll.insert({field1: {field2: {0: {otherField: []}}}}));
+assert.commandWorked(coll.insert({field1: {field2: {0: {field4: 1}}}}));
+assert.commandWorked(coll.update({}, {field1: {field2: {0: {field4: 1}}}}));
+})(); \ No newline at end of file
diff --git a/jstests/core/hashindex1.js b/jstests/core/hashindex1.js
deleted file mode 100644
index 61f98f980f3..00000000000
--- a/jstests/core/hashindex1.js
+++ /dev/null
@@ -1,106 +0,0 @@
-// Cannot implicitly shard accessed collections because of extra shard key index in sharded
-// collection.
-//
-// @tags: [assumes_no_implicit_index_creation, requires_fastcount]
-
-var t = db.hashindex1;
-t.drop();
-
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
-assert.commandWorked(db.createCollection(t.getName()));
-
-// test non-single field hashed indexes don't get created (maybe change later)
-var badspec = {a: "hashed", b: 1};
-assert.commandFailedWithCode(t.ensureIndex(badspec), 16763);
-assert.eq(t.getIndexes().length, 1, "only _id index should be created");
-
-// test unique index not created (maybe change later)
-var goodspec = {a: "hashed"};
-assert.commandFailedWithCode(t.ensureIndex(goodspec, {"unique": true}), 16764);
-assert.eq(t.getIndexes().length, 1, "unique index got created.");
-
-// now test that non-unique index does get created
-assert.commandWorked(t.ensureIndex(goodspec));
-assert.eq(t.getIndexes().length, 2, "hashed index didn't get created");
-
-// test basic inserts
-for (i = 0; i < 10; i++) {
- assert.commandWorked(t.insert({a: i}));
-}
-assert.eq(t.find().count(), 10, "basic insert didn't work");
-assert.eq(t.find().hint(goodspec).toArray().length, 10, "basic insert didn't work");
-assert.eq(t.find({a: 3}).hint({_id: 1}).toArray()[0]._id,
- t.find({a: 3}).hint(goodspec).toArray()[0]._id,
- "hashindex lookup didn't work");
-
-// make sure things with the same hash are not both returned
-assert.commandWorked(t.insert({a: 3.1}));
-assert.eq(t.find().count(), 11, "additional insert didn't work");
-assert.eq(t.find({a: 3.1}).hint(goodspec).toArray().length, 1);
-assert.eq(t.find({a: 3}).hint(goodspec).toArray().length, 1);
-// test right obj is found
-assert.eq(t.find({a: 3.1}).hint(goodspec).toArray()[0].a, 3.1);
-
-// Make sure we're using the hashed index.
-var explain = t.find({a: 1}).explain();
-assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
-
-// SERVER-12222
-// printjson( t.find({a : {$gte : 3 , $lte : 3}}).explain() )
-// assert.eq( t.find({a : {$gte : 3 , $lte : 3}}).explain().cursor ,
-// cursorname ,
-// "not using hashed cursor");
-var explain = t.find({c: 1}).explain();
-assert(!isIxscan(db, explain.queryPlanner.winningPlan), "using irrelevant hashed index");
-
-// Hash index used with a $in set membership predicate.
-var explain = t.find({a: {$in: [1, 2]}}).explain();
-printjson(explain);
-assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
-
-// Hash index used with a singleton $and predicate conjunction.
-var explain = t.find({$and: [{a: 1}]}).explain();
-assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
-
-// Hash index used with a non singleton $and predicate conjunction.
-var explain = t.find({$and: [{a: {$in: [1, 2]}}, {a: {$gt: 1}}]}).explain();
-assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
-
-// test creation of index based on hash of _id index
-var goodspec2 = {'_id': "hashed"};
-assert.commandWorked(t.ensureIndex(goodspec2));
-assert.eq(t.getIndexes().length, 3, "_id index didn't get created");
-
-var newid = t.findOne()["_id"];
-assert.eq(t.find({_id: newid}).hint({_id: 1}).toArray()[0]._id,
- t.find({_id: newid}).hint(goodspec2).toArray()[0]._id,
- "using hashed index and different index returns different docs");
-
-// test creation of sparse hashed index
-var sparseindex = {b: "hashed"};
-assert.commandWorked(t.ensureIndex(sparseindex, {"sparse": true}));
-assert.eq(t.getIndexes().length, 4, "sparse index didn't get created");
-
-// test sparse index has smaller total items on after inserts
-for (i = 0; i < 10; i++) {
- assert.commandWorked(t.insert({b: i}));
-}
-var totalb = t.find().hint(sparseindex).toArray().length;
-assert.eq(totalb, 10, "sparse index has wrong total");
-
-var total = t.find().hint({"_id": 1}).toArray().length;
-var totala = t.find().hint(goodspec).toArray().length;
-assert.eq(total, totala, "non-sparse index has wrong total");
-assert.lt(totalb, totala, "sparse index should have smaller total");
-
-// Test that having arrays along the path of the index is not allowed.
-assert.commandWorked(t.createIndex({"field1.field2.0.field4": "hashed"}));
-assert.commandFailedWithCode(t.insert({field1: []}), 16766);
-assert.commandFailedWithCode(t.insert({field1: {field2: []}}), 16766);
-assert.commandFailedWithCode(t.insert({field1: {field2: {0: []}}}), 16766);
-assert.commandFailedWithCode(t.insert({field1: [{field2: {0: []}}]}), 16766);
-assert.commandFailedWithCode(t.insert({field1: {field2: {0: {field4: []}}}}), 16766);
-assert.commandWorked(t.insert({field1: {field2: {0: {otherField: []}}}}));
-assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}}));
diff --git a/jstests/core/index_plugins.js b/jstests/core/index_plugins.js
index 0ec565d1d50..3cd1c42f627 100644
--- a/jstests/core/index_plugins.js
+++ b/jstests/core/index_plugins.js
@@ -1,62 +1,74 @@
// Test creation of compound indexes with special index types.
-
-var coll = db.index_plugins;
+(function() {
+"use strict";
+const coll = db.index_plugins;
coll.drop();
// Test building special index types on a single field.
-assert.commandWorked(coll.ensureIndex({a: "hashed"}));
+assert.commandWorked(coll.createIndex({a: "hashed"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "2d"}));
+assert.commandWorked(coll.createIndex({a: "2d"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "2dsphere"}));
+assert.commandWorked(coll.createIndex({a: "2dsphere"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "text"}));
+assert.commandWorked(coll.createIndex({a: "text"}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack"}, {bucketSize: 1})); // compound required
+assert.commandFailed(coll.createIndex({a: "geoHaystack"}, {bucketSize: 1})); // compound required
// Test compounding special index types with an ascending index.
-assert.commandWorked(coll.ensureIndex({a: "2dsphere", b: 1}));
+assert.commandWorked(coll.createIndex({a: "2dsphere", b: 1}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: 1, b: "2dsphere"}));
+assert.commandWorked(coll.createIndex({a: 1, b: "2dsphere"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "text", b: 1}));
+assert.commandWorked(coll.createIndex({a: "text", b: 1}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: 1, b: "text"}));
+assert.commandWorked(coll.createIndex({a: 1, b: "text"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "2d", b: 1}));
-coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: 1, b: "2d"})); // unsupported
+assert.commandWorked(coll.createIndex({a: "2d", b: 1}));
+assert.commandWorked(coll.createIndex({a: "hashed", b: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: "hashed"}));
-assert.commandWorked(coll.ensureIndex({a: "geoHaystack", b: 1}, {bucketSize: 1}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: 1, b: "geoHaystack"}, {bucketSize: 1})); // unsupported
+assert.commandFailed(coll.createIndex({a: 1, b: "2d"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: 1})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: 1, b: "hashed"})); // unsupported
+assert.commandWorked(coll.createIndex({a: "geoHaystack", b: 1}, {bucketSize: 1}));
+coll.dropIndexes();
+assert.commandFailed(coll.createIndex({a: 1, b: "geoHaystack"}, {bucketSize: 1})); // unsupported
// Test compound index where multiple fields have same special index type.
-
-assert.commandWorked(coll.ensureIndex({a: "2dsphere", b: "2dsphere"}));
coll.dropIndexes();
-assert.commandWorked(coll.ensureIndex({a: "text", b: "text"}));
-coll.dropIndexes();
-
-assert.commandFailed(coll.ensureIndex({a: "2d", b: "2d"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "geoHaystack"}, // unsupported
- {bucketSize: 1}));
-
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: "hashed"})); // unsupported
+assert.commandWorked(coll.createIndex({a: "2dsphere", b: "2dsphere"}));
+assert.commandWorked(coll.createIndex({a: "text", b: "text"}));
+// Unsupported.
+assert.commandFailed(coll.createIndex({a: "2d", b: "2d"}));
+assert.commandFailedWithCode(coll.createIndex({a: "hashed", b: "hashed"}), 31303);
+assert.commandFailedWithCode(coll.createIndex({c: 1, a: "hashed", b: "hashed"}), 31303);
// Test compounding different special index types with each other.
-
-assert.commandFailed(coll.ensureIndex({a: "2d", b: "hashed"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: "2dsphere"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "2dsphere", b: "text"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "text", b: "geoHaystack"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "2d"}, // unsupported
- {bucketSize: 1}));
+const incompatibleIndexTypes = ["2d", "2dsphere", "geoHaystack", "hashed", "text"];
+for (let indexType1 of incompatibleIndexTypes) {
+    for (let indexType2 of incompatibleIndexTypes) {
+ if (indexType1 == indexType2) {
+ continue;
+ }
+ assert.commandFailedWithCode(coll.createIndex({a: indexType1, b: indexType2}),
+ ErrorCodes.CannotCreateIndex);
+ assert.commandFailedWithCode(coll.createIndex({a: indexType1, b: indexType2, c: 1}),
+ ErrorCodes.CannotCreateIndex);
+ assert.commandFailedWithCode(coll.createIndex({c: -1, a: indexType1, b: indexType2}),
+ ErrorCodes.CannotCreateIndex);
+ assert.commandFailedWithCode(coll.createIndex({a: indexType1, c: 1, b: indexType2}),
+ ErrorCodes.CannotCreateIndex);
+ }
+ assert.commandFailedWithCode(coll.createIndex({"$**": 1, b: indexType1}),
+ ErrorCodes.CannotCreateIndex);
+ assert.commandFailedWithCode(
+ coll.createIndex({a: "geoHaystack", b: indexType1}, {bucketSize: 1}),
+ [ErrorCodes.CannotCreateIndex, 16770]);
+}
+})();
diff --git a/jstests/core/single_field_hashed_index.js b/jstests/core/single_field_hashed_index.js
new file mode 100644
index 00000000000..6f4fa090aea
--- /dev/null
+++ b/jstests/core/single_field_hashed_index.js
@@ -0,0 +1,105 @@
+/**
+ * Tests the basic behaviours and properties of single-field hashed indexes.
+ * Cannot implicitly shard accessed collections because of extra shard key index in sharded
+ * collection.
+ * @tags: [assumes_no_implicit_index_creation, requires_fastcount]
+ */
+(function() {
+"use strict";
+load("jstests/libs/analyze_plan.js"); // For isIxscan().
+
+const t = db.single_field_hashed_index;
+t.drop();
+
+assert.commandWorked(db.createCollection(t.getName()));
+const indexSpec = {
+ a: "hashed"
+};
+
+// Test unique index not created (maybe change later).
+assert.commandFailedWithCode(t.createIndex(indexSpec, {"unique": true}), 16764);
+assert.eq(t.getIndexes().length, 1, "unique index got created.");
+
+// Test that a non-unique single-field hashed index gets created.
+assert.commandWorked(t.createIndex(indexSpec));
+assert.eq(t.getIndexes().length, 2);
+
+// Test basic inserts.
+for (let i = 0; i < 10; i++) {
+ assert.commandWorked(t.insert({a: i}));
+}
+assert.eq(t.find().count(), 10, "basic insert didn't work");
+assert.eq(t.find().hint(indexSpec).toArray().length, 10, "basic insert didn't work");
+assert.eq(t.find({a: 3}).hint({_id: 1}).toArray()[0]._id,
+ t.find({a: 3}).hint(indexSpec).toArray()[0]._id,
+ "hashindex lookup didn't work");
+
+// Make sure things with the same hash are not both returned.
+assert.commandWorked(t.insert({a: 3.1}));
+assert.eq(t.find().count(), 11, "additional insert didn't work");
+assert.eq(t.find({a: 3.1}).hint(indexSpec).toArray().length, 1);
+assert.eq(t.find({a: 3}).hint(indexSpec).toArray().length, 1);
+// Test right obj is found.
+assert.eq(t.find({a: 3.1}).hint(indexSpec).toArray()[0].a, 3.1);
+
+// Make sure we're using the hashed index.
+let explain = t.find({a: 1}).explain();
+assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
+
+explain = t.find({c: 1}).explain();
+assert(!isIxscan(db, explain.queryPlanner.winningPlan), "using irrelevant hashed index");
+
+// Hash index used with a $in set membership predicate.
+explain = t.find({a: {$in: [1, 2]}}).explain();
+printjson(explain);
+assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
+
+// Hash index used with a singleton $and predicate conjunction.
+explain = t.find({$and: [{a: 1}]}).explain();
+assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
+
+// Hash index used with a non singleton $and predicate conjunction.
+explain = t.find({$and: [{a: {$in: [1, 2]}}, {a: {$gt: 1}}]}).explain();
+assert(isIxscan(db, explain.queryPlanner.winningPlan), "not using hashed index");
+
+// Test creation of index based on hash of _id index.
+const indexSpec2 = {
+ '_id': "hashed"
+};
+assert.commandWorked(t.createIndex(indexSpec2));
+assert.eq(t.getIndexes().length, 3, "_id index didn't get created");
+
+const newid = t.findOne()["_id"];
+assert.eq(t.find({_id: newid}).hint({_id: 1}).toArray()[0]._id,
+ t.find({_id: newid}).hint(indexSpec2).toArray()[0]._id,
+ "using hashed index and different index returns different docs");
+
+// Test creation of sparse hashed index.
+const sparseIndex = {
+ b: "hashed"
+};
+assert.commandWorked(t.createIndex(sparseIndex, {"sparse": true}));
+assert.eq(t.getIndexes().length, 4, "sparse index didn't get created");
+
+// Test sparse index has smaller total items on after inserts.
+for (let i = 0; i < 10; i++) {
+ assert.commandWorked(t.insert({b: i}));
+}
+const totalb = t.find().hint(sparseIndex).toArray().length;
+assert.eq(totalb, 10, "sparse index has wrong total");
+
+const total = t.find().hint({"_id": 1}).toArray().length;
+const totala = t.find().hint(indexSpec).toArray().length;
+assert.eq(total, totala, "non-sparse index has wrong total");
+assert.lt(totalb, totala, "sparse index should have smaller total");
+
+// Test that having arrays along the path of the index is not allowed.
+assert.commandWorked(t.createIndex({"field1.field2.0.field4": "hashed"}));
+assert.commandFailedWithCode(t.insert({field1: []}), 16766);
+assert.commandFailedWithCode(t.insert({field1: {field2: []}}), 16766);
+assert.commandFailedWithCode(t.insert({field1: {field2: {0: []}}}), 16766);
+assert.commandFailedWithCode(t.insert({field1: [{field2: {0: []}}]}), 16766);
+assert.commandFailedWithCode(t.insert({field1: {field2: {0: {field4: []}}}}), 16766);
+assert.commandWorked(t.insert({field1: {field2: {0: {otherField: []}}}}));
+assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}}));
+})(); \ No newline at end of file
diff --git a/jstests/multiVersion/compound_hashed_index.js b/jstests/multiVersion/compound_hashed_index.js
new file mode 100644
index 00000000000..baa47b6cd58
--- /dev/null
+++ b/jstests/multiVersion/compound_hashed_index.js
@@ -0,0 +1,148 @@
+/**
+ * Tests the behaviour of compound hashed indexes with different FCV versions.
+ *
+ * Compound hashed index creation is enabled in 4.4. In this multi version test, we ensure that
+ * - We cannot create compound hashed index when binary is 4.4 and FCV is 4.2 or when binary
+ * is 4.2.
+ * - We can create compound hashed indexes when FCV is 4.4.
+ * - We can start the server with FCV 4.2 with existing compound hashed indexes.
+ * - We cannot start the server with 4.2 binary with existing compound hashed indexes.
+ * - A compound hashed index can be dropped while in FCV 4.2.
+ * - TODO SERVER-43913: Verify that a compound hashed index built in FCV 4.4 continues to work when
+ * we downgrade to FCV 4.2.
+ */
+(function() {
+"use strict";
+
+load("jstests/multiVersion/libs/multi_rs.js"); // For upgradeSet.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+TestData.skipCheckDBHashes = true; // Skip db hashes when restarting the replset.
+
+const nodeOptions42 = {
+ binVersion: "last-stable"
+};
+const nodeOptions44 = {
+ binVersion: "latest"
+};
+
+// Set up a new replSet consisting of 3 nodes, initially running on 4.2 binaries.
+const rst = new ReplSetTest({nodes: 3, nodeOptions: nodeOptions42});
+
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+
+let testDB = rst.getPrimary().getDB(jsTestName());
+let coll = testDB.coll;
+coll.drop();
+
+// Verifies that the instance is running with the specified binary version and FCV.
+function assertVersionAndFCV(db, versions, fcv) {
+ const majorMinorVersion = db.version().substring(0, 3);
+ assert(versions.includes(majorMinorVersion));
+ assert.eq(
+ assert.commandWorked(db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}))
+ .featureCompatibilityVersion.version,
+ fcv);
+}
+
+// Restarts the given replset nodes, or the entire replset if no nodes are specified.
+function restartReplSetNodes(replSet, nodes, options) {
+ const defaultOpts = {remember: true, appendOptions: true, startClean: false};
+ options = Object.assign(defaultOpts, (options || {}));
+ nodes = (nodes || replSet.nodes);
+ assert(Array.isArray(nodes));
+ for (let node of nodes) {
+ // Merge the new options into the existing options for the given nodes.
+ Object.assign(replSet.nodeOptions[`n${replSet.getNodeId(node)}`], options);
+ }
+ replSet.restart(nodes, options);
+}
+
+// Verify that the replset is on binary version 4.2 and FCV 4.2.
+assertVersionAndFCV(testDB, ["4.2"], lastStableFCV);
+
+jsTestLog("Cannot create a compound hashed index on a replset running binary 4.2.");
+assert.commandFailedWithCode(coll.createIndex({a: "hashed", b: 1}), 16763);
+
+// Upgrade the set to the new binary version, but keep the feature compatibility version at 4.2.
+rst.upgradeSet(nodeOptions44);
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.coll;
+assertVersionAndFCV(testDB, ["4.4", "4.3"], lastStableFCV);
+
+jsTestLog("Cannot create a compound hashed index on binary 4.4 with FCV 4.2.");
+assert.commandFailedWithCode(coll.createIndex({c: 1, a: "hashed", b: 1}), 16763);
+assert.commandFailedWithCode(coll.createIndex({a: "hashed", b: 1}), 16763);
+
+jsTestLog("Can create a compound hashed index on binary 4.4 with FCV 4.4.");
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+assert.commandWorked(coll.createIndex({c: 1, a: "hashed", b: 1}));
+assert.commandWorked(coll.createIndex({b: "hashed", c: 1}));
+assert.commandWorked(coll.insert([{a: 1, b: 1, c: 1}, {a: 2, b: 2}]));
+rst.awaitReplication();
+
+jsTestLog("Can use an existing compound hashed index after downgrading FCV to 4.2.");
+// TODO SERVER-43913: Verify that the queries can use the indexes created above.
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+
+jsTestLog("Cannot create a new compound hashed index after downgrading FCV to 4.2.");
+let coll_other = testDB.coll;
+assert.commandFailedWithCode(coll_other.createIndex({c: 1, a: "hashed", b: 1}), 16763);
+
+jsTestLog("Can restart the replset in FCV 4.2 with a compound hashed index present.");
+restartReplSetNodes(rst);
+testDB = rst.getPrimary().getDB(jsTestName());
+coll = testDB.coll;
+assertVersionAndFCV(testDB, ["4.4", "4.3"], lastStableFCV);
+
+jsTestLog(
+ "Can restart the Secondaries in FCV 4.2 and resync the compound hashed index from the Primary.");
+restartReplSetNodes(rst, rst.getSecondaries(), {startClean: true});
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+
+// Verify that the Secondaries have both recreated the compound hashed index.
+let secondaries = rst.getSecondaries();
+assert.eq(secondaries.length, 2);
+for (let sec of secondaries) {
+ assert.eq(sec.getCollection(coll.getFullName())
+ .aggregate([{$indexStats: {}}, {$match: {"key.a": "hashed"}}])
+ .toArray()
+ .length,
+ 1);
+}
+
+jsTestLog("Can drop an existing compound hashed index in FCV 4.2.");
+assert.commandWorked(coll.dropIndex({c: 1, a: "hashed", b: 1}));
+
+jsTestLog("Cannot recreate the dropped compound hashed index in FCV 4.2.");
+assert.commandFailedWithCode(coll.createIndex({c: 1, a: "hashed", b: 1}), 16763);
+
+// Set the FCV to 4.2 and test that a 4.2 binary fails to start when a compound hashed index that
+// was built on 4.4 is still present in the catalog.
+jsTestLog("Cannot start 4.2 binary with compound hashed index present.");
+assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+secondaries = rst.getSecondaries();
+assert.eq(secondaries.length, 2);
+rst.awaitReplication();
+try {
+ restartReplSetNodes(rst, [secondaries[0]], nodeOptions42);
+ assert(false, "Expected 'restartReplSetNodes' to throw");
+} catch (err) {
+ assert.eq(err.message, `Failed to start node ${rst.getNodeId(secondaries[0])}`);
+ // HashAccessMethod should throw this error when the index spec is validated during startup.
+ assert(rawMongoProgramOutput().match("exception in initAndListen: Location16763"));
+}
+
+jsTestLog("Restart the failed node on binary 4.4 and gracefully shut down the replset.");
+Object.assign(rst.nodeOptions[`n${rst.getNodeId(secondaries[0])}`], nodeOptions44);
+rst.start(secondaries[0], nodeOptions44);
+
+rst.awaitReplication();
+rst.stopSet();
+}()); \ No newline at end of file
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index 7b057de6c57..1c5c0f180de 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -40,6 +40,7 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
+#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/field_ref.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/wildcard_key_generator.h"
@@ -304,6 +305,21 @@ StatusWith<BSONObj> validateIndexSpec(
<< "Values in the index key pattern cannot be empty strings"};
}
}
+
+ // Allow compound hashed index only if FCV is 4.4.
+ const auto isFeatureDisabled =
+ (featureCompatibility.isVersionInitialized() &&
+ featureCompatibility.getVersion() <
+ ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44) ||
+ !getTestCommandsEnabled();
+ if (isFeatureDisabled && (indexSpecElem.embeddedObject().nFields() > 1) &&
+ (IndexNames::findPluginName(indexSpecElem.embeddedObject()) ==
+ IndexNames::HASHED)) {
+ return {ErrorCodes::Error(16763),
+ "Compound hashed indexes can only be created with FCV 4.4 and with test "
+ "commands enabled "};
+ }
+
hasKeyPatternField = true;
} else if (IndexDescriptor::kIndexNameFieldName == indexSpecElemFieldName) {
if (indexSpecElem.type() != BSONType::String) {
@@ -374,8 +390,8 @@ StatusWith<BSONObj> validateIndexSpec(
boost::intrusive_ptr<ExpressionContext> expCtx(
new ExpressionContext(opCtx, simpleCollator));
- // Special match expression features (e.g. $jsonSchema, $expr, ...) are not allowed in
- // a partialFilterExpression on index creation.
+ // Special match expression features (e.g. $jsonSchema, $expr, ...) are not allowed in a
+ // partialFilterExpression on index creation.
auto statusWithMatcher =
MatchExpressionParser::parse(indexSpecElem.Obj(),
std::move(expCtx),
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index d61cbb8e0d7..b71769d0e5b 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -36,6 +36,7 @@
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/json.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/unittest/unittest.h"
@@ -283,6 +284,47 @@ TEST(IndexKeyValidateTest, KeyElementNameWildcardFailsWhenValueIsPluginNameWithV
ASSERT_EQ(status, ErrorCodes::CannotCreateIndex);
}
-} // namespace
+TEST(IndexKeyValidateTest, CompoundHashedIndexWithFCV44) {
+ ServerGlobalParams::FeatureCompatibility fcv;
+ fcv.setVersion(ServerGlobalParams::FeatureCompatibility::Version::kFullyUpgradedTo44);
+
+ // Validation fails when 'enableTestCommands' flag is not set.
+ auto status = index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {a : 'hashed', b: 1}, name: 'index'}"), fcv);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQ(status.getStatus().code(), 16763);
+
+ // Validation succeeds with hashed prefix in the index and 'enableTestCommands' flag enabled.
+ setTestCommandsEnabled(true);
+ ASSERT_OK(index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {a : 'hashed', b: 1}, name: 'index'}"), fcv));
+
+ // Validation succeeds with hashed non-prefix in the index.
+ ASSERT_OK(index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {b: 1, a : 'hashed', c: 1}, name: 'index'}"), fcv));
+}
+
+TEST(IndexKeyValidateTest, CompoundHashedIndexWithFCV42) {
+ ServerGlobalParams::FeatureCompatibility fcv;
+ fcv.setVersion(ServerGlobalParams::FeatureCompatibility::Version::kFullyDowngradedTo42);
+ // Validation succeeds with single field hashed index.
+ ASSERT_OK(index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {a : 'hashed'}, name: 'index'}"), fcv));
+
+ // Validation fails with compound hashed index.
+ auto status = index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {a : 'hashed', b: 1}, name: 'index'}"), fcv);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQ(status.getStatus().code(), 16763);
+
+    // Validation fails even with 'enableTestCommands' flag enabled.
+ setTestCommandsEnabled(true);
+ status = index_key_validate::validateIndexSpec(
+ nullptr, fromjson("{key: {a : 'hashed', b: 1}, name: 'index'}"), fcv);
+ ASSERT_NOT_OK(status);
+ ASSERT_EQ(status.getStatus().code(), 16763);
+}
+
+} // namespace
} // namespace mongo
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 677059eb783..eda7587c7dc 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -57,7 +57,6 @@ namespace {
using namespace mongo;
namespace dps = ::mongo::dotted_path_support;
-
//
// Helper functions for getS2Keys
//
@@ -374,7 +373,7 @@ void ExpressionKeysPrivate::getFTSKeys(const BSONObj& obj,
// static
void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
- const string& hashedField,
+ const BSONObj& keyPattern,
HashSeed seed,
int hashVersion,
bool isSparse,
@@ -383,42 +382,49 @@ void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
KeyString::Version keyStringVersion,
Ordering ordering,
boost::optional<RecordId> id) {
- const char* cstr = hashedField.c_str();
- BSONElement fieldVal = dps::extractElementAtPathOrArrayAlongPath(obj, cstr);
-
- // If we hit an array while traversing the path, 'cstr' will point to the path component
- // immediately following the array, or the null termination byte if the final field in the path
- // was an array.
- uassert(
- 16766,
- str::stream()
- << "Error: hashed indexes do not currently support array values. Found array at path: "
- << hashedField.substr(
- 0, hashedField.size() - StringData(cstr).size() - !StringData(cstr).empty()),
- fieldVal.type() != BSONType::Array);
-
- // Convert strings to comparison keys.
- BSONObj fieldValObj;
- if (!fieldVal.eoo()) {
- BSONObjBuilder bob;
- CollationIndexKey::collationAwareIndexKeyAppend(fieldVal, collator, &bob);
- fieldValObj = bob.obj();
- fieldVal = fieldValObj.firstElement();
- KeyString::HeapBuilder keyString(keyStringVersion, ordering);
- keyString.appendNumberLong(makeSingleHashKey(fieldVal, seed, hashVersion));
- if (id) {
- keyString.appendRecordId(*id);
+ static const BSONObj nullObj = BSON("" << BSONNULL);
+ auto hasFieldValue = false;
+ KeyString::HeapBuilder keyString(keyStringVersion, ordering);
+ for (auto&& indexEntry : keyPattern) {
+ auto indexPath = indexEntry.fieldNameStringData();
+ auto* cstr = indexPath.rawData();
+ auto fieldVal = dps::extractElementAtPathOrArrayAlongPath(obj, cstr);
+
+ // If we hit an array while traversing the path, 'cstr' will point to the path component
+ // immediately following the array, or the null termination byte if the final field in the
+ // path was an array.
+ uassert(16766,
+ str::stream() << "Error: hashed indexes do not currently support array values. "
+ "Found array at path: "
+ << indexPath.substr(0,
+ indexPath.size() - StringData(cstr).size() -
+ !StringData(cstr).empty()),
+ fieldVal.type() != BSONType::Array);
+
+ BSONObj fieldValObj;
+ if (fieldVal.eoo()) {
+ fieldVal = nullObj.firstElement();
+ } else {
+ BSONObjBuilder bob;
+ CollationIndexKey::collationAwareIndexKeyAppend(fieldVal, collator, &bob);
+ fieldValObj = bob.obj();
+ fieldVal = fieldValObj.firstElement();
+ hasFieldValue = true;
}
- keys->insert(keyString.release());
- } else if (!isSparse) {
- BSONObj nullObj = BSON("" << BSONNULL);
- KeyString::HeapBuilder keyString(keyStringVersion, ordering);
- keyString.appendNumberLong(makeSingleHashKey(nullObj.firstElement(), seed, hashVersion));
- if (id) {
- keyString.appendRecordId(*id);
+
+ if (indexEntry.isNumber()) {
+ keyString.appendBSONElement(fieldVal);
+ } else {
+ keyString.appendNumberLong(makeSingleHashKey(fieldVal, seed, hashVersion));
}
- keys->insert(keyString.release());
}
+ if (isSparse && !hasFieldValue) {
+ return;
+ }
+ if (id) {
+ keyString.appendRecordId(*id);
+ }
+ keys->insert(keyString.release());
}
// static
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h
index 17e24f98bf7..8a03b566d7d 100644
--- a/src/mongo/db/index/expression_keys_private.h
+++ b/src/mongo/db/index/expression_keys_private.h
@@ -86,7 +86,7 @@ public:
* Generates keys for hash access method.
*/
static void getHashKeys(const BSONObj& obj,
- const std::string& hashedField,
+ const BSONObj& keyPattern,
HashSeed seed,
int hashVersion,
bool isSparse,
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index 4dc0ebbb8d9..29977a5baab 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -72,7 +72,7 @@ void ExpressionParams::parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParam
void ExpressionParams::parseHashParams(const BSONObj& infoObj,
HashSeed* seedOut,
int* versionOut,
- std::string* fieldOut) {
+ BSONObj* keyPattern) {
// Default _seed to DEFAULT_HASH_SEED if "seed" is not included in the index spec
// or if the value of "seed" is not a number
@@ -91,10 +91,21 @@ void ExpressionParams::parseHashParams(const BSONObj& infoObj,
// the value of "hashversion" is not a number
*versionOut = infoObj["hashVersion"].numberInt();
- // Get the hashfield name
- BSONElement firstElt = infoObj.getObjectField("key").firstElement();
- massert(16765, "error: no hashed index field", firstElt.str().compare(IndexNames::HASHED) == 0);
- *fieldOut = firstElt.fieldName();
+ // Extract and validate the index key pattern
+ int numHashFields = 0;
+ *keyPattern = infoObj.getObjectField("key");
+ for (auto&& indexField : *keyPattern) {
+ // The 'indexField' can either be ascending (1), descending (-1), or HASHED. Any other field
+ // types should have failed validation while parsing.
+ invariant(indexField.isNumber() || (indexField.valueStringData() == IndexNames::HASHED));
+ numHashFields += (indexField.isNumber()) ? 0 : 1;
+ }
+ // We shouldn't be here if there are no hashed fields in the index.
+ invariant(numHashFields > 0);
+ uassert(31303,
+ str::stream() << "A maximum of one index field is allowed to be hashed but found "
+ << numHashFields << " for 'key' " << *keyPattern,
+ numHashFields == 1);
}
void ExpressionParams::parseHaystackParams(const BSONObj& infoObj,
diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h
index 6356c6f71bd..157a933c0b3 100644
--- a/src/mongo/db/index/expression_params.h
+++ b/src/mongo/db/index/expression_params.h
@@ -48,7 +48,7 @@ void parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out);
void parseHashParams(const BSONObj& infoObj,
HashSeed* seedOut,
int* versionOut,
- std::string* fieldOut);
+ BSONObj* keyPattern);
void parseHaystackParams(const BSONObj& infoObj,
std::string* geoFieldOut,
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index f8a58929e87..0a218177016 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -41,16 +41,10 @@ HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState,
: AbstractIndexAccessMethod(btreeState, std::move(btree)) {
const IndexDescriptor* descriptor = btreeState->descriptor();
- // We can change these if the single-field limitation is lifted later.
- uassert(16763,
- "Currently only single field hashed index supported.",
- 1 == descriptor->getNumFields());
-
uassert(16764,
"Currently hashed indexes cannot guarantee uniqueness. Use a regular index.",
!descriptor->unique());
-
- ExpressionParams::parseHashParams(descriptor->infoObj(), &_seed, &_hashVersion, &_hashedField);
+ ExpressionParams::parseHashParams(descriptor->infoObj(), &_seed, &_hashVersion, &_keyPattern);
_collator = btreeState->getCollator();
}
@@ -61,7 +55,7 @@ void HashAccessMethod::doGetKeys(const BSONObj& obj,
MultikeyPaths* multikeyPaths,
boost::optional<RecordId> id) const {
ExpressionKeysPrivate::getHashKeys(obj,
- _hashedField,
+ _keyPattern,
_seed,
_hashVersion,
_descriptor->isSparse(),
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 7dd6536de75..9bdaef9c0b7 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -61,8 +61,7 @@ private:
MultikeyPaths* multikeyPaths,
boost::optional<RecordId> id) const final;
- // Only one of our fields is hashed. This is the field name for it.
- std::string _hashedField;
+ BSONObj _keyPattern;
// _seed defaults to zero.
HashSeed _seed;
@@ -70,8 +69,6 @@ private:
// _hashVersion defaults to zero.
int _hashVersion;
- BSONObj _missingKey;
-
// Null if this index orders strings according to the simple binary compare. If non-null,
// represents the collator used to generate index keys for indexed strings.
const CollatorInterface* _collator;
diff --git a/src/mongo/db/index/hash_key_generator_test.cpp b/src/mongo/db/index/hash_key_generator_test.cpp
index fb84de9f5d1..c97879cb204 100644
--- a/src/mongo/db/index/hash_key_generator_test.cpp
+++ b/src/mongo/db/index/hash_key_generator_test.cpp
@@ -89,8 +89,9 @@ TEST(HashKeyGeneratorTest, CollationAppliedBeforeHashing) {
BSONObj obj = fromjson("{a: 'string'}");
KeyStringSet actualKeys;
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+ BSONObj indexSpec = fromjson("{a: 'hashed'}");
ExpressionKeysPrivate::getHashKeys(obj,
- "a",
+ indexSpec,
kHashSeed,
kHashVersion,
false,
@@ -110,8 +111,10 @@ TEST(HashKeyGeneratorTest, CollationDoesNotAffectNonStringFields) {
BSONObj obj = fromjson("{a: 5}");
KeyStringSet actualKeys;
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+
+ BSONObj indexSpec = fromjson("{a: 'hashed'}");
ExpressionKeysPrivate::getHashKeys(obj,
- "a",
+ indexSpec,
kHashSeed,
kHashVersion,
false,
@@ -131,8 +134,10 @@ TEST(HashKeyGeneratorTest, CollatorAppliedBeforeHashingNestedObject) {
BSONObj backwardsObj = fromjson("{a: {b: 'gnirts'}}");
KeyStringSet actualKeys;
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+
+ BSONObj indexSpec = fromjson("{a: 'hashed'}");
ExpressionKeysPrivate::getHashKeys(obj,
- "a",
+ indexSpec,
kHashSeed,
kHashVersion,
false,
@@ -147,11 +152,37 @@ TEST(HashKeyGeneratorTest, CollatorAppliedBeforeHashingNestedObject) {
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
}
+TEST(HashKeyGeneratorTest, CollationAppliedforAllIndexFields) {
+ BSONObj obj = fromjson("{a: {b: 'abc', c: 'def'}}");
+ BSONObj backwardsObj = fromjson("{a: {b: 'cba', c: 'fed'}}");
+ KeyStringSet actualKeys;
+ CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
+
+ BSONObj indexSpec = fromjson("{'a.c': 1, 'a': 'hashed'}");
+ ExpressionKeysPrivate::getHashKeys(obj,
+ indexSpec,
+ kHashSeed,
+ kHashVersion,
+ true,
+ &collator,
+ &actualKeys,
+ KeyString::Version::kLatestVersion,
+ Ordering::make(BSONObj()));
+
+ KeyStringSet expectedKeys;
+ KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj()));
+ keyString.appendBSONElement(backwardsObj["a"]["c"]);
+ keyString.appendNumberLong(BSONElementHasher::hash64(backwardsObj["a"], kHashSeed));
+ expectedKeys.insert(keyString.release());
+ ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
+}
+
TEST(HashKeyGeneratorTest, NoCollation) {
BSONObj obj = fromjson("{a: 'string'}");
KeyStringSet actualKeys;
+ BSONObj indexSpec = fromjson("{a: 'hashed'}");
ExpressionKeysPrivate::getHashKeys(obj,
- "a",
+ indexSpec,
kHashSeed,
kHashVersion,
false,
@@ -166,4 +197,73 @@ TEST(HashKeyGeneratorTest, NoCollation) {
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
}
+TEST(HashKeyGeneratorTest, CompoundIndexEmptyObject) {
+ BSONObj obj = fromjson("{}");
+ KeyStringSet actualKeys;
+ BSONObj indexSpec = fromjson("{a: 'hashed', b: 1, c: 1}");
+ ExpressionKeysPrivate::getHashKeys(obj,
+ indexSpec,
+ kHashSeed,
+ kHashVersion,
+ false,
+ nullptr,
+ &actualKeys,
+ KeyString::Version::kLatestVersion,
+ Ordering::make(BSONObj()));
+
+    // Verify that we inserted null keys for the empty input object.
+ KeyStringSet expectedKeys;
+ KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj()));
+ auto nullBSON = BSON("" << BSONNULL);
+ auto nullElement = nullBSON.firstElement();
+ keyString.appendNumberLong(BSONElementHasher::hash64(nullElement, kHashSeed));
+ keyString.appendBSONElement(nullElement);
+ keyString.appendBSONElement(nullElement);
+ expectedKeys.insert(keyString.release());
+ ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
+}
+
+TEST(HashKeyGeneratorTest, SparseIndex) {
+ BSONObj obj = fromjson("{k: 1}");
+ KeyStringSet actualKeys;
+ BSONObj indexSpec = fromjson("{a: 'hashed', b: 1, c: 1}");
+ ExpressionKeysPrivate::getHashKeys(obj,
+ indexSpec,
+ kHashSeed,
+ kHashVersion,
+ true, // isSparse
+ nullptr,
+ &actualKeys,
+ KeyString::Version::kLatestVersion,
+ Ordering::make(BSONObj()));
+ // Verify that no index entries were added to the sparse index.
+ ASSERT(assertKeysetsEqual(KeyStringSet(), actualKeys));
+}
+
+TEST(HashKeyGeneratorTest, SparseIndexWithAFieldPresent) {
+ BSONObj obj = fromjson("{a: 2}");
+ KeyStringSet actualKeys;
+ BSONObj indexSpec = fromjson("{a: 'hashed', b: 1, c: 1}");
+ ExpressionKeysPrivate::getHashKeys(obj,
+ indexSpec,
+ kHashSeed,
+ kHashVersion,
+ true, // isSparse
+ nullptr,
+ &actualKeys,
+ KeyString::Version::kLatestVersion,
+ Ordering::make(BSONObj()));
+
+    // Verify that we inserted null entries for the missing fields.
+ KeyStringSet expectedKeys;
+ KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj()));
+ auto nullBSON = BSON("" << BSONNULL);
+ auto nullElement = nullBSON.firstElement();
+ keyString.appendNumberLong(BSONElementHasher::hash64(obj["a"], kHashSeed));
+ keyString.appendBSONElement(nullElement);
+ keyString.appendBSONElement(nullElement);
+ expectedKeys.insert(keyString.release());
+ ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
+}
+
} // namespace