author     Mihai Andrei <mihai.andrei@10gen.com>             2020-11-05 21:32:26 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-11-11 23:00:49 +0000
commit     9f2dbad788d016c379c52c6e7323356aae52e6e0
tree       fb7885c1f6493ddd1869f7f4ccf06a87e1775c1a
parent     cba2cfc0e778ffd1c21e6d03976c2aee21a059b1
download   mongo-9f2dbad788d016c379c52c6e7323356aae52e6e0.tar.gz
SERVER-47081 Disallow creation of haystack indexes and verify that haystack indexes are maintained in mixed version clusters
-rw-r--r--  jstests/aggregation/sources/merge/requires_unique_index.js     |   6
-rw-r--r--  jstests/core/geo_big_polygon2.js                                |   1
-rw-r--r--  jstests/core/index_plugins.js                                   |  11
-rw-r--r--  jstests/core/txns/commands_not_allowed_in_txn.js                |   5
-rw-r--r--  jstests/multiVersion/delete_haystack_indexes_on_upgrade.js      |   7
-rw-r--r--  jstests/multiVersion/haystack_indexes_maintained_correctly.js   | 208
-rw-r--r--  jstests/multiVersion/keystring_index.js                         |  10
-rw-r--r--  jstests/multiVersion/libs/data_generators.js                    |  18
-rw-r--r--  src/mongo/db/catalog/index_catalog_impl.cpp                     |  13
-rw-r--r--  src/mongo/db/catalog/index_key_validate.cpp                     |  17
-rw-r--r--  src/mongo/dbtests/clienttests.cpp                               |  17
11 files changed, 236 insertions, 77 deletions
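
As a rough mongo shell sketch of the behavior this change enforces (not part of the commit; the 'points' collection name is hypothetical, while the CannotCreateIndex error and the 2d-index suggestion come from the new checks in index_key_validate.cpp and the updated log messages):

// Against a 4.9+ binary, regardless of FCV, a geoHaystack key is rejected.
assert.commandFailedWithCode(
    db.points.createIndex({loc: "geoHaystack", x: 1}, {name: "geo", bucketSize: 1}),
    ErrorCodes.CannotCreateIndex);

// The 'bucketSize' option is rejected on its own as well.
assert.commandFailedWithCode(db.points.createIndex({loc: "2d"}, {bucketSize: 1}),
                             ErrorCodes.CannotCreateIndex);

// A 2d index is the suggested replacement.
assert.commandWorked(db.points.createIndex({loc: "2d"}));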
diff --git a/jstests/aggregation/sources/merge/requires_unique_index.js b/jstests/aggregation/sources/merge/requires_unique_index.js
index 7aeb11b5da2..dbc5788428c 100644
--- a/jstests/aggregation/sources/merge/requires_unique_index.js
+++ b/jstests/aggregation/sources/merge/requires_unique_index.js
@@ -241,12 +241,6 @@ function dropWithoutImplicitRecreate(coll) {
     assertMergeFailsWithoutUniqueIndex({source: source, onFields: "geo", target: target});
 
     dropWithoutImplicitRecreate(target);
-    assert.commandWorked(
-        target.createIndex({geo: "geoHaystack", a: 1}, {unique: true, bucketSize: 5}));
-    assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["a", "geo"], target: target});
-    assertMergeFailsWithoutUniqueIndex({source: source, onFields: ["geo", "a"], target: target});
-
-    dropWithoutImplicitRecreate(target);
     // MongoDB does not support unique hashed indexes.
     assert.commandFailedWithCode(target.createIndex({a: "hashed"}, {unique: true}), 16764);
     assert.commandWorked(target.createIndex({a: "hashed"}));
diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js
index 16ece83f659..80339cb4fed 100644
--- a/jstests/core/geo_big_polygon2.js
+++ b/jstests/core/geo_big_polygon2.js
@@ -576,7 +576,6 @@ objects.forEach(function(o) {
 assert.commandWorked(coll.ensureIndex({geo: "2dsphere", a: 1}), "compound index, geo");
 // These other index types will fail because of the GeoJSON documents
 assert.commandFailed(coll.ensureIndex({geo: "2dsphere", a: "text"}), "compound index, geo & text");
-assert.commandFailed(coll.ensureIndex({geo: "geoHaystack"}, {bucketSize: 1}), "geoHaystack index");
 assert.commandFailed(coll.ensureIndex({geo: "2d"}), "2d index");
 
 totalObjects = coll.count();
diff --git a/jstests/core/index_plugins.js b/jstests/core/index_plugins.js
index 3cd1c42f627..e535d189e6c 100644
--- a/jstests/core/index_plugins.js
+++ b/jstests/core/index_plugins.js
@@ -15,8 +15,6 @@ coll.dropIndexes();
 assert.commandWorked(coll.createIndex({a: "text"}));
 coll.dropIndexes();
 
-assert.commandFailed(coll.createIndex({a: "geoHaystack"}, {bucketSize: 1}));  // compound required
-
 // Test compounding special index types with an ascending index.
 assert.commandWorked(coll.createIndex({a: "2dsphere", b: 1}));
 
@@ -36,10 +34,6 @@ assert.commandWorked(coll.createIndex({a: 1, b: "hashed"}));
 coll.dropIndexes();
 
 assert.commandFailed(coll.createIndex({a: 1, b: "2d"}));  // unsupported
-assert.commandWorked(coll.createIndex({a: "geoHaystack", b: 1}, {bucketSize: 1}));
-coll.dropIndexes();
-assert.commandFailed(coll.createIndex({a: 1, b: "geoHaystack"}, {bucketSize: 1}));  // unsupported
-
 // Test compound index where multiple fields have same special index type.
 coll.dropIndexes();
 assert.commandWorked(coll.createIndex({a: "2dsphere", b: "2dsphere"}));
@@ -50,7 +44,7 @@ assert.commandFailedWithCode(coll.createIndex({a: "hashed", b: "hashed"}), 31303
 assert.commandFailedWithCode(coll.createIndex({c: 1, a: "hashed", b: "hashed"}), 31303);
 
 // Test compounding different special index types with each other.
-const incompatableIndexTypes = ["2d", "2dsphere", "geoHaystack", "hashed", "text"];
+const incompatableIndexTypes = ["2d", "2dsphere", "hashed", "text"];
 for (let indexType1 of incompatableIndexTypes) {
     for (let indexType2 of incompatableIndexTypes) {
         if (indexType1 == indexType2) {
@@ -67,8 +61,5 @@ for (let indexType1 of incompatableIndexTypes) {
         }
         assert.commandFailedWithCode(coll.createIndex({"$**": 1, b: indexType1}),
                                      ErrorCodes.CannotCreateIndex);
-        assert.commandFailedWithCode(
-            coll.createIndex({a: "geoHaystack", b: indexType1}, {bucketSize: 1}),
-            [ErrorCodes.CannotCreateIndex, 16770]);
     }
 })();
diff --git a/jstests/core/txns/commands_not_allowed_in_txn.js b/jstests/core/txns/commands_not_allowed_in_txn.js
index 912f01a0a2a..0231162e284 100644
--- a/jstests/core/txns/commands_not_allowed_in_txn.js
+++ b/jstests/core/txns/commands_not_allowed_in_txn.js
@@ -29,10 +29,7 @@ const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"
 assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
 assert.commandWorked(testDB.runCommand({
     createIndexes: collName,
-    indexes: [
-        {name: "geo_2d", key: {geo: "2d"}},
-        {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
-    ],
+    indexes: [{name: "geo_2d", key: {geo: "2d"}}],
     writeConcern: {w: "majority"}
 }));
diff --git a/jstests/multiVersion/delete_haystack_indexes_on_upgrade.js b/jstests/multiVersion/delete_haystack_indexes_on_upgrade.js
index f72805f474e..2707a39489f 100644
--- a/jstests/multiVersion/delete_haystack_indexes_on_upgrade.js
+++ b/jstests/multiVersion/delete_haystack_indexes_on_upgrade.js
@@ -43,7 +43,7 @@ function createIndexes(coll) {
 // Verify that haystack indexes are deleted when upgrading a standalone.
 function runStandaloneTest() {
     // Set up a v4.4 mongod.
-    const dbPath = MongoRunner.dataPath + "delete_haystack";
+    const dbPath = MongoRunner.dataPath + "/delete_haystack";
     let mongo = MongoRunner.runMongod({dbpath: dbPath, binVersion: "last-lts"});
     assert.neq(null, mongo, "mongod was unable to start up");
     let testDB = mongo.getDB(dbName);
@@ -60,8 +60,11 @@ function runStandaloneTest() {
     testDB = mongo.getDB(dbName);
     coll = testDB[collName];
 
-    // The haystack index should still be present before the FCV is set.
+    // The haystack index should still be present before the FCV is set and the validate command
+    // should succeed.
     IndexBuildTest.assertIndexes(coll, indexList.length, indexList);
+    const validate = assert.commandWorked(coll.validate({full: true}));
+    assert.eq(true, validate.valid);
 
     // Set the FCV.
     const adminDB = mongo.getDB("admin");
diff --git a/jstests/multiVersion/haystack_indexes_maintained_correctly.js b/jstests/multiVersion/haystack_indexes_maintained_correctly.js
new file mode 100644
index 00000000000..f471ef492ab
--- /dev/null
+++ b/jstests/multiVersion/haystack_indexes_maintained_correctly.js
@@ -0,0 +1,208 @@
+/**
+ * Verifies that haystack indexes cannot be created on 4.9+ binaries, but are maintained
+ * correctly in mixed version clusters.
+ *
+ * TODO SERVER-51871: This test can be deleted once 5.0 becomes last-lts.
+ */
+(function() {
+"use strict";
+load("jstests/multiVersion/libs/multi_rs.js");       // For 'upgradeSecondaries()/upgradePrimary()'
+load('jstests/noPassthrough/libs/index_build.js');   // For 'assertIndexes()'
+load("jstests/libs/fixture_helpers.js");             // For 'isSharded()'
+
+const dbName = "test";
+const collName = jsTestName();
+const geoIndexKey = {
+    loc: "geoHaystack",
+    x: 1
+};
+const geoIndexName = "geo";
+const indexList = ["_id_", geoIndexName];
+const nonGeoIndexList = ["_id_"];
+
+function insertInitialDocuments(coll) {
+    const documentList =
+        [{_id: 0, loc: [1, 2], x: 'foo', y: 2}, {_id: 1, loc: [1.5, 1.5], x: 'bar', y: 1}];
+    assert.commandWorked(coll.insert(documentList));
+}
+
+/**
+ * Calls 'validate()' on 'coll' and verifies that the documents which are inserted into 'coll'
+ * produce the correct number of index keys for the geoHaystack index in this test.
+ */
+function validateAndAssertCorrectIndexKeys(coll) {
+    const validateResult = assert.commandWorked(coll.validate({full: true}));
+    let validateOutput;
+    if (FixtureHelpers.isSharded(coll)) {
+        assert(validateResult.hasOwnProperty("raw"));
+        validateOutput = validateResult["raw"];
+    } else {
+        validateOutput = validateResult;
+    }
+
+    // There should be as many index keys as there are documents.
+    const expectedNumKeys = coll.find().itcount();
+    let keys = 0;
+    if (FixtureHelpers.isSharded(coll)) {
+        for (const shard of Object.keys(validateOutput)) {
+            keys += validateOutput[shard]["keysPerIndex"][geoIndexName];
+            assert.eq(0, validateOutput[shard]["errors"].length);
+            assert.eq(true, validateOutput[shard]["valid"]);
+        }
+    } else {
+        keys = validateOutput["keysPerIndex"][geoIndexName];
+        assert.eq(0, validateOutput["errors"].length);
+        assert.eq(true, validateOutput["valid"]);
+    }
+
+    assert.eq(expectedNumKeys, keys);
+}
+
+// Verify that haystack indexes cannot be created on a standalone in the latest version regardless
+// of the FCV.
+function runStandaloneTest() {
+    const mongo = MongoRunner.runMongod({binVersion: "latest"});
+    const testDB = mongo.getDB(dbName);
+    const coll = testDB[collName];
+    for (const fcv of [lastLTSFCV, latestFCV]) {
+        assert.commandWorked(mongo.adminCommand({setFeatureCompatibilityVersion: fcv}));
+        assert.commandFailedWithCode(
+            coll.createIndex(geoIndexKey, {name: geoIndexName, bucketSize: 1}),
+            ErrorCodes.CannotCreateIndex);
+    }
+    MongoRunner.stopMongod(mongo);
+}
+
+// Verify that haystack indexes are maintained properly on a mixed version replica set.
+function runReplicaSetTest() {
+    // Set up a mixed version replica set: the primary is initialized to the last-lts binary
+    // version (4.4), while the secondary is initialized to the latest binary version.
+    const rst = new ReplSetTest({nodes: [{binVersion: "last-lts"}, {binVersion: "latest"}]});
+    rst.startSet();
+    rst.initiate();
+    let primaryDB = rst.getPrimary().getDB(dbName);
+    let primaryColl = primaryDB[collName];
+    insertInitialDocuments(primaryColl);
+    rst.awaitReplication();
+
+    // Creating a haystack index should still work.
+    assert.commandWorked(primaryDB.runCommand({
+        "createIndexes": collName,
+        indexes: [{key: geoIndexKey, name: geoIndexName, bucketSize: 1}],
+        writeConcern: {w: 2}
+    }));
+
+    // The haystack index should replicate correctly to the secondary.
+    const secondaryDB = rst.getSecondary().getDB(dbName);
+    const secondaryColl = secondaryDB[collName];
+    IndexBuildTest.assertIndexes(secondaryColl, indexList.length, indexList);
+
+    // Verify that documents which are inserted after the index is built produce valid index keys.
+    assert.commandWorked(
+        primaryColl.insert([{_id: 4, loc: [4, 4], x: "baz"}, {_id: 5, loc: [5, 5], x: "baz"}],
+                           {writeConcern: {w: 2}}));
+    validateAndAssertCorrectIndexKeys(primaryColl);
+    validateAndAssertCorrectIndexKeys(secondaryColl);
+
+    // Upgrade the primary and attempt to re-create the index after the upgrade.
+    assert.commandWorked(primaryDB.runCommand(
+        {"dropIndexes": collName, index: geoIndexName, writeConcern: {w: 2}}));
+    rst.upgradePrimary(rst.getPrimary(), {binVersion: "latest"});
+    rst.awaitNodesAgreeOnPrimary();
+
+    // Even though we haven't bumped the FCV, index creation should still fail on a primary in
+    // the latest version.
+    primaryDB = rst.getPrimary().getDB(dbName);
+    primaryColl = primaryDB[collName];
+    assert.commandFailedWithCode(
+        primaryColl.createIndex(geoIndexKey, {name: geoIndexName, bucketSize: 1}),
+        ErrorCodes.CannotCreateIndex);
+
+    rst.stopSet();
+}
+
+// Verify that haystack indexes are maintained properly in a mixed version sharded cluster.
+function runShardingTest() {
+    // Set up a mixed version sharded cluster, where shard0's nodes are initialized to the last-lts
+    // binary version (4.4) and shard1's nodes are initialized to the latest binary version.
+    const st = new ShardingTest({
+        shards: {
+            rs0: {nodes: [{binVersion: "last-lts"}, {binVersion: "last-lts"}]},
+            rs1: {nodes: [{binVersion: "latest"}, {binVersion: "latest"}]}
+        },
+        other: {mongosOptions: {binVersion: "last-lts"}}
+    });
+
+    // Test that indexes are maintained properly during chunk migration. More precisely, verify
+    // that when a chunk from a shard consisting of 4.4 nodes with a haystack index is moved to a
+    // shard consisting of nodes in the latest binary version, the haystack index is built
+    // correctly on the shard in the latest binary version.
+    const mongos = st.s;
+    const ns = dbName + "." + collName;
+
+    // Create a sharded collection with two chunks: [MinKey, 1), [1, MaxKey], both on shard0.
+    assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+    assert.commandWorked(mongos.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
+    assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
+    assert.commandWorked(mongos.adminCommand({split: ns, middle: {_id: 1}}));
+
+    // Insert some documents and create a haystack index.
+    const db = mongos.getDB(dbName);
+    const coll = db[collName];
+    insertInitialDocuments(coll);
+    assert.commandWorked(coll.createIndex(geoIndexKey, {name: geoIndexName, bucketSize: 1}));
+
+    // Wait for shard0 to finish replicating its documents and building the index.
+    st.rs0.awaitReplication();
+
+    // Move the [1, MaxKey] chunk to shard1.
+    assert.commandWorked(mongos.adminCommand(
+        {moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName, _waitForDelete: true}));
+    st.rs1.awaitReplication();
+
+    // Verify that shard1 has the haystack index after the chunk was moved.
+    const shard1primary = st.rs1.getPrimary();
+    const shard1DB = shard1primary.getDB(dbName);
+    const shard1Coll = shard1DB[collName];
+    IndexBuildTest.assertIndexes(shard1Coll, indexList.length, indexList);
+
+    validateAndAssertCorrectIndexKeys(coll);
+
+    // Verify that inserting documents into a shard consisting of nodes in the latest version with
+    // an existing haystack index will create the correct index keys for the index.
+    assert.commandWorked(
+        coll.insert([{_id: 4, loc: [4, 4], x: "baz"}, {_id: 5, loc: [5, 5], x: "blah"}],
+                    {writeConcern: {w: 2}}));
+
+    validateAndAssertCorrectIndexKeys(coll);
+
+    // Creating a new haystack index against a sharded cluster with at least one shard upgraded to
+    // the latest version should fail.
+    assert.commandWorked(
+        db.runCommand({"dropIndexes": collName, index: geoIndexName, writeConcern: {w: 2}}));
+    assert.commandFailedWithCode(coll.createIndex(geoIndexKey, {name: geoIndexName, bucketSize: 1}),
+                                 ErrorCodes.CannotCreateIndex);
+
+    // Though the command failed, the haystack index will still be created on shard0 since it is in
+    // version 4.4.
+    const shard0DB = st.rs0.getPrimary().getDB(dbName);
+    const shard0coll = shard0DB[collName];
+    IndexBuildTest.assertIndexes(shard0coll, indexList.length, indexList);
+
+    // Since shard1 is in the latest version, it will not have the geoHaystack index.
+    IndexBuildTest.assertIndexes(shard1Coll, nonGeoIndexList.length, nonGeoIndexList);
+
+    // Though the 'dropIndexes' command will fail because shard1 does not have the haystack
+    // index, it should still remove the haystack index on shard0.
+    assert.commandFailedWithCode(
+        mongos.getDB(dbName).runCommand(
+            {"dropIndexes": collName, index: geoIndexName, writeConcern: {w: 2}}),
+        ErrorCodes.IndexNotFound);
+    IndexBuildTest.assertIndexes(shard0coll, nonGeoIndexList.length, nonGeoIndexList);
+    st.stop();
+}
+
+runStandaloneTest();
+runReplicaSetTest();
+runShardingTest();
+})();
\ No newline at end of file
diff --git a/jstests/multiVersion/keystring_index.js b/jstests/multiVersion/keystring_index.js
index 283eac5a4bd..105e6937212 100644
--- a/jstests/multiVersion/keystring_index.js
+++ b/jstests/multiVersion/keystring_index.js
@@ -13,7 +13,6 @@
  * The following index types are tested:
  * - btree
  * - 2d
- * - geoHaystack
  * - 2dsphere
  * - text
  * - hashed*
@@ -51,15 +50,6 @@ const indexTypes = [
         spec: {loc: "2d"},
     },
     {
-        indexName: "hayStack",
-        createDoc: i => ({
-            loc: {lng: (i / 2.0) * (i / 2.0), lat: (i / 2.0)},
-            a: {x: i, y: i + 1, z: [i, i + 1]},
-        }),
-        spec: {loc: "geoHaystack", a: 1},
-        createIndexOptions: {bucketSize: 1},
-    },
-    {
         indexName: "2dSphere",
         createDoc: i => {
             if (i == 0)
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index 5f3ccf20e10..4a30ca2e463 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -453,14 +453,6 @@ function IndexDataGenerator(options) {
         return index;
     }
 
-    function GenHaystackIndex(seed) {
-        var index = {};
-        index[getNextUniqueKey()] = "geoHaystack";
-        // Haystack indexes need a non geo field, and the geo field must be first
-        index[getNextUniqueKey()] = (seed % 2) == 1 ? 1 : -1;
-        return index;
-    }
-
     function GenTextIndex(seed) {
         var index = {};
         index[getNextUniqueKey()] = "text";
@@ -519,14 +511,6 @@ function IndexDataGenerator(options) {
         return attributes;
     }
 
-    function GenHaystackIndexOptions(seed) {
-        var attributes = GenIndexOptions(seed);
-        // When using a haystack index, the following additional index properties are required:
-        // { "bucketSize" : <bucket value> }
-        attributes["bucketSize"] = (seed * 10000) % 100 + 10;
-        return attributes;
-    }
-
     function GenTextIndexOptions(seed) {
         return GenIndexOptions(seed);
     }
@@ -562,8 +546,6 @@ function IndexDataGenerator(options) {
         {"spec": Gen2dsphereIndex(7), "options": Gen2dSphereIndexOptions(12)},
         // 2d
         {"spec": Gen2dIndex(8), "options": Gen2dIndexOptions(13)},
-        // Haystack
-        {"spec": GenHaystackIndex(9), "options": GenHaystackIndexOptions(13)},
         // Text Indexes
         {"spec": GenTextIndex(10), "options": GenTextIndexOptions(14)},
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index ddf67684ecc..562c1422c8b 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -120,11 +120,12 @@ Status IndexCatalogImpl::init(OperationContext* opCtx) {
             durableCatalog->getIndexSpec(opCtx, _collection->getCatalogId(), indexName).getOwned();
         BSONObj keyPattern = spec.getObjectField("key");
 
+        // TODO SERVER-51871: Delete this block once 5.0 becomes last-lts.
         if (spec.hasField(IndexDescriptor::kGeoHaystackBucketSize)) {
             LOGV2_OPTIONS(4670602,
                           {logv2::LogTag::kStartupWarnings},
                           "Found an existing geoHaystack index in the catalog. Support for "
-                          "geoHaystack indexes has been deprecated. Instead create a 2d index. See "
+                          "geoHaystack indexes has been removed. Instead create a 2d index. See "
See " "https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack"); } auto descriptor = std::make_unique<IndexDescriptor>(_getAccessMethodName(keyPattern), spec); @@ -314,9 +315,6 @@ void IndexCatalogImpl::_logInternalState(OperationContext* opCtx, } } -namespace { -std::string lastHaystackIndexLogged = ""; -} StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate( OperationContext* opCtx, const BSONObj& original, @@ -331,14 +329,13 @@ StatusWith<BSONObj> IndexCatalogImpl::prepareSpecForCreate( auto indexName = validatedSpec.getField("name").String(); // This gets hit twice per index, so we keep track of what we last logged to avoid logging the // same line for the same index twice. - if (validatedSpec.hasField(IndexDescriptor::kGeoHaystackBucketSize) && - lastHaystackIndexLogged.compare(indexName) != 0) { + // TODO SERVER-51871: Delete this block once 5.0 becomes last-lts. + if (validatedSpec.hasField(IndexDescriptor::kGeoHaystackBucketSize)) { LOGV2_OPTIONS(4670601, {logv2::LogTag::kStartupWarnings}, "Support for " - "geoHaystack indexes has been deprecated. Instead create a 2d index. See " + "geoHaystack indexes has been removed. Instead create a 2d index. See " "https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack"); - lastHaystackIndexLogged = indexName; } // Check whether this is a non-_id index and there are any settings disallowing this server diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp index aedf34c8233..829a2b7392c 100644 --- a/src/mongo/db/catalog/index_key_validate.cpp +++ b/src/mongo/db/catalog/index_key_validate.cpp @@ -293,10 +293,20 @@ StatusWith<BSONObj> validateIndexSpec( keys.push_back(keyElemFieldName); } + // TODO SERVER-51871: When 5.0 becomes last-lts, this check should be moved into + // 'validateKeyPattern()'. It must currently be done here so that haystack indexes + // continue to replicate correctly before the upgrade to FCV "4.9" is complete. + const auto keyPattern = indexSpecElem.Obj(); + if (IndexNames::findPluginName(keyPattern) == IndexNames::GEO_HAYSTACK) { + return {ErrorCodes::CannotCreateIndex, + str::stream() + << "GeoHaystack indexes cannot be created in version 4.9 and above"}; + } + // Here we always validate the key pattern according to the most recent rules, in order // to enforce that all new indexes have well-formed key patterns. Status keyPatternValidateStatus = - validateKeyPattern(indexSpecElem.Obj(), IndexDescriptor::kLatestIndexVersion); + validateKeyPattern(keyPattern, IndexDescriptor::kLatestIndexVersion); if (!keyPatternValidateStatus.isOK()) { return keyPatternValidateStatus; } @@ -434,6 +444,11 @@ StatusWith<BSONObj> validateIndexSpec( return ex.toStatus(str::stream() << "Failed to parse: " << IndexDescriptor::kPathProjectionFieldName); } + } else if (IndexDescriptor::kGeoHaystackBucketSize == indexSpecElemFieldName) { + return {ErrorCodes::CannotCreateIndex, + str::stream() + << "The 'bucketSize' parameter is disallowed because " + "geoHaystack indexes are no longer supported in version 4.9 and above"}; } else { // We can assume field name is valid at this point. Validation of fieldname is handled // prior to this in validateIndexSpecFieldNames(). 
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index e847a3b2fad..9cb2f1592aa 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -335,22 +335,6 @@ public:
     }
 };
 
-class CreateHaystackIndex : public Base {
-public:
-    CreateHaystackIndex() : Base("CreateHaystackIndex") {}
-    void run() {
-        const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext();
-        OperationContext& opCtx = *opCtxPtr;
-        DBDirectClient db(&opCtx);
-
-        db.createIndex(ns(),
-                       IndexSpec()
-                           .addKey("aField", IndexSpec::kIndexTypeGeoHaystack)
-                           .addKey("otherField", IndexSpec::kIndexTypeDescending)
-                           .geoHaystackBucketSize(1.0));
-    }
-};
-
 class Create2DSphereIndex : public Base {
 public:
     Create2DSphereIndex() : Base("Create2DSphereIndex") {}
@@ -409,7 +393,6 @@ public:
         add<CreateUniqueSparseDropDupsIndexInBackground>();
         add<CreateComplexTextIndex>();
         add<Create2DIndex>();
-        add<CreateHaystackIndex>();
        add<Create2DSphereIndex>();
        add<CreateHashedIndex>();
        add<CreateIndexFailure>();
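
A rough shell sketch of how an operator might locate existing haystack indexes before upgrading (this helper is not part of the commit; it only relies on the fact that a haystack index spec contains a key field with the value "geoHaystack"):

// Print every geoHaystack index spec in the current database (illustrative helper only).
db.getCollectionNames().forEach(function(collName) {
    db[collName].getIndexes().forEach(function(spec) {
        if (Object.values(spec.key).includes("geoHaystack")) {
            print(collName + ": " + tojson(spec));
        }
    });
});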