Diffstat (limited to 'src/mongo/db/index')
-rw-r--r--  src/mongo/db/index/2d_access_method.cpp                 34
-rw-r--r--  src/mongo/db/index/2d_access_method.h                   33
-rw-r--r--  src/mongo/db/index/2d_common.h                          10
-rw-r--r--  src/mongo/db/index/btree_access_method.cpp              49
-rw-r--r--  src/mongo/db/index/btree_access_method.h                32
-rw-r--r--  src/mongo/db/index/btree_key_generator.cpp             595
-rw-r--r--  src/mongo/db/index/btree_key_generator.h               364
-rw-r--r--  src/mongo/db/index/btree_key_generator_test.cpp       1543
-rw-r--r--  src/mongo/db/index/expression_keys_private.cpp         774
-rw-r--r--  src/mongo/db/index/expression_keys_private.h           163
-rw-r--r--  src/mongo/db/index/expression_params.h                 269
-rw-r--r--  src/mongo/db/index/external_key_generator.cpp          235
-rw-r--r--  src/mongo/db/index/external_key_generator.h              4
-rw-r--r--  src/mongo/db/index/fts_access_method.cpp                10
-rw-r--r--  src/mongo/db/index/fts_access_method.h                  22
-rw-r--r--  src/mongo/db/index/hash_access_method.cpp               41
-rw-r--r--  src/mongo/db/index/hash_access_method.h                 32
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp          151
-rw-r--r--  src/mongo/db/index/haystack_access_method.h             68
-rw-r--r--  src/mongo/db/index/haystack_access_method_internal.h   100
-rw-r--r--  src/mongo/db/index/index_access_method.cpp             641
-rw-r--r--  src/mongo/db/index/index_access_method.h               410
-rw-r--r--  src/mongo/db/index/index_descriptor.cpp                 89
-rw-r--r--  src/mongo/db/index/index_descriptor.h                  372
-rw-r--r--  src/mongo/db/index/s2_access_method.cpp                111
-rw-r--r--  src/mongo/db/index/s2_access_method.h                   34
-rw-r--r--  src/mongo/db/index/s2_common.h                          84
27 files changed, 3142 insertions, 3128 deletions
diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp
index db421cb0ffe..531df3369b1 100644
--- a/src/mongo/db/index/2d_access_method.cpp
+++ b/src/mongo/db/index/2d_access_method.cpp
@@ -39,23 +39,21 @@
namespace mongo {
- TwoDAccessMethod::TwoDAccessMethod(IndexCatalogEntry* btreeState,
- SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- ExpressionParams::parseTwoDParams(descriptor->infoObj(), &_params);
- }
-
- /** Finds the key objects to put in an index */
- void TwoDAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::get2DKeys(obj, _params, keys, NULL);
- }
-
- /** Finds all locations in a geo-indexed object */
- void TwoDAccessMethod::getKeys(const BSONObj& obj, vector<BSONObj>& locs) const {
- ExpressionKeysPrivate::get2DKeys(obj, _params, NULL, &locs);
- }
+TwoDAccessMethod::TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parseTwoDParams(descriptor->infoObj(), &_params);
+}
+
+/** Finds the key objects to put in an index */
+void TwoDAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::get2DKeys(obj, _params, keys, NULL);
+}
+
+/** Finds all locations in a geo-indexed object */
+void TwoDAccessMethod::getKeys(const BSONObj& obj, vector<BSONObj>& locs) const {
+ ExpressionKeysPrivate::get2DKeys(obj, _params, NULL, &locs);
+}
} // namespace mongo
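
Both getKeys() overloads above funnel into the same ExpressionKeysPrivate::get2DKeys() call, passing NULL for whichever out-parameter is not wanted. A minimal, self-contained sketch of that dual-out-parameter convention (generic types and a hypothetical extract() helper, not the mongo API):

    #include <set>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for the get2DKeys() calling convention: either
    // out-parameter may be null, and only the non-null ones are filled in.
    void extract(const std::string& obj,
                 std::set<std::string>* keys,
                 std::vector<std::string>* locs) {
        if (keys) keys->insert("key(" + obj + ")");     // index-key mode
        if (locs) locs->push_back("loc(" + obj + ")");  // raw-location mode
    }

    int main() {
        std::set<std::string> keys;
        std::vector<std::string> locs;
        extract("doc", &keys, nullptr);  // like getKeys(obj, keys)
        extract("doc", nullptr, &locs);  // like getKeys(obj, locs)
        return 0;
    }
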
diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h
index 208df9235cd..644b6addc5b 100644
--- a/src/mongo/db/index/2d_access_method.h
+++ b/src/mongo/db/index/2d_access_method.h
@@ -35,25 +35,28 @@
namespace mongo {
- class IndexCatalogEntry;
- class IndexDescriptor;
- struct TwoDIndexingParams;
+class IndexCatalogEntry;
+class IndexDescriptor;
+struct TwoDIndexingParams;
- class TwoDAccessMethod : public IndexAccessMethod {
- public:
- TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+class TwoDAccessMethod : public IndexAccessMethod {
+public:
+ TwoDAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- private:
+private:
+ const IndexDescriptor* getDescriptor() {
+ return _descriptor;
+ }
+ TwoDIndexingParams& getParams() {
+ return _params;
+ }
- const IndexDescriptor* getDescriptor() { return _descriptor; }
- TwoDIndexingParams& getParams() { return _params; }
+ // This really gets the 'locs' from the provided obj.
+ void getKeys(const BSONObj& obj, std::vector<BSONObj>& locs) const;
- // This really gets the 'locs' from the provided obj.
- void getKeys(const BSONObj& obj, std::vector<BSONObj>& locs) const;
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- TwoDIndexingParams _params;
- };
+ TwoDIndexingParams _params;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/2d_common.h b/src/mongo/db/index/2d_common.h
index d30b8cd07df..e3ed231f9a4 100644
--- a/src/mongo/db/index/2d_common.h
+++ b/src/mongo/db/index/2d_common.h
@@ -35,10 +35,10 @@
namespace mongo {
- struct TwoDIndexingParams {
- std::string geo;
- std::vector<std::pair<std::string, int> > other;
- std::shared_ptr<GeoHashConverter> geoHashConverter;
- };
+struct TwoDIndexingParams {
+ std::string geo;
+ std::vector<std::pair<std::string, int>> other;
+ std::shared_ptr<GeoHashConverter> geoHashConverter;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp
index 7bb294342c8..6bc97956fe8 100644
--- a/src/mongo/db/index/btree_access_method.cpp
+++ b/src/mongo/db/index/btree_access_method.cpp
@@ -36,36 +36,33 @@
namespace mongo {
- using std::vector;
+using std::vector;
- // Standard Btree implementation below.
- BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree )
- : IndexAccessMethod(btreeState, btree) {
+// Standard Btree implementation below.
+BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ // The key generation wants these values.
+ vector<const char*> fieldNames;
+ vector<BSONElement> fixed;
- // The key generation wants these values.
- vector<const char*> fieldNames;
- vector<BSONElement> fixed;
-
- BSONObjIterator it(_descriptor->keyPattern());
- while (it.more()) {
- BSONElement elt = it.next();
- fieldNames.push_back(elt.fieldName());
- fixed.push_back(BSONElement());
- }
-
- if (0 == _descriptor->version()) {
- _keyGenerator.reset(new BtreeKeyGeneratorV0(fieldNames, fixed,
- _descriptor->isSparse()));
- } else if (1 == _descriptor->version()) {
- _keyGenerator.reset(new BtreeKeyGeneratorV1(fieldNames, fixed,
- _descriptor->isSparse()));
- } else {
- massert(16745, "Invalid index version for key generation.", false );
- }
+ BSONObjIterator it(_descriptor->keyPattern());
+ while (it.more()) {
+ BSONElement elt = it.next();
+ fieldNames.push_back(elt.fieldName());
+ fixed.push_back(BSONElement());
}
- void BtreeAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- _keyGenerator->getKeys(obj, keys);
+ if (0 == _descriptor->version()) {
+ _keyGenerator.reset(new BtreeKeyGeneratorV0(fieldNames, fixed, _descriptor->isSparse()));
+ } else if (1 == _descriptor->version()) {
+ _keyGenerator.reset(new BtreeKeyGeneratorV1(fieldNames, fixed, _descriptor->isSparse()));
+ } else {
+ massert(16745, "Invalid index version for key generation.", false);
}
+}
+
+void BtreeAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ _keyGenerator->getKeys(obj, keys);
+}
} // namespace mongo
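
The constructor above decomposes the index key pattern into parallel fieldNames/fixed vectors and then selects a generator by index version (0 or 1; anything else trips massert 16745). A standalone sketch of that setup, assuming a hypothetical key pattern {a: 1, 'b.c': -1} and the fromjson() helper used by the tests further down:

    // Sketch only; mirrors the constructor loop above.
    BSONObj keyPattern = fromjson("{a: 1, 'b.c': -1}");

    std::vector<const char*> fieldNames;  // ends up as {"a", "b.c"}
    std::vector<BSONElement> fixed;       // one empty placeholder per field

    BSONObjIterator it(keyPattern);
    while (it.more()) {
        BSONElement elt = it.next();
        fieldNames.push_back(elt.fieldName());
        fixed.push_back(BSONElement());
    }

    // Index version 1 selects the V1 generator; version 0 would select
    // BtreeKeyGeneratorV0 instead.
    std::unique_ptr<BtreeKeyGenerator> keyGen(
        new BtreeKeyGeneratorV1(fieldNames, fixed, /*isSparse=*/false));
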
diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h
index 4133b41b892..4c20deeb931 100644
--- a/src/mongo/db/index/btree_access_method.h
+++ b/src/mongo/db/index/btree_access_method.h
@@ -37,21 +37,21 @@
namespace mongo {
- class IndexDescriptor;
-
- /**
- * The IndexAccessMethod for a Btree index.
- * Any index created with {field: 1} or {field: -1} uses this.
- */
- class BtreeAccessMethod : public IndexAccessMethod {
- public:
- BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree );
-
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- // Our keys differ for V0 and V1.
- std::unique_ptr<BtreeKeyGenerator> _keyGenerator;
- };
+class IndexDescriptor;
+
+/**
+ * The IndexAccessMethod for a Btree index.
+ * Any index created with {field: 1} or {field: -1} uses this.
+ */
+class BtreeAccessMethod : public IndexAccessMethod {
+public:
+ BtreeAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ // Our keys differ for V0 and V1.
+ std::unique_ptr<BtreeKeyGenerator> _keyGenerator;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index bccf688e468..e323272b044 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -32,361 +32,358 @@
namespace mongo {
- // SortStage checks for this error code in order to informatively error when we try to sort keys
- // with parallel arrays.
- const int BtreeKeyGenerator::ParallelArraysCode = 10088;
+// SortStage checks for this error code in order to informatively error when we try to sort keys
+// with parallel arrays.
+const int BtreeKeyGenerator::ParallelArraysCode = 10088;
namespace {
- const BSONObj nullObj = BSON("" << BSONNULL);
- const BSONElement nullElt = nullObj.firstElement();
- const BSONObj undefinedObj = BSON("" << BSONUndefined);
- const BSONElement undefinedElt = undefinedObj.firstElement();
+const BSONObj nullObj = BSON("" << BSONNULL);
+const BSONElement nullElt = nullObj.firstElement();
+const BSONObj undefinedObj = BSON("" << BSONUndefined);
+const BSONElement undefinedElt = undefinedObj.firstElement();
-} // namespace
+} // namespace
- BtreeKeyGenerator::BtreeKeyGenerator(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : _fieldNames(fieldNames), _isSparse(isSparse), _fixed(fixed) {
-
- BSONObjBuilder nullKeyBuilder;
- for (size_t i = 0; i < fieldNames.size(); ++i) {
- nullKeyBuilder.appendNull("");
- }
- _nullKey = nullKeyBuilder.obj();
-
- _isIdIndex = fieldNames.size() == 1 && std::string("_id") == fieldNames[0];
+BtreeKeyGenerator::BtreeKeyGenerator(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : _fieldNames(fieldNames), _isSparse(isSparse), _fixed(fixed) {
+ BSONObjBuilder nullKeyBuilder;
+ for (size_t i = 0; i < fieldNames.size(); ++i) {
+ nullKeyBuilder.appendNull("");
}
+ _nullKey = nullKeyBuilder.obj();
- void BtreeKeyGenerator::getKeys(const BSONObj &obj, BSONObjSet *keys) const {
- if (_isIdIndex) {
- // we special case for speed
- BSONElement e = obj["_id"];
- if ( e.eoo() ) {
- keys->insert(_nullKey);
- }
- else {
- int size = e.size() + 5 /* bson overhead */ - 3 /* remove _id string */;
- BSONObjBuilder b(size);
- b.appendAs(e, "");
- keys->insert(b.obj());
- invariant(keys->begin()->objsize() == size);
- }
- return;
- }
+ _isIdIndex = fieldNames.size() == 1 && std::string("_id") == fieldNames[0];
+}
- // '_fieldNames' and '_fixed' are passed by value so that they can be mutated as part of the
- // getKeys call. :|
- getKeysImpl(_fieldNames, _fixed, obj, keys);
- if (keys->empty() && ! _isSparse) {
+void BtreeKeyGenerator::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ if (_isIdIndex) {
+ // we special case for speed
+ BSONElement e = obj["_id"];
+ if (e.eoo()) {
keys->insert(_nullKey);
+ } else {
+ int size = e.size() + 5 /* bson overhead */ - 3 /* remove _id string */;
+ BSONObjBuilder b(size);
+ b.appendAs(e, "");
+ keys->insert(b.obj());
+ invariant(keys->begin()->objsize() == size);
}
+ return;
}
- static void assertParallelArrays( const char *first, const char *second ) {
- std::stringstream ss;
- ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
- uasserted( BtreeKeyGenerator::ParallelArraysCode , ss.str() );
+ // '_fieldNames' and '_fixed' are passed by value so that they can be mutated as part of the
+ // getKeys call. :|
+ getKeysImpl(_fieldNames, _fixed, obj, keys);
+ if (keys->empty() && !_isSparse) {
+ keys->insert(_nullKey);
}
+}
- BtreeKeyGeneratorV0::BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : BtreeKeyGenerator(fieldNames, fixed, isSparse) { }
-
- void BtreeKeyGeneratorV0::getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj &obj,
- BSONObjSet *keys) const {
- BSONElement arrElt;
- unsigned arrIdx = ~0;
- unsigned numNotFound = 0;
-
- for ( unsigned i = 0; i < fieldNames.size(); ++i ) {
- if ( *fieldNames[ i ] == '\0' )
- continue;
+static void assertParallelArrays(const char* first, const char* second) {
+ std::stringstream ss;
+ ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
+ uasserted(BtreeKeyGenerator::ParallelArraysCode, ss.str());
+}
- BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
+BtreeKeyGeneratorV0::BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : BtreeKeyGenerator(fieldNames, fixed, isSparse) {}
- if ( e.eoo() ) {
- e = nullElt; // no matching field
- numNotFound++;
- }
+void BtreeKeyGeneratorV0::getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const {
+ BSONElement arrElt;
+ unsigned arrIdx = ~0;
+ unsigned numNotFound = 0;
- if ( e.type() != Array )
- fieldNames[ i ] = ""; // no matching field or non-array match
+ for (unsigned i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0')
+ continue;
- if ( *fieldNames[ i ] == '\0' )
- // no need for further object expansion (though array expansion still possible)
- fixed[ i ] = e;
+ BSONElement e = obj.getFieldDottedOrArray(fieldNames[i]);
- if ( e.type() == Array && arrElt.eoo() ) {
- // we only expand arrays on a single path -- track the path here
- arrIdx = i;
- arrElt = e;
- }
+ if (e.eoo()) {
+ e = nullElt; // no matching field
+ numNotFound++;
+ }
- // enforce single array path here
- if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
- assertParallelArrays( e.fieldName(), arrElt.fieldName() );
- }
+ if (e.type() != Array)
+ fieldNames[i] = ""; // no matching field or non-array match
+
+ if (*fieldNames[i] == '\0')
+ // no need for further object expansion (though array expansion still possible)
+ fixed[i] = e;
+
+ if (e.type() == Array && arrElt.eoo()) {
+ // we only expand arrays on a single path -- track the path here
+ arrIdx = i;
+ arrElt = e;
}
- bool allFound = true; // have we found elements for all field names in the key spec?
- for (std::vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
- ++i ) {
- if ( **i != '\0' ) {
- allFound = false;
- break;
- }
+ // enforce single array path here
+ if (e.type() == Array && e.rawdata() != arrElt.rawdata()) {
+ assertParallelArrays(e.fieldName(), arrElt.fieldName());
}
+ }
- if ( _isSparse && numNotFound == _fieldNames.size()) {
- // we didn't find any fields
- // so we're not going to index this document
- return;
+ bool allFound = true; // have we found elements for all field names in the key spec?
+ for (std::vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
+ ++i) {
+ if (**i != '\0') {
+ allFound = false;
+ break;
}
+ }
- bool insertArrayNull = false;
+ if (_isSparse && numNotFound == _fieldNames.size()) {
+ // we didn't find any fields
+ // so we're not going to index this document
+ return;
+ }
- if ( allFound ) {
- if ( arrElt.eoo() ) {
- // no terminal array element to expand
- BSONObjBuilder b(_sizeTracker);
- for (std::vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i)
- b.appendAs( *i, "" );
- keys->insert( b.obj() );
- }
- else {
- // terminal array element to expand, so generate all keys
- BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ) {
- while (i.more()) {
- BSONObjBuilder b(_sizeTracker);
- for (unsigned j = 0; j < fixed.size(); ++j) {
- if ( j == arrIdx )
- b.appendAs( i.next(), "" );
- else
- b.appendAs( fixed[ j ], "" );
- }
- keys->insert( b.obj() );
- }
- }
- else if ( fixed.size() > 1 ) {
- insertArrayNull = true;
- }
- }
- }
- else {
- // nonterminal array element to expand, so recurse
- verify( !arrElt.eoo() );
- BSONObjIterator i( arrElt.embeddedObject() );
- if ( i.more() ) {
+ bool insertArrayNull = false;
+
+ if (allFound) {
+ if (arrElt.eoo()) {
+ // no terminal array element to expand
+ BSONObjBuilder b(_sizeTracker);
+ for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i)
+ b.appendAs(*i, "");
+ keys->insert(b.obj());
+ } else {
+ // terminal array element to expand, so generate all keys
+ BSONObjIterator i(arrElt.embeddedObject());
+ if (i.more()) {
while (i.more()) {
- BSONElement e = i.next();
- if ( e.type() == Object ) {
- getKeysImpl( fieldNames, fixed, e.embeddedObject(), keys );
+ BSONObjBuilder b(_sizeTracker);
+ for (unsigned j = 0; j < fixed.size(); ++j) {
+ if (j == arrIdx)
+ b.appendAs(i.next(), "");
+ else
+ b.appendAs(fixed[j], "");
}
+ keys->insert(b.obj());
}
- }
- else {
+ } else if (fixed.size() > 1) {
insertArrayNull = true;
}
}
-
- if ( insertArrayNull ) {
- // x : [] - need to insert undefined
- BSONObjBuilder b(_sizeTracker);
- for (unsigned j = 0; j < fixed.size(); ++j) {
- if ( j == arrIdx ) {
- b.appendUndefined( "" );
- }
- else {
- BSONElement e = fixed[j];
- if ( e.eoo() )
- b.appendNull( "" );
- else
- b.appendAs( e , "" );
+ } else {
+ // nonterminal array element to expand, so recurse
+ verify(!arrElt.eoo());
+ BSONObjIterator i(arrElt.embeddedObject());
+ if (i.more()) {
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == Object) {
+ getKeysImpl(fieldNames, fixed, e.embeddedObject(), keys);
}
}
- keys->insert( b.obj() );
+ } else {
+ insertArrayNull = true;
}
}
- BtreeKeyGeneratorV1::BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse)
- : BtreeKeyGenerator(fieldNames, fixed, isSparse),
- _emptyPositionalInfo(fieldNames.size()) {
- }
-
- BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj &obj,
- const PositionalPathInfo& positionalInfo,
- const char** field,
- bool* arrayNestedArray) const {
- std::string firstField = mongoutils::str::before(*field, '.');
- bool haveObjField = !obj.getField(firstField).eoo();
- BSONElement arrField = positionalInfo.positionallyIndexedElt;
-
- // An index component field name cannot exist in both a document
- // array and one of that array's children.
- uassert(16746,
- mongoutils::str::stream() <<
- "Ambiguous field name found in array (do not use numeric field names in "
- "embedded elements in an array), field: '" << arrField.fieldName() <<
- "' for array: " << positionalInfo.arrayObj,
- !haveObjField || !positionalInfo.hasPositionallyIndexedElt());
-
- *arrayNestedArray = false;
- if ( haveObjField ) {
- return obj.getFieldDottedOrArray(*field);
- }
- else if (positionalInfo.hasPositionallyIndexedElt()) {
- if ( arrField.type() == Array ) {
- *arrayNestedArray = true;
+ if (insertArrayNull) {
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_sizeTracker);
+ for (unsigned j = 0; j < fixed.size(); ++j) {
+ if (j == arrIdx) {
+ b.appendUndefined("");
+ } else {
+ BSONElement e = fixed[j];
+ if (e.eoo())
+ b.appendNull("");
+ else
+ b.appendAs(e, "");
}
- *field = positionalInfo.remainingPath;
- return positionalInfo.dottedElt;
}
- return BSONElement();
+ keys->insert(b.obj());
}
+}
- void BtreeKeyGeneratorV1::_getKeysArrEltFixed(
- std::vector<const char*>* fieldNames,
- std::vector<BSONElement>* fixed,
- const BSONElement& arrEntry,
- BSONObjSet* keys,
- unsigned numNotFound,
- const BSONElement& arrObjElt,
- const std::set<unsigned>& arrIdxs,
- bool mayExpandArrayUnembedded,
- const std::vector<PositionalPathInfo>& positionalInfo) const {
- // Set up any terminal array values.
- for (std::set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j) {
- unsigned idx = *j;
- if (*(*fieldNames)[idx] == '\0') {
- (*fixed)[idx] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
- }
+BtreeKeyGeneratorV1::BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse)
+ : BtreeKeyGenerator(fieldNames, fixed, isSparse), _emptyPositionalInfo(fieldNames.size()) {}
+
+BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj& obj,
+ const PositionalPathInfo& positionalInfo,
+ const char** field,
+ bool* arrayNestedArray) const {
+ std::string firstField = mongoutils::str::before(*field, '.');
+ bool haveObjField = !obj.getField(firstField).eoo();
+ BSONElement arrField = positionalInfo.positionallyIndexedElt;
+
+ // An index component field name cannot exist in both a document
+ // array and one of that array's children.
+ uassert(16746,
+ mongoutils::str::stream()
+ << "Ambiguous field name found in array (do not use numeric field names in "
+ "embedded elements in an array), field: '" << arrField.fieldName()
+ << "' for array: " << positionalInfo.arrayObj,
+ !haveObjField || !positionalInfo.hasPositionallyIndexedElt());
+
+ *arrayNestedArray = false;
+ if (haveObjField) {
+ return obj.getFieldDottedOrArray(*field);
+ } else if (positionalInfo.hasPositionallyIndexedElt()) {
+ if (arrField.type() == Array) {
+ *arrayNestedArray = true;
}
-
- // Recurse.
- getKeysImplWithArray(*fieldNames,
- *fixed,
- arrEntry.type() == Object ? arrEntry.embeddedObject() : BSONObj(),
- keys,
- numNotFound,
- positionalInfo);
+ *field = positionalInfo.remainingPath;
+ return positionalInfo.dottedElt;
}
-
- void BtreeKeyGeneratorV1::getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const {
- getKeysImplWithArray(fieldNames, fixed, obj, keys, 0, _emptyPositionalInfo);
+ return BSONElement();
+}
+
+void BtreeKeyGeneratorV1::_getKeysArrEltFixed(
+ std::vector<const char*>* fieldNames,
+ std::vector<BSONElement>* fixed,
+ const BSONElement& arrEntry,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const BSONElement& arrObjElt,
+ const std::set<unsigned>& arrIdxs,
+ bool mayExpandArrayUnembedded,
+ const std::vector<PositionalPathInfo>& positionalInfo) const {
+ // Set up any terminal array values.
+ for (std::set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j) {
+ unsigned idx = *j;
+ if (*(*fieldNames)[idx] == '\0') {
+ (*fixed)[idx] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
+ }
}
- void BtreeKeyGeneratorV1::getKeysImplWithArray(
- std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys,
- unsigned numNotFound,
- const std::vector<PositionalPathInfo>& positionalInfo) const {
- BSONElement arrElt;
- std::set<unsigned> arrIdxs;
- bool mayExpandArrayUnembedded = true;
- for (unsigned i = 0; i < fieldNames.size(); ++i) {
- if ( *fieldNames[ i ] == '\0' ) {
- continue;
- }
+ // Recurse.
+ getKeysImplWithArray(*fieldNames,
+ *fixed,
+ arrEntry.type() == Object ? arrEntry.embeddedObject() : BSONObj(),
+ keys,
+ numNotFound,
+ positionalInfo);
+}
+
+void BtreeKeyGeneratorV1::getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const {
+ getKeysImplWithArray(fieldNames, fixed, obj, keys, 0, _emptyPositionalInfo);
+}
+
+void BtreeKeyGeneratorV1::getKeysImplWithArray(
+ std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const std::vector<PositionalPathInfo>& positionalInfo) const {
+ BSONElement arrElt;
+ std::set<unsigned> arrIdxs;
+ bool mayExpandArrayUnembedded = true;
+ for (unsigned i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0') {
+ continue;
+ }
- bool arrayNestedArray;
- // Extract element matching fieldName[ i ] from object xor array.
- BSONElement e = extractNextElement(obj,
- positionalInfo[i],
- &fieldNames[i],
- &arrayNestedArray);
-
- if ( e.eoo() ) {
- // if field not present, set to null
- fixed[ i ] = nullElt;
- // done expanding this field name
- fieldNames[ i ] = "";
- numNotFound++;
- }
- else if ( e.type() == Array ) {
- arrIdxs.insert( i );
- if ( arrElt.eoo() ) {
- // we only expand arrays on a single path -- track the path here
- arrElt = e;
- }
- else if ( e.rawdata() != arrElt.rawdata() ) {
- // enforce single array path here
- assertParallelArrays( e.fieldName(), arrElt.fieldName() );
- }
- if ( arrayNestedArray ) {
- mayExpandArrayUnembedded = false;
- }
+ bool arrayNestedArray;
+ // Extract element matching fieldName[ i ] from object xor array.
+ BSONElement e =
+ extractNextElement(obj, positionalInfo[i], &fieldNames[i], &arrayNestedArray);
+
+ if (e.eoo()) {
+ // if field not present, set to null
+ fixed[i] = nullElt;
+ // done expanding this field name
+ fieldNames[i] = "";
+ numNotFound++;
+ } else if (e.type() == Array) {
+ arrIdxs.insert(i);
+ if (arrElt.eoo()) {
+ // we only expand arrays on a single path -- track the path here
+ arrElt = e;
+ } else if (e.rawdata() != arrElt.rawdata()) {
+ // enforce single array path here
+ assertParallelArrays(e.fieldName(), arrElt.fieldName());
}
- else {
- // not an array - no need for further expansion
- fixed[ i ] = e;
+ if (arrayNestedArray) {
+ mayExpandArrayUnembedded = false;
}
+ } else {
+ // not an array - no need for further expansion
+ fixed[i] = e;
}
+ }
- if ( arrElt.eoo() ) {
- // No array, so generate a single key.
- if ( _isSparse && numNotFound == fieldNames.size()) {
- return;
- }
- BSONObjBuilder b(_sizeTracker);
- for (std::vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i) {
- b.appendAs( *i, "" );
- }
- keys->insert( b.obj() );
+ if (arrElt.eoo()) {
+ // No array, so generate a single key.
+ if (_isSparse && numNotFound == fieldNames.size()) {
+ return;
}
- else if ( arrElt.embeddedObject().firstElement().eoo() ) {
- // Empty array, so set matching fields to undefined.
- _getKeysArrEltFixed(&fieldNames, &fixed, undefinedElt, keys, numNotFound, arrElt,
- arrIdxs, true, _emptyPositionalInfo);
+ BSONObjBuilder b(_sizeTracker);
+ for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i) {
+ b.appendAs(*i, "");
}
- else {
- BSONObj arrObj = arrElt.embeddedObject();
-
- // For positional key patterns, e.g. {'a.1.b': 1}, we look up the indexed array element
- // and then traverse the remainder of the field path up front. This prevents us from
- // having to look up the indexed element again on each recursive call (i.e. once per
- // array element).
- std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
- for (size_t i = 0; i < fieldNames.size(); ++i) {
- if (*fieldNames[i] == '\0') {
- // We've reached the end of the path.
- continue;
- }
-
- StringData part = fieldNames[i];
- part = part.substr(0, part.find('.'));
- subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
- if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
- // Not indexing an array by position.
- continue;
- }
-
- // We're indexing an array element by its position. Traverse the remainder of the
- // field path now.
- subPositionalInfo[i].arrayObj = arrObj;
- subPositionalInfo[i].remainingPath = fieldNames[i];
- subPositionalInfo[i].dottedElt =
- arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
+ keys->insert(b.obj());
+ } else if (arrElt.embeddedObject().firstElement().eoo()) {
+ // Empty array, so set matching fields to undefined.
+ _getKeysArrEltFixed(&fieldNames,
+ &fixed,
+ undefinedElt,
+ keys,
+ numNotFound,
+ arrElt,
+ arrIdxs,
+ true,
+ _emptyPositionalInfo);
+ } else {
+ BSONObj arrObj = arrElt.embeddedObject();
+
+ // For positional key patterns, e.g. {'a.1.b': 1}, we look up the indexed array element
+ // and then traverse the remainder of the field path up front. This prevents us from
+ // having to look up the indexed element again on each recursive call (i.e. once per
+ // array element).
+ std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
+ for (size_t i = 0; i < fieldNames.size(); ++i) {
+ if (*fieldNames[i] == '\0') {
+ // We've reached the end of the path.
+ continue;
}
- // Generate a key for each element of the indexed array.
- BSONObjIterator i(arrObj);
- while (i.more()) {
- _getKeysArrEltFixed(&fieldNames, &fixed, i.next(), keys, numNotFound, arrElt,
- arrIdxs, mayExpandArrayUnembedded, subPositionalInfo);
+ StringData part = fieldNames[i];
+ part = part.substr(0, part.find('.'));
+ subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
+ if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
+ // Not indexing an array by position.
+ continue;
}
+
+ // We're indexing an array element by its position. Traverse the remainder of the
+ // field path now.
+ subPositionalInfo[i].arrayObj = arrObj;
+ subPositionalInfo[i].remainingPath = fieldNames[i];
+ subPositionalInfo[i].dottedElt =
+ arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
+ }
+
+ // Generate a key for each element of the indexed array.
+ BSONObjIterator i(arrObj);
+ while (i.more()) {
+ _getKeysArrEltFixed(&fieldNames,
+ &fixed,
+ i.next(),
+ keys,
+ numNotFound,
+ arrElt,
+ arrIdxs,
+ mayExpandArrayUnembedded,
+ subPositionalInfo);
}
}
+}
} // namespace mongo
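
The observable behavior of getKeys() above is verified exhaustively by the unit tests in btree_key_generator_test.cpp (further down). A condensed sketch, assuming a V1 generator keyGen built for the key pattern {a: 1} as in BtreeAccessMethod:

    BSONObjSet keys;

    // Array values fan out to one key per element:
    // {a: [1, 2, 3]}  ->  {'': 1}, {'': 2}, {'': 3}
    keyGen->getKeys(fromjson("{a: [1, 2, 3]}"), &keys);

    // A missing field indexes as null; a sparse index generates no key at
    // all (see the _isSparse check in getKeys() above):
    // {b: 4}  ->  {'': null}
    keys.clear();
    keyGen->getKeys(fromjson("{b: 4}"), &keys);

    // Expanding two distinct arrays in one compound key is refused:
    // assertParallelArrays() uasserts with ParallelArraysCode (10088).
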
diff --git a/src/mongo/db/index/btree_key_generator.h b/src/mongo/db/index/btree_key_generator.h
index fe376d7f32b..cb156354b51 100644
--- a/src/mongo/db/index/btree_key_generator.h
+++ b/src/mongo/db/index/btree_key_generator.h
@@ -34,192 +34,194 @@
namespace mongo {
+/**
+ * Internal class used by BtreeAccessMethod to generate keys for indexed documents.
+ * This class is meant to be kept under the index access layer.
+ */
+class BtreeKeyGenerator {
+public:
+ BtreeKeyGenerator(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGenerator() {}
+
+ void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ static const int ParallelArraysCode;
+
+protected:
+ // These are used by the getKeysImpl(s) below.
+ std::vector<const char*> _fieldNames;
+ bool _isIdIndex;
+ bool _isSparse;
+ BSONObj _nullKey; // a full key with all fields null
+ BSONSizeTracker _sizeTracker;
+
+private:
+ // We have V0 and V1. Sigh.
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const = 0;
+
+ std::vector<BSONElement> _fixed;
+};
+
+class BtreeKeyGeneratorV0 : public BtreeKeyGenerator {
+public:
+ BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGeneratorV0() {}
+
+private:
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const;
+};
+
+class BtreeKeyGeneratorV1 : public BtreeKeyGenerator {
+public:
+ BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ bool isSparse);
+
+ virtual ~BtreeKeyGeneratorV1() {}
+
+private:
/**
- * Internal class used by BtreeAccessMethod to generate keys for indexed documents.
- * This class is meant to be kept under the index access layer.
+ * Stores info regarding traversal of a positional path. A path through a document is
+ * considered positional if this path element names an array element. Generally this means
+ * that the field name consists of [0-9]+, but the implementation just calls .Obj() on
+ * the array and looks for the named field. This logic happens even if the field does
+ * not match [0-9]+.
+ *
+ * Example:
+ * The path 'a.1.b' can sometimes be positional due to path element '1'. In the document
+ * {a: [{b: 98}, {b: 99}]} it would be considered positional, and would refer to
+ * element 99. In the document {a: [{'1': {b: 97}}]}, the path is *not* considered
+ * positional and would refer to element 97.
*/
- class BtreeKeyGenerator {
- public:
- BtreeKeyGenerator(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGenerator() { }
-
- void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- static const int ParallelArraysCode;
-
- protected:
- // These are used by the getKeysImpl(s) below.
- std::vector<const char*> _fieldNames;
- bool _isIdIndex;
- bool _isSparse;
- BSONObj _nullKey; // a full key with all fields null
- BSONSizeTracker _sizeTracker;
-
- private:
- // We have V0 and V1. Sigh.
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const = 0;
-
- std::vector<BSONElement> _fixed;
+ struct PositionalPathInfo {
+ PositionalPathInfo() : remainingPath("") {}
+
+ bool hasPositionallyIndexedElt() const {
+ return !positionallyIndexedElt.eoo();
+ }
+
+ // Stores the array element indexed by position. If the key pattern has no positional
+ // element, then this is EOO.
+ //
+ // Example:
+ // Suppose the key pattern is {"a.0.x": 1} and we're extracting keys for document
+ // {a: [{x: 98}, {x: 99}]}. We should store element {x: 98} here.
+ BSONElement positionallyIndexedElt;
+
+ // The array to which 'positionallyIndexedElt' belongs.
+ BSONObj arrayObj;
+
+ // If we find a positionally indexed element, we traverse the remainder of the path
+ // until we find either another array element or the end of the path. The result of
+ // this traversal (implemented using getFieldDottedOrArray()), is stored here and used
+ // during the recursive call for each array element.
+ //
+ // Example:
+ // Suppose we have key pattern {"a.1.b.0.c": 1}. The document for which we are
+ // generating keys is {a: [0, {b: [{c: 99}]}]}. We will find that {b: [{c: 99}]}
+ // is a positionally indexed element and store it as 'positionallyIndexedElt'.
+ //
+ // We then call getFieldDottedOrArray() to traverse the remainder of the path,
+ // "b.1.c". The result is the array [{c: 99}] which is stored here as 'dottedElt'.
+ BSONElement dottedElt;
+
+ // The remaining path that must be traversed in 'dottedElt' to find the indexed
+ // element(s).
+ //
+ // Example:
+ // Continuing the example above, 'remainingPath' will be "0.c". Note that the path
+ // "0.c" refers to element 99 in 'dottedElt', [{c: 99}].
+ const char* remainingPath;
};
- class BtreeKeyGeneratorV0 : public BtreeKeyGenerator {
- public:
- BtreeKeyGeneratorV0(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGeneratorV0() { }
+ /**
+ * @param fieldNames - fields to index, may be postfixes in recursive calls
+ * @param fixed - values that have already been identified for their index fields
+ * @param obj - object from which keys should be extracted, based on names in fieldNames
+ * @param keys - set where index keys are written
+ * @param numNotFound - number of index fields that have already been identified as missing
+ * @param array - array from which keys should be extracted, based on names in fieldNames
+ * If obj and array are both nonempty, obj will be one of the elements of array.
+ */
+ virtual void getKeysImpl(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys) const;
- private:
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const;
- };
+ /**
+ * This recursive method does the heavy-lifting for getKeysImpl().
+ */
+ void getKeysImplWithArray(std::vector<const char*> fieldNames,
+ std::vector<BSONElement> fixed,
+ const BSONObj& obj,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const std::vector<PositionalPathInfo>& positionalInfo) const;
+ /**
+ * A call to getKeysImplWithArray() begins by calling this for each field in the key
+ * pattern. It uses getFieldDottedOrArray() to traverse the path '*field' in 'obj'.
+ *
+ * The 'positionalInfo' arg is used for handling a field path where 'obj' has an
+ * array indexed by position. See the comments for PositionalPathInfo for more detail.
+ *
+ * Returns the element extracted as a result of traversing the path, or an indexed array
+ * if we encounter one during the path traversal.
+ *
+ * Out-parameters:
+ * --Sets *field to the remaining path that must be traversed.
+ * --Sets *arrayNestedArray to true if the returned BSONElement is a nested array that is
+ * indexed by position in its parent array. Otherwise sets *arrayNestedArray to false.
+ *
+ * Example:
+ * Suppose we have key pattern {"a.b.c": 1} and we're extracting keys from document
+ * {a: [{b: {c: 98}}, {b: {c: 99}}]}. On the first call to extractNextElement(), 'obj'
+ * will be the full document, {a: [{b: {c: 98}}, {b: {c: 99}}]}. The 'positionalInfo'
+ * argument is not relevant, because the array is not being positionally indexed.
+ * '*field' will point to "a.b.c".
+ *
+ * The return value will be the array element [{b: {c: 98}}, {b: {c: 99}}], because path
+ * traversal stops when an indexed array is encountered. Furthermore, '*field' will be set
+ * to "b.c".
+ *
+ * extractNextElement() will then be called from a recursive call to
+ * getKeysImplWithArray() for each array element. For instance, it will get called with
+ * 'obj' {b: {c: 98}} and '*field' pointing to "b.c". It will return element 98 and
+ * set '*field' to "". Similarly, it will return element 99 and set '*field' to "" for
+ * the second array element.
+ */
+ BSONElement extractNextElement(const BSONObj& obj,
+ const PositionalPathInfo& positionalInfo,
+ const char** field,
+ bool* arrayNestedArray) const;
- class BtreeKeyGeneratorV1 : public BtreeKeyGenerator {
- public:
- BtreeKeyGeneratorV1(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- bool isSparse);
-
- virtual ~BtreeKeyGeneratorV1() { }
-
- private:
- /**
- * Stores info regarding traversal of a positional path. A path through a document is
- * considered positional if this path element names an array element. Generally this means
- * that the field name consists of [0-9]+, but the implementation just calls .Obj() on
- * the array and looks for the named field. This logic happens even if the field does
- * not match [0-9]+.
- *
- * Example:
- * The path 'a.1.b' can sometimes be positional due to path element '1'. In the document
- * {a: [{b: 98}, {b: 99}]} it would be considered positional, and would refer to
- * element 99. In the document {a: [{'1': {b: 97}}]}, the path is *not* considered
- * positional and would refer to element 97.
- */
- struct PositionalPathInfo {
- PositionalPathInfo() : remainingPath("") {}
-
- bool hasPositionallyIndexedElt() const { return !positionallyIndexedElt.eoo(); }
-
- // Stores the array element indexed by position. If the key pattern has no positional
- // element, then this is EOO.
- //
- // Example:
- // Suppose the key pattern is {"a.0.x": 1} and we're extracting keys for document
- // {a: [{x: 98}, {x: 99}]}. We should store element {x: 98} here.
- BSONElement positionallyIndexedElt;
-
- // The array to which 'positionallyIndexedElt' belongs.
- BSONObj arrayObj;
-
- // If we find a positionally indexed element, we traverse the remainder of the path
- // until we find either another array element or the end of the path. The result of
- // this traversal (implemented using getFieldDottedOrArray()), is stored here and used
- // during the recursive call for each array element.
- //
- // Example:
- // Suppose we have key pattern {"a.1.b.0.c": 1}. The document for which we are
- // generating keys is {a: [0, {b: [{c: 99}]}]}. We will find that {b: [{c: 99}]}
- // is a positionally indexed element and store it as 'positionallyIndexedElt'.
- //
- // We then call getFieldDottedOrArray() to traverse the remainder of the path,
- // "b.1.c". The result is the array [{c: 99}] which is stored here as 'dottedElt'.
- BSONElement dottedElt;
-
- // The remaining path that must be traversed in 'dottedElt' to find the indexed
- // element(s).
- //
- // Example:
- // Continuing the example above, 'remainingPath' will be "0.c". Note that the path
- // "0.c" refers to element 99 in 'dottedElt', [{c: 99}].
- const char* remainingPath;
- };
-
- /**
- * @param fieldNames - fields to index, may be postfixes in recursive calls
- * @param fixed - values that have already been identified for their index fields
- * @param obj - object from which keys should be extracted, based on names in fieldNames
- * @param keys - set where index keys are written
- * @param numNotFound - number of index fields that have already been identified as missing
- * @param array - array from which keys should be extracted, based on names in fieldNames
- * If obj and array are both nonempty, obj will be one of the elements of array.
- */
- virtual void getKeysImpl(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys) const;
-
- /**
- * This recursive method does the heavy-lifting for getKeysImpl().
- */
- void getKeysImplWithArray(std::vector<const char*> fieldNames,
- std::vector<BSONElement> fixed,
- const BSONObj& obj,
- BSONObjSet* keys,
- unsigned numNotFound,
- const std::vector<PositionalPathInfo>& positionalInfo) const;
- /**
- * A call to getKeysImplWithArray() begins by calling this for each field in the key
- * pattern. It uses getFieldDottedOrArray() to traverse the path '*field' in 'obj'.
- *
- * The 'positionalInfo' arg is used for handling a field path where 'obj' has an
- * array indexed by position. See the comments for PositionalPathInfo for more detail.
- *
- * Returns the element extracted as a result of traversing the path, or an indexed array
- * if we encounter one during the path traversal.
- *
- * Out-parameters:
- * --Sets *field to the remaining path that must be traversed.
- * --Sets *arrayNestedArray to true if the returned BSONElement is a nested array that is
- * indexed by position in its parent array. Otherwise sets *arrayNestedArray to false.
- *
- * Example:
- * Suppose we have key pattern {"a.b.c": 1} and we're extracting keys from document
- * {a: [{b: {c: 98}}, {b: {c: 99}}]}. On the first call to extractNextElement(), 'obj'
- * will be the full document, {a: [{b: {c: 98}}, {b: {c: 99}}]}. The 'positionalInfo'
- * argument is not relevant, because the array is not being positionally indexed.
- * '*field' will point to "a.b.c".
- *
- * The return value will be the array element [{b: {c: 98}}, {b: {c: 99}}], because path
- * traversal stops when an indexed array is encountered. Furthermore, '*field' will be set
- * to "b.c".
- *
- * extractNextElement() will then be called from a recursive call to
- * getKeysImplWithArray() for each array element. For instance, it will get called with
- * 'obj' {b: {c: 98}} and '*field' pointing to "b.c". It will return element 98 and
- * set '*field' to "". Similarly, it will return element 99 and set '*field' to "" for
- * the second array element.
- */
- BSONElement extractNextElement(const BSONObj& obj,
- const PositionalPathInfo& positionalInfo,
- const char** field,
- bool* arrayNestedArray) const;
-
- /**
- * Sets extracted elements in 'fixed' for field paths that we have traversed to the end.
- *
- * Then calls getKeysImplWithArray() recursively.
- */
- void _getKeysArrEltFixed(std::vector<const char*>* fieldNames,
- std::vector<BSONElement>* fixed,
- const BSONElement& arrEntry,
- BSONObjSet* keys,
- unsigned numNotFound,
- const BSONElement& arrObjElt,
- const std::set<unsigned>& arrIdxs,
- bool mayExpandArrayUnembedded,
- const std::vector<PositionalPathInfo>& positionalInfo) const;
-
- const std::vector<PositionalPathInfo> _emptyPositionalInfo;
- };
+ /**
+ * Sets extracted elements in 'fixed' for field paths that we have traversed to the end.
+ *
+ * Then calls getKeysImplWithArray() recursively.
+ */
+ void _getKeysArrEltFixed(std::vector<const char*>* fieldNames,
+ std::vector<BSONElement>* fixed,
+ const BSONElement& arrEntry,
+ BSONObjSet* keys,
+ unsigned numNotFound,
+ const BSONElement& arrObjElt,
+ const std::set<unsigned>& arrIdxs,
+ bool mayExpandArrayUnembedded,
+ const std::vector<PositionalPathInfo>& positionalInfo) const;
+
+ const std::vector<PositionalPathInfo> _emptyPositionalInfo;
+};
} // namespace mongo
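
The PositionalPathInfo examples in the comments above translate directly into key expectations. A sketch in the style of the tests in btree_key_generator_test.cpp (next file); the test name is hypothetical, and the expected keys follow the class comment for the path 'a.1.b':

    TEST(BtreeKeyGeneratorTest, PositionalPathSketch) {
        // '1' resolves positionally when the array holds plain documents,
        // so the key comes from element {b: 99}...
        BSONObj keyPattern = fromjson("{'a.1.b': 1}");
        BSONObj genKeysFrom = fromjson("{a: [{b: 98}, {b: 99}]}");
        BSONObjSet expectedKeys;
        expectedKeys.insert(fromjson("{'': 99}"));
        ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));

        // ...but '1' is treated as a literal field name when one exists,
        // so here the key comes from element 97 instead.
        genKeysFrom = fromjson("{a: [{'1': {b: 97}}]}");
        expectedKeys.clear();
        expectedKeys.insert(fromjson("{'': 97}"));
        ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
    }
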
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index f01f6552f18..23792fb9b87 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -41,791 +41,790 @@ using std::vector;
namespace {
- //
- // Helper functions
- //
+//
+// Helper functions
+//
- std::string dumpKeyset(const BSONObjSet& objs) {
- std::stringstream ss;
- ss << "[ ";
- for (BSONObjSet::iterator i = objs.begin(); i != objs.end(); ++i) {
- ss << i->toString() << " ";
- }
- ss << "]";
-
- return ss.str();
+std::string dumpKeyset(const BSONObjSet& objs) {
+ std::stringstream ss;
+ ss << "[ ";
+ for (BSONObjSet::iterator i = objs.begin(); i != objs.end(); ++i) {
+ ss << i->toString() << " ";
}
+ ss << "]";
- bool keysetsMatch(const BSONObjSet& expected, const BSONObjSet& actual) {
- if (expected.size() != actual.size()) {
- return false;
- }
-
- for (BSONObjSet::iterator i = expected.begin(); i != expected.end(); ++i) {
- if (actual.end() == actual.find(*i)) {
- return false;
- }
- }
+ return ss.str();
+}
- return true;
+bool keysetsMatch(const BSONObjSet& expected, const BSONObjSet& actual) {
+ if (expected.size() != actual.size()) {
+ return false;
}
- bool testKeygen(const BSONObj& kp, const BSONObj& obj,
- const BSONObjSet& expectedKeys, bool sparse = false) {
- //
- // Step 1: construct the btree key generator object, using the
- // index key pattern.
- //
- vector<const char*> fieldNames;
- vector<BSONElement> fixed;
-
- BSONObjIterator it(kp);
- while (it.more()) {
- BSONElement elt = it.next();
- fieldNames.push_back(elt.fieldName());
- fixed.push_back(BSONElement());
- }
-
- unique_ptr<BtreeKeyGenerator> keyGen(
- new BtreeKeyGeneratorV1(fieldNames, fixed, sparse));
-
- //
- // Step 2: ask 'keyGen' to generate index keys for the object 'obj'.
- //
- BSONObjSet actualKeys;
- keyGen->getKeys(obj, &actualKeys);
-
- //
- // Step 3: check that the results match the expected result.
- //
- bool match = keysetsMatch(expectedKeys, actualKeys);
- if (!match) {
- cout << "Expected: " << dumpKeyset(expectedKeys) << ", "
- << "Actual: " << dumpKeyset(actualKeys) << endl;
+ for (BSONObjSet::iterator i = expected.begin(); i != expected.end(); ++i) {
+ if (actual.end() == actual.find(*i)) {
+ return false;
}
-
- return match;
}
+ return true;
+}
+
+bool testKeygen(const BSONObj& kp,
+ const BSONObj& obj,
+ const BSONObjSet& expectedKeys,
+ bool sparse = false) {
//
- // Unit tests
+ // Step 1: construct the btree key generator object, using the
+ // index key pattern.
//
+ vector<const char*> fieldNames;
+ vector<BSONElement> fixed;
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{b: 4, a: 5}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: {b: 4}, c: 'foo'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 2}"));
- expectedKeys.insert(fromjson("{'': 2, '': 2}"));
- expectedKeys.insert(fromjson("{'': 3, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+ BSONObjIterator it(kp);
+ while (it.more()) {
+ BSONElement elt = it.next();
+ fieldNames.push_back(elt.fieldName());
+ fixed.push_back(BSONElement());
}
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) {
- BSONObj keyPattern = fromjson("{first: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{first: 5, a: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5, '': 1}"));
- expectedKeys.insert(fromjson("{'': 5, '': 2}"));
- expectedKeys.insert(fromjson("{'': 5, '': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSecondLevelArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: {b: [1, 2, 3]}}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysBasic) {
- BSONObj keyPattern = fromjson("{'a': 1, 'b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: [1, 2, 3]}}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) {
- BSONObj keyPattern = fromjson("{'a.b': 1, d: 99}");
- BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}], d: 99}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 99}"));
- expectedKeys.insert(fromjson("{'': 2, '': 99}"));
- expectedKeys.insert(fromjson("{'': 3, '': 99}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {foo: 41}, {foo: 41}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysMissingField) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{b: 1}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) {
- BSONObj keyPattern = fromjson("{x: 1, y: 1}");
- BSONObj genKeysFrom = fromjson("{x: 'a', y: 'b'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 'a', '': 'b'}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
+ unique_ptr<BtreeKeyGenerator> keyGen(new BtreeKeyGeneratorV1(fieldNames, fixed, sparse));
- TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) {
- BSONObj keyPattern = fromjson("{x: 1, y: 1}");
- BSONObj genKeysFrom = fromjson("{x: 'a'}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 'a', '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubelementComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[2]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[1],c:[2]}]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1},{c:2}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': 2}"));
- expectedKeys.insert(fromjson("{'': 1, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1},{b:[1,2,3]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[1,2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: [1]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: null}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: []}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) {
- BSONObj keyPattern = fromjson("{a: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[1,2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': 2, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) {
- BSONObj keyPattern = fromjson("{a: 1, a: 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': undefined, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: 1, b: [1, 2]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': 1, '': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: 1, b: [1]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: 1, b: []}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': undefined, '': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b: 1}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {b:1}, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b: []}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {b:[]}, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromReverseUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, SparseReverseUnevenNestedEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': undefined}"));
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:1}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[{c:1}]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArraySecond) {
- BSONObj keyPattern = fromjson("{z: 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:1}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[{c:1}]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[]}");
- BSONObjSet expectedKeys;
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[1]}");
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
-
- genKeysFrom = fromjson("{a:[1,{b:1}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- // true means sparse
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) {
- BSONObj keyPattern = fromjson("{'a.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[1]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[1]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:{'0':1}}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{'0':1}]}");
- expectedKeys.clear();
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
-
- genKeysFrom = fromjson("{a:[1,{'0':2}]}");
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) {
- BSONObj keyPattern = fromjson("{'a.0.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[[1]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[[]]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) {
- BSONObj keyPattern = fromjson("{'a.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:1}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b:[1]}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[{b:[[1]]}]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:1}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[1]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[[1]]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': [1]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a:[[{b:[]}]]}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': undefined}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithinObjectWithinArray) {
- BSONObj keyPattern = fromjson("{'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a:[{b:[1]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, ParallelArraysInNestedObjects) {
- BSONObj keyPattern = fromjson("{'a.a': 1, 'b.a': 1}");
- BSONObj genKeysFrom = fromjson("{a:{a:[1]}, b:{a:[1]}}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, ParallelArraysUneven) {
- BSONObj keyPattern = fromjson("{'b.a': 1, 'a': 1}");
- BSONObj genKeysFrom = fromjson("{b:{a:[1]}, a:[1,2]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4]}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- expectedKeys.insert(fromjson("{'': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4], d: 5}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null, '': null}"));
- expectedKeys.insert(fromjson("{'': 3, '': 5}"));
- expectedKeys.insert(fromjson("{'': 4, '': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) {
- BSONObj keyPattern = fromjson(
- "{'a.b.c.d': 1, 'a.g': 1, 'a.b.f': 1, 'a.b.c': 1, 'a.b.e': 1}");
- BSONObj genKeysFrom = fromjson(
- "{a: [1, {b: [2, {c: [3, {d: 1}], e: 4}, 5, {f: 6}], g: 7}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'':null, '':null, '':null, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':3, '':4}"));
- expectedKeys.insert(fromjson("{'':null, '':7, '':6, '':null, '':null}"));
- expectedKeys.insert(fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should future index versions recursively index nested arrays?
- TEST(BtreeKeyGeneratorTest, GetKeys2DArray) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: [[2]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': [2]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should parallel indexed arrays be allowed? If not, should empty
- // or single-element arrays be considered for the parallel array check?
- TEST(BtreeKeyGeneratorTest, GetKeysParallelEmptyArrays) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [], b: []}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmpty) {
- BSONObj keyPattern = fromjson("{a: 1, b: 1}");
- BSONObj genKeysFrom = fromjson("{a: [], b: [1, 2, 3]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmptyNested) {
- BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [{c: [1, 2, 3], d: []}]}]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{'2': 5}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], [3, 4, 6], [0, 1, 2]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': [0, 1, 2]}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{'0': 1, '1': 2, '2': 5}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 5}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) {
- BSONObj keyPattern = fromjson("{'a.b.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [[1, 2, 5]]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. The semantics for key generation are odd for positional key patterns.
- TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) {
- BSONObj keyPattern = fromjson("{'a.2': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], {'2': 6}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- expectedKeys.insert(fromjson("{'': 6}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) {
- BSONObj keyPattern = fromjson("{'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) {
- BSONObj keyPattern = fromjson("{a: 1, 'a.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, 3, 4]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1, '': null}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 2}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 3}"));
- expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 4}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Should we define better semantics for future index versions in the case of
- // repeated field names?
- TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) {
- BSONObj keyPattern = fromjson("{a: 1}");
- BSONObj genKeysFrom = fromjson("{a: 2, a: 3}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Future index versions may want different or at least more consistent
- // handling of empty path components.
- TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) {
- BSONObj keyPattern = fromjson("{'a..c': 1}");
- BSONObj genKeysFrom = fromjson("{a: {'': [{c: 1}, {c: 2}]}}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test. Future index versions may want different or at least more consistent
- // handling of empty path components.
- TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) {
- BSONObj keyPattern = fromjson("{'a.': 1}");
-
- BSONObj genKeysFrom = fromjson("{a: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
-
- genKeysFrom = fromjson("{a: {'': 2}}");
- expectedKeys.clear();
- expectedKeys.insert(fromjson("{'': {'': 2}}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) {
- BSONObj keyPattern = fromjson("{'.a': 1}");
- BSONObj genKeysFrom = fromjson("{a: 2}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty2) {
- BSONObj keyPattern = fromjson("{'.a': 1}");
- BSONObj genKeysFrom = fromjson("{'': [{a: [1, 2, 3]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- expectedKeys.insert(fromjson("{'': 2}"));
- expectedKeys.insert(fromjson("{'': 3}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternParallelArrays) {
- BSONObj keyPattern = fromjson("{a: 1, 'b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [1], b: [2]}");
- BSONObjSet expectedKeys;
- ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays) {
- BSONObj keyPattern = fromjson("{'a.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays2) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays3) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[[ {b: 1} ]]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays4) {
- BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
- BSONObj genKeysFrom = fromjson("{a: [[[[ {b: 1} ]]]]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': null}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) {
- BSONObj keyPattern = fromjson("{'a.b.1': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': 2}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: 3}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 2, '': null, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
-
- // Descriptive test.
- TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) {
- BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
- BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: {'0': 3}}]}");
- BSONObjSet expectedKeys;
- expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 1, '': 3, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 2, '': 3, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
- expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
- ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
- }
+ //
+ // Step 2: ask 'keyGen' to generate index keys for the object 'obj'.
+ //
+ BSONObjSet actualKeys;
+ keyGen->getKeys(obj, &actualKeys);
-} // namespace
+ //
+ // Step 3: check that the results match the expected result.
+ //
+ bool match = keysetsMatch(expectedKeys, actualKeys);
+ if (!match) {
+ cout << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys) << endl;
+ }
+
+ return match;
+}
+
+//
+// Unit tests
+//
+
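+// Each test below builds a key pattern and a source document with fromjson(),
+// enumerates the expected keys (index keys carry empty field names, e.g.
+// {'': 5}), and defers the actual comparison to testKeygen() above.
+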
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{b: 4, a: 5}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {b: 4}, c: 'foo'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
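+
+// Note: an indexed array field makes the index "multikey": the document gets
+// one key per array element, as GetKeysFromArraySimple above shows.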
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) {
+ BSONObj keyPattern = fromjson("{first: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{first: 5, a: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 5, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 5, '': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSecondLevelArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {b: [1, 2, 3]}}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysBasic) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'b': 1}");
+    BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
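+
+// Two arrays in one compound key ("parallel arrays", as above) are rejected
+// with a UserException rather than being expanded into a cross product of
+// their elements.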
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, d: 99}");
+ BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}], d: 99}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 99}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 99}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 99}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {foo: 41}, {foo: 41}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysMissingField) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{b: 1}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) {
+ BSONObj keyPattern = fromjson("{x: 1, y: 1}");
+ BSONObj genKeysFrom = fromjson("{x: 'a', y: 'b'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 'a', '': 'b'}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) {
+ BSONObj keyPattern = fromjson("{x: 1, y: 1}");
+ BSONObj genKeysFrom = fromjson("{x: 'a'}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 'a', '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubelementComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[2]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[1],c:[2]}]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1},{c:2}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': 2}"));
+ expectedKeys.insert(fromjson("{'': 1, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1},{b:[1,2,3]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1,2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: [1]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: null}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: []}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) {
+ BSONObj keyPattern = fromjson("{a: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1,2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 2, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) {
+ BSONObj keyPattern = fromjson("{a: 1, a: 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': undefined, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
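+
+// Note: an empty array is indexed as a single 'undefined' key component (see
+// GetKeysArrayEmpty and GetKeysFromDoubleEmptyArray above), while a dotted
+// path that finds nothing yields 'null' (GetKeysFromNestedEmptyArray below).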
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: 1, b: [1, 2]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': 1, '': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: 1, b: [1]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: 1, b: []}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': undefined, '': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b: 1}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {b:1}, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b: []}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {b:[]}, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromReverseUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, SparseReverseUnevenNestedEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': undefined}"));
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
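+
+// For the sparse tests below, the fourth testKeygen() argument is true; an
+// empty expectedKeys set asserts that the document generates no keys at all.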
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:1}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[{c:1}]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromSparseEmptyArraySecond) {
+ BSONObj keyPattern = fromjson("{z: 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:1}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[{c:1}]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[]}");
+ BSONObjSet expectedKeys;
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[1]}");
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+
+ genKeysFrom = fromjson("{a:[1,{b:1}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ // true means sparse
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, true));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) {
+ BSONObj keyPattern = fromjson("{'a.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[1]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[1]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:{'0':1}}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{'0':1}]}");
+ expectedKeys.clear();
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+
+ genKeysFrom = fromjson("{a:[1,{'0':2}]}");
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) {
+ BSONObj keyPattern = fromjson("{'a.0.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[[1]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[[]]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) {
+ BSONObj keyPattern = fromjson("{'a.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:1}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b:[1]}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[{b:[[1]]}]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:1}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[1]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[[1]]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': [1]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a:[[{b:[]}]]}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': undefined}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithinObjectWithinArray) {
+ BSONObj keyPattern = fromjson("{'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a:[{b:[1]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, ParallelArraysInNestedObjects) {
+ BSONObj keyPattern = fromjson("{'a.a': 1, 'b.a': 1}");
+ BSONObj genKeysFrom = fromjson("{a:{a:[1]}, b:{a:[1]}}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, ParallelArraysUneven) {
+ BSONObj keyPattern = fromjson("{'b.a': 1, 'a': 1}");
+ BSONObj genKeysFrom = fromjson("{b:{a:[1]}, a:[1,2]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4]}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ expectedKeys.insert(fromjson("{'': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4], d: 5}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null, '': null}"));
+ expectedKeys.insert(fromjson("{'': 3, '': 5}"));
+ expectedKeys.insert(fromjson("{'': 4, '': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) {
+ BSONObj keyPattern = fromjson("{'a.b.c.d': 1, 'a.g': 1, 'a.b.f': 1, 'a.b.c': 1, 'a.b.e': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, {c: [3, {d: 1}], e: 4}, 5, {f: 6}], g: 7}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'':null, '':null, '':null, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':null, '':3, '':4}"));
+ expectedKeys.insert(fromjson("{'':null, '':7, '':6, '':null, '':null}"));
+ expectedKeys.insert(fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Should future index versions recursively index nested arrays?
+TEST(BtreeKeyGeneratorTest, GetKeys2DArray) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[2]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': [2]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
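+
+// Note that the nested array is not expanded recursively: the inner array
+// [2] itself becomes the key value above.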
+
+// Descriptive test. Should parallel indexed arrays be allowed? If not, should empty
+// or single-element arrays be considered for the parallel array check?
+TEST(BtreeKeyGeneratorTest, GetKeysParallelEmptyArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [], b: []}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmpty) {
+ BSONObj keyPattern = fromjson("{a: 1, b: 1}");
+ BSONObj genKeysFrom = fromjson("{a: [], b: [1, 2, 3]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmptyNested) {
+ BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [{c: [1, 2, 3], d: []}]}]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{'2': 5}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], [3, 4, 6], [0, 1, 2]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': [0, 1, 2]}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{'0': 1, '1': 2, '2': 5}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 5}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) {
+ BSONObj keyPattern = fromjson("{'a.b.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [[1, 2, 5]]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. The semantics for key generation are odd for positional key patterns.
+TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) {
+ BSONObj keyPattern = fromjson("{'a.2': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], {'2': 6}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ expectedKeys.insert(fromjson("{'': 6}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) {
+ BSONObj keyPattern = fromjson("{'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, 'a.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, 3, 4]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1, '': null}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 2}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 3}"));
+ expectedKeys.insert(fromjson("{'': {b:[2,3,4]}, '': 4}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Should we define better semantics for future index versions in the case of
+// repeated field names?
+TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) {
+ BSONObj keyPattern = fromjson("{a: 1}");
+ BSONObj genKeysFrom = fromjson("{a: 2, a: 3}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Future index versions may want different or at least more consistent
+// handling of empty path components.
+TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) {
+ BSONObj keyPattern = fromjson("{'a..c': 1}");
+ BSONObj genKeysFrom = fromjson("{a: {'': [{c: 1}, {c: 2}]}}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test. Future index versions may want different or at least more consistent
+// handling of empty path components.
+TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) {
+ BSONObj keyPattern = fromjson("{'a.': 1}");
+
+ BSONObj genKeysFrom = fromjson("{a: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+
+ genKeysFrom = fromjson("{a: {'': 2}}");
+ expectedKeys.clear();
+ expectedKeys.insert(fromjson("{'': {'': 2}}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
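+// Descriptive test. A leading empty path component is matched literally:
+// with the pattern {'.a': 1}, a top-level field 'a' is not found (this test
+// yields a null key), but a field whose name is the empty string is
+// traversed (next test).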
+TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) {
+ BSONObj keyPattern = fromjson("{'.a': 1}");
+ BSONObj genKeysFrom = fromjson("{a: 2}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty2) {
+ BSONObj keyPattern = fromjson("{'.a': 1}");
+ BSONObj genKeysFrom = fromjson("{'': [{a: [1, 2, 3]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ expectedKeys.insert(fromjson("{'': 2}"));
+ expectedKeys.insert(fromjson("{'': 3}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternParallelArrays) {
+ BSONObj keyPattern = fromjson("{a: 1, 'b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [1], b: [2]}");
+ BSONObjSet expectedKeys;
+ ASSERT_THROWS(testKeygen(keyPattern, genKeysFrom, expectedKeys), UserException);
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays) {
+ BSONObj keyPattern = fromjson("{'a.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays2) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays3) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[[ {b: 1} ]]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays4) {
+ BSONObj keyPattern = fromjson("{'a.0.0.b': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [[[[ {b: 1} ]]]]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': null}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) {
+ BSONObj keyPattern = fromjson("{'a.b.1': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': 2}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: 3}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:3}, '': 3, '': 2, '': null, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+// Descriptive test.
+TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) {
+ BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}");
+ BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: {'0': 3}}]}");
+ BSONObjSet expectedKeys;
+ expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 1, '': 3, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 2, '': 3, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"));
+ expectedKeys.insert(fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"));
+ ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys));
+}
+
+} // namespace
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 1202b77ce0f..559175444bf 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -48,471 +48,475 @@
namespace {
- using namespace mongo;
-
- //
- // Helper functions for getHaystackKeys
- //
-
- /**
- * Build a new BSONObj with root in it. If e is non-empty, append that to the key.
- * Insert the BSONObj into keys.
- * Used by getHaystackKeys.
- */
- void addKey(const string& root, const BSONElement& e, BSONObjSet* keys) {
- BSONObjBuilder buf;
- buf.append("", root);
-
- if (e.eoo())
- buf.appendNull("");
- else
- buf.appendAs(e, "");
-
- keys->insert(buf.obj());
- }
+using namespace mongo;
- //
- // Helper functions for getS2Keys
- //
+//
+// Helper functions for getHaystackKeys
+//
- static void S2KeysFromRegion(S2RegionCoverer *coverer, const S2Region &region,
- vector<string> *out) {
- vector<S2CellId> covering;
- coverer->GetCovering(region, &covering);
- for (size_t i = 0; i < covering.size(); ++i) {
- out->push_back(covering[i].toString());
- }
+/**
+ * Build a new BSONObj with root in it. If e is non-empty, append that to the key.
+ * Insert the BSONObj into keys.
+ * Used by getHaystackKeys.
+ */
+void addKey(const string& root, const BSONElement& e, BSONObjSet* keys) {
+ BSONObjBuilder buf;
+ buf.append("", root);
+
+ if (e.eoo())
+ buf.appendNull("");
+ else
+ buf.appendAs(e, "");
+
+ keys->insert(buf.obj());
+}
+
+//
+// Helper functions for getS2Keys
+//
+
+static void S2KeysFromRegion(S2RegionCoverer* coverer,
+ const S2Region& region,
+ vector<string>* out) {
+ vector<S2CellId> covering;
+ coverer->GetCovering(region, &covering);
+ for (size_t i = 0; i < covering.size(); ++i) {
+ out->push_back(covering[i].toString());
}
+}
- Status S2GetKeysForElement(const BSONElement& element,
- const S2IndexingParams& params,
- vector<string>* out) {
- GeometryContainer geoContainer;
- Status status = geoContainer.parseFromStorage(element);
- if (!status.isOK()) return status;
-
- S2RegionCoverer coverer;
- params.configureCoverer(&coverer);
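+
+/**
+ * Parses the geometry stored in 'element', projects it into spherical space,
+ * and appends one S2 cell-id string per cell of its covering to 'out'.
+ * Used by getS2GeoKeys.
+ */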
+Status S2GetKeysForElement(const BSONElement& element,
+ const S2IndexingParams& params,
+ vector<string>* out) {
+ GeometryContainer geoContainer;
+ Status status = geoContainer.parseFromStorage(element);
+ if (!status.isOK())
+ return status;
- // Don't index big polygon
- if (geoContainer.getNativeCRS() == STRICT_SPHERE) {
- return Status(ErrorCodes::BadValue, "can't index geometry with strict winding order");
- }
+ S2RegionCoverer coverer;
+ params.configureCoverer(&coverer);
- // Only certain geometries can be indexed in the old index format S2_INDEX_VERSION_1. See
- // definition of S2IndexVersion for details.
- if (params.indexVersion == S2_INDEX_VERSION_1 && !geoContainer.isSimpleContainer()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "given geometry can't be indexed in the old index format");
- }
-
- // Project the geometry into spherical space
- if (!geoContainer.supportsProject(SPHERE)) {
- return Status(ErrorCodes::BadValue,
- str::stream() << "can't project geometry into spherical CRS: "
- << element.toString(false));
- }
- geoContainer.projectInto(SPHERE);
-
- invariant(geoContainer.hasS2Region());
+ // Don't index big polygon
+ if (geoContainer.getNativeCRS() == STRICT_SPHERE) {
+ return Status(ErrorCodes::BadValue, "can't index geometry with strict winding order");
+ }
- S2KeysFromRegion(&coverer, geoContainer.getS2Region(), out);
- return Status::OK();
+ // Only certain geometries can be indexed in the old index format S2_INDEX_VERSION_1. See
+ // definition of S2IndexVersion for details.
+ if (params.indexVersion == S2_INDEX_VERSION_1 && !geoContainer.isSimpleContainer()) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "given geometry can't be indexed in the old index format");
}
+ // Project the geometry into spherical space
+ if (!geoContainer.supportsProject(SPHERE)) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "can't project geometry into spherical CRS: "
+ << element.toString(false));
+ }
+ geoContainer.projectInto(SPHERE);
- /**
- * Get the index keys for elements that are GeoJSON.
- * Used by getS2Keys.
- */
- void getS2GeoKeys(const BSONObj& document, const BSONElementSet& elements,
- const S2IndexingParams& params,
- BSONObjSet* out) {
- for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
- vector<string> cells;
- Status status = S2GetKeysForElement(*i, params, &cells);
- uassert(16755, str::stream() << "Can't extract geo keys: " << document << " "
- << status.reason(), status.isOK());
+ invariant(geoContainer.hasS2Region());
- uassert(16756, "Unable to generate keys for (likely malformed) geometry: "
- + document.toString(),
- cells.size() > 0);
+ S2KeysFromRegion(&coverer, geoContainer.getS2Region(), out);
+ return Status::OK();
+}
- for (vector<string>::const_iterator it = cells.begin(); it != cells.end(); ++it) {
- BSONObjBuilder b;
- b.append("", *it);
- out->insert(b.obj());
- }
- }
- if (0 == out->size()) {
+/**
+ * Get the index keys for elements that are GeoJSON.
+ * Used by getS2Keys.
+ */
+void getS2GeoKeys(const BSONObj& document,
+ const BSONElementSet& elements,
+ const S2IndexingParams& params,
+ BSONObjSet* out) {
+ for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
+ vector<string> cells;
+ Status status = S2GetKeysForElement(*i, params, &cells);
+ uassert(16755,
+ str::stream() << "Can't extract geo keys: " << document << " " << status.reason(),
+ status.isOK());
+
+ uassert(16756,
+ "Unable to generate keys for (likely malformed) geometry: " + document.toString(),
+ cells.size() > 0);
+
+ for (vector<string>::const_iterator it = cells.begin(); it != cells.end(); ++it) {
BSONObjBuilder b;
- b.appendNull("");
+ b.append("", *it);
out->insert(b.obj());
}
}
- /**
- * Expands array and appends items to 'out'.
-     * Used by getS2OneLiteralKey.
- */
- void getS2LiteralKeysArray(const BSONObj& obj, BSONObjSet* out) {
- BSONObjIterator objIt(obj);
- if (!objIt.more()) {
- // Empty arrays are indexed as undefined.
- BSONObjBuilder b;
- b.appendUndefined("");
- out->insert(b.obj());
- } else {
- // Non-empty arrays are exploded.
- while (objIt.more()) {
- BSONObjBuilder b;
- b.appendAs(objIt.next(), "");
- out->insert(b.obj());
- }
- }
+ if (0 == out->size()) {
+ BSONObjBuilder b;
+ b.appendNull("");
+ out->insert(b.obj());
}
+}
- /**
- * If 'elt' is an array, expands elt and adds items to 'out'.
- * Otherwise, adds 'elt' as a single element.
-     * Used by getS2LiteralKeys.
- */
- void getS2OneLiteralKey(const BSONElement& elt, BSONObjSet* out) {
- if (Array == elt.type()) {
- getS2LiteralKeysArray(elt.Obj(), out);
- } else {
- // One thing, not an array, index as-is.
+/**
+ * Expands array and appends items to 'out'.
+ * Used by getS2OneLiteralKey.
+ */
+void getS2LiteralKeysArray(const BSONObj& obj, BSONObjSet* out) {
+ BSONObjIterator objIt(obj);
+ if (!objIt.more()) {
+ // Empty arrays are indexed as undefined.
+ BSONObjBuilder b;
+ b.appendUndefined("");
+ out->insert(b.obj());
+ } else {
+ // Non-empty arrays are exploded.
+ while (objIt.more()) {
BSONObjBuilder b;
- b.appendAs(elt, "");
+ b.appendAs(objIt.next(), "");
out->insert(b.obj());
}
}
+}
- /**
- * elements is a non-geo field. Add the values literally, expanding arrays.
- * Used by getS2Keys.
- */
- void getS2LiteralKeys(const BSONElementSet& elements, BSONObjSet* out) {
- if (0 == elements.size()) {
- // Missing fields are indexed as null.
- BSONObjBuilder b;
- b.appendNull("");
- out->insert(b.obj());
- } else {
- for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
- getS2OneLiteralKey(*i, out);
- }
+/**
+ * If 'elt' is an array, expands elt and adds items to 'out'.
+ * Otherwise, adds 'elt' as a single element.
+ * Used by getS2LiteralKeys.
+ */
+void getS2OneLiteralKey(const BSONElement& elt, BSONObjSet* out) {
+ if (Array == elt.type()) {
+ getS2LiteralKeysArray(elt.Obj(), out);
+ } else {
+ // One thing, not an array, index as-is.
+ BSONObjBuilder b;
+ b.appendAs(elt, "");
+ out->insert(b.obj());
+ }
+}
+
+/**
+ * elements is a non-geo field. Add the values literally, expanding arrays.
+ * Used by getS2Keys.
+ */
+void getS2LiteralKeys(const BSONElementSet& elements, BSONObjSet* out) {
+ if (0 == elements.size()) {
+ // Missing fields are indexed as null.
+ BSONObjBuilder b;
+ b.appendNull("");
+ out->insert(b.obj());
+ } else {
+ for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) {
+ getS2OneLiteralKey(*i, out);
}
}
+}
-} // namespace
+} // namespace
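
Taken together, these helpers give non-geo fields in a 2dsphere index a simple contract: missing field yields a null key, an empty array an undefined key, a non-empty array one key per element, and a scalar is indexed as-is. A minimal dbtest-style sketch of that contract, written as if inside this translation unit (assumes the mongo codebase; values illustrative):

    // Sketch only: exercises getS2LiteralKeys under the contract above.
    BSONElementSet elements;  // left empty: the field is missing
    BSONObjSet out;
    getS2LiteralKeys(elements, &out);
    // out now holds the single key { "": null }.
    //
    // Had the field held the array ["x", "y"], the set would instead hold
    // { "": "x" } and { "": "y" }; an empty array yields { "": undefined }.
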
namespace mongo {
- using std::pair;
- using std::string;
- using std::vector;
-
- // static
- void ExpressionKeysPrivate::get2DKeys(const BSONObj &obj,
- const TwoDIndexingParams& params,
- BSONObjSet* keys,
- std::vector<BSONObj>* locs) {
- BSONElementMSet bSet;
+using std::pair;
+using std::string;
+using std::vector;
- // Get all the nested location fields, but don't return individual elements from
- // the last array, if it exists.
- obj.getFieldsDotted(params.geo.c_str(), bSet, false);
+// static
+void ExpressionKeysPrivate::get2DKeys(const BSONObj& obj,
+ const TwoDIndexingParams& params,
+ BSONObjSet* keys,
+ std::vector<BSONObj>* locs) {
+ BSONElementMSet bSet;
- if (bSet.empty())
- return;
+ // Get all the nested location fields, but don't return individual elements from
+ // the last array, if it exists.
+ obj.getFieldsDotted(params.geo.c_str(), bSet, false);
- for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
- BSONElement geo = *setI;
+ if (bSet.empty())
+ return;
- if (geo.eoo() || !geo.isABSONObj())
- continue;
+ for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
+ BSONElement geo = *setI;
- //
- // Grammar for location lookup:
- // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
- // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
- //
- // Empty locations are ignored, preserving single-location semantics
- //
+ if (geo.eoo() || !geo.isABSONObj())
+ continue;
- BSONObj embed = geo.embeddedObject();
- if (embed.isEmpty())
- continue;
+ //
+ // Grammar for location lookup:
+ // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
+ // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
+ //
+ // Empty locations are ignored, preserving single-location semantics
+ //
- // Differentiate between location arrays and locations
- // by seeing if the first element value is a number
- bool singleElement = embed.firstElement().isNumber();
+ BSONObj embed = geo.embeddedObject();
+ if (embed.isEmpty())
+ continue;
- BSONObjIterator oi(embed);
+ // Differentiate between location arrays and locations
+ // by seeing if the first element value is a number
+ bool singleElement = embed.firstElement().isNumber();
- while (oi.more()) {
- BSONObj locObj;
+ BSONObjIterator oi(embed);
- if (singleElement) {
- locObj = embed;
- } else {
- BSONElement locElement = oi.next();
+ while (oi.more()) {
+ BSONObj locObj;
- uassert(16804, mongoutils::str::stream() <<
- "location object expected, location array not in correct format",
- locElement.isABSONObj());
+ if (singleElement) {
+ locObj = embed;
+ } else {
+ BSONElement locElement = oi.next();
- locObj = locElement.embeddedObject();
- if(locObj.isEmpty())
- continue;
- }
+ uassert(16804,
+ mongoutils::str::stream()
+ << "location object expected, location array not in correct format",
+ locElement.isABSONObj());
- BSONObjBuilder b(64);
+ locObj = locElement.embeddedObject();
+ if (locObj.isEmpty())
+ continue;
+ }
- // Remember the actual location object if needed
- if (locs)
- locs->push_back(locObj);
+ BSONObjBuilder b(64);
- // Stop if we don't need to get anything but location objects
- if (!keys) {
- if (singleElement) break;
- else continue;
- }
+ // Remember the actual location object if needed
+ if (locs)
+ locs->push_back(locObj);
- params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");
-
- // Go through all the other index keys
- for (vector<pair<string, int> >::const_iterator i = params.other.begin();
- i != params.other.end(); ++i) {
- // Get *all* fields for the index key
- BSONElementSet eSet;
- obj.getFieldsDotted(i->first, eSet);
-
- if (eSet.size() == 0)
- b.appendNull("");
- else if (eSet.size() == 1)
- b.appendAs(*(eSet.begin()), "");
- else {
- // If we have more than one key, store as an array of the objects
- BSONArrayBuilder aBuilder;
-
- for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end();
- ++ei) {
- aBuilder.append(*ei);
- }
+ // Stop if we don't need to get anything but location objects
+ if (!keys) {
+ if (singleElement)
+ break;
+ else
+ continue;
+ }
- b.append("", aBuilder.arr());
+ params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");
+
+ // Go through all the other index keys
+ for (vector<pair<string, int>>::const_iterator i = params.other.begin();
+ i != params.other.end();
+ ++i) {
+ // Get *all* fields for the index key
+ BSONElementSet eSet;
+ obj.getFieldsDotted(i->first, eSet);
+
+ if (eSet.size() == 0)
+ b.appendNull("");
+ else if (eSet.size() == 1)
+ b.appendAs(*(eSet.begin()), "");
+ else {
+ // If we have more than one key, store as an array of the objects
+ BSONArrayBuilder aBuilder;
+
+ for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei) {
+ aBuilder.append(*ei);
}
+
+ b.append("", aBuilder.arr());
}
- keys->insert(b.obj());
- if(singleElement) break;
}
+ keys->insert(b.obj());
+ if (singleElement)
+ break;
}
}
-
- // static
- void ExpressionKeysPrivate::getFTSKeys(const BSONObj &obj,
- const fts::FTSSpec& ftsSpec,
- BSONObjSet* keys) {
- fts::FTSIndexFormat::getKeys(ftsSpec, obj, keys);
+}
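
Each emitted 2d key is the location's geohash followed by one element per trailing index field: null when the field is missing, an array when it is multi-valued. A sketch of the call (assumes the mongo codebase and GeoHashConverter's defaults; the spec and document are illustrative):

    // Sketch only: index { loc: "2d", cat: 1 } over { loc: [5, 5], cat: "x" }.
    TwoDIndexingParams params;
    ExpressionParams::parseTwoDParams(
        BSON("key" << BSON("loc" << "2d" << "cat" << 1)), &params);

    BSONObjSet keys;
    ExpressionKeysPrivate::get2DKeys(
        BSON("loc" << BSON_ARRAY(5 << 5) << "cat" << "x"), params, &keys, NULL);
    // Each key has the form { "": <geohash of (5, 5)>, "": "x" }.
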
+
+// static
+void ExpressionKeysPrivate::getFTSKeys(const BSONObj& obj,
+ const fts::FTSSpec& ftsSpec,
+ BSONObjSet* keys) {
+ fts::FTSIndexFormat::getKeys(ftsSpec, obj, keys);
+}
+
+// static
+void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
+ const string& hashedField,
+ HashSeed seed,
+ int hashVersion,
+ bool isSparse,
+ BSONObjSet* keys) {
+ const char* cstr = hashedField.c_str();
+ BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
+ uassert(16766,
+ "Error: hashed indexes do not currently support array values",
+ fieldVal.type() != Array);
+
+ if (!fieldVal.eoo()) {
+ BSONObj key = BSON("" << makeSingleHashKey(fieldVal, seed, hashVersion));
+ keys->insert(key);
+ } else if (!isSparse) {
+ BSONObj nullObj = BSON("" << BSONNULL);
+ keys->insert(BSON("" << makeSingleHashKey(nullObj.firstElement(), seed, hashVersion)));
}
-
- // static
- void ExpressionKeysPrivate::getHashKeys(const BSONObj& obj,
- const string& hashedField,
- HashSeed seed,
- int hashVersion,
- bool isSparse,
+}
+
+// static
+long long int ExpressionKeysPrivate::makeSingleHashKey(const BSONElement& e, HashSeed seed, int v) {
+ massert(16767, "Only HashVersion 0 has been defined", v == 0);
+ return BSONElementHasher::hash64(e, seed);
+}
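
So a hashed key is just the 64-bit hash of the field's single value; arrays are rejected outright, and a missing field hashes BSON null unless the index is sparse. A sketch (assumes the mongo codebase; the field name is illustrative):

    // Sketch only.
    BSONObjSet keys;
    ExpressionKeysPrivate::getHashKeys(BSON("a" << 1),
                                       "a",
                                       BSONElementHasher::DEFAULT_HASH_SEED,
                                       0 /* hashVersion */,
                                       false /* isSparse */,
                                       &keys);
    // keys holds one entry: { "": <64-bit hash of the value> }.
    // A document without 'a' yields the hash of null instead; any array
    // value for 'a' fails with code 16766.
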
+
+// static
+void ExpressionKeysPrivate::getHaystackKeys(const BSONObj& obj,
+ const std::string& geoField,
+ const std::vector<std::string>& otherFields,
+ double bucketSize,
BSONObjSet* keys) {
+ BSONElement loc = obj.getFieldDotted(geoField);
- const char* cstr = hashedField.c_str();
- BSONElement fieldVal = obj.getFieldDottedOrArray(cstr);
- uassert(16766, "Error: hashed indexes do not currently support array values",
- fieldVal.type() != Array );
-
- if (!fieldVal.eoo()) {
- BSONObj key = BSON( "" << makeSingleHashKey(fieldVal, seed, hashVersion));
- keys->insert(key);
- }
- else if (!isSparse) {
- BSONObj nullObj = BSON("" << BSONNULL);
- keys->insert(BSON("" << makeSingleHashKey(nullObj.firstElement(), seed, hashVersion)));
- }
+ if (loc.eoo()) {
+ return;
}
- // static
- long long int ExpressionKeysPrivate::makeSingleHashKey(const BSONElement& e,
- HashSeed seed,
- int v) {
- massert(16767, "Only HashVersion 0 has been defined" , v == 0 );
- return BSONElementHasher::hash64(e, seed);
- }
-
- // static
- void ExpressionKeysPrivate::getHaystackKeys(const BSONObj& obj,
- const std::string& geoField,
- const std::vector<std::string>& otherFields,
- double bucketSize,
- BSONObjSet* keys) {
-
- BSONElement loc = obj.getFieldDotted(geoField);
-
- if (loc.eoo()) { return; }
-
- // NOTE: We explicitly test nFields >= 2 to support legacy users who may have indexed
- // (intentionally or unintentionally) objects/arrays with more than two fields.
- uassert(16775, str::stream() << "cannot extract [lng, lat] array or object from " << obj,
+ // NOTE: We explicitly test nFields >= 2 to support legacy users who may have indexed
+ // (intentionally or unintentionally) objects/arrays with more than two fields.
+ uassert(16775,
+ str::stream() << "cannot extract [lng, lat] array or object from " << obj,
loc.isABSONObj() && loc.Obj().nFields() >= 2);
- string root;
- {
- BSONObjIterator i(loc.Obj());
- BSONElement x = i.next();
- BSONElement y = i.next();
- root = makeHaystackString(hashHaystackElement(x, bucketSize),
- hashHaystackElement(y, bucketSize));
- }
-
- verify(otherFields.size() == 1);
-
- BSONElementSet all;
-
- // This is getFieldsDotted (plural not singular) since the object we're indexing
- // may be an array.
- obj.getFieldsDotted(otherFields[0], all);
-
- if (all.size() == 0) {
- // We're indexing a document that doesn't have the secondary non-geo field present.
- // XXX: do we want to add this even if all.size() > 0? result:empty search terms
- // match everything instead of only things w/empty search terms)
- addKey(root, BSONElement(), keys);
- } else {
- // Ex:If our secondary field is type: "foo" or type: {a:"foo", b:"bar"},
- // all.size()==1. We can query on the complete field.
- // Ex: If our secondary field is type: ["A", "B"] all.size()==2 and all has values
- // "A" and "B". The query looks for any of the fields in the array.
- for (BSONElementSet::iterator i = all.begin(); i != all.end(); ++i) {
- addKey(root, *i, keys);
- }
- }
- }
-
- // static
- int ExpressionKeysPrivate::hashHaystackElement(const BSONElement& e, double bucketSize) {
- uassert(16776, "geo field is not a number", e.isNumber());
- double d = e.numberDouble();
- d += 180;
- d /= bucketSize;
- return static_cast<int>(d);
+ string root;
+ {
+ BSONObjIterator i(loc.Obj());
+ BSONElement x = i.next();
+ BSONElement y = i.next();
+ root = makeHaystackString(hashHaystackElement(x, bucketSize),
+ hashHaystackElement(y, bucketSize));
}
- // static
- std::string ExpressionKeysPrivate::makeHaystackString(int hashedX, int hashedY) {
- mongoutils::str::stream ss;
- ss << hashedX << "_" << hashedY;
- return ss;
+ verify(otherFields.size() == 1);
+
+ BSONElementSet all;
+
+ // This is getFieldsDotted (plural not singular) since the object we're indexing
+ // may be an array.
+ obj.getFieldsDotted(otherFields[0], all);
+
+ if (all.size() == 0) {
+ // We're indexing a document that doesn't have the secondary non-geo field present.
+ // XXX: do we want to add this even if all.size() > 0? (result: empty search
+ // terms would match everything instead of only things w/ empty search terms)
+ addKey(root, BSONElement(), keys);
+ } else {
+ // Ex: If our secondary field is type: "foo" or type: {a:"foo", b:"bar"},
+ // all.size()==1. We can query on the complete field.
+ // Ex: If our secondary field is type: ["A", "B"] all.size()==2 and all has values
+ // "A" and "B". The query looks for any of the fields in the array.
+ for (BSONElementSet::iterator i = all.begin(); i != all.end(); ++i) {
+ addKey(root, *i, keys);
+ }
}
-
- void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
- const BSONObj& keyPattern,
- const S2IndexingParams& params,
- BSONObjSet* keys) {
- BSONObjSet keysToAdd;
-
- // Does one of our documents have a geo field?
- bool haveGeoField = false;
-
- // We output keys in the same order as the fields we index.
- BSONObjIterator i(keyPattern);
- while (i.more()) {
- BSONElement e = i.next();
-
- // First, we get the keys that this field adds. Either they're added literally from
- // the value of the field, or they're transformed if the field is geo.
- BSONElementSet fieldElements;
- // false means Don't expand the last array, duh.
- obj.getFieldsDotted(e.fieldName(), fieldElements, false);
-
- BSONObjSet keysForThisField;
- if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
- if (S2_INDEX_VERSION_2 == params.indexVersion) {
- // For V2,
- // geo: null,
- // geo: undefined
- // geo: []
- // should all behave like there is no geo field. So we look for these cases and
- // throw out the field elements if we find them.
- if (1 == fieldElements.size()) {
- BSONElement elt = *fieldElements.begin();
- // Get the :null and :undefined cases.
- if (elt.isNull() || Undefined == elt.type()) {
+}
+
+// static
+int ExpressionKeysPrivate::hashHaystackElement(const BSONElement& e, double bucketSize) {
+ uassert(16776, "geo field is not a number", e.isNumber());
+ double d = e.numberDouble();
+ d += 180;
+ d /= bucketSize;
+ return static_cast<int>(d);
+}
+
+// static
+std::string ExpressionKeysPrivate::makeHaystackString(int hashedX, int hashedY) {
+ mongoutils::str::stream ss;
+ ss << hashedX << "_" << hashedY;
+ return ss;
+}
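
The bucket id is just the coordinate shifted by 180 and truncated to a bucketSize multiple. A standalone sketch of the arithmetic (no mongo headers needed; coordinates illustrative):

    #include <iostream>
    #include <sstream>

    // Mirrors hashHaystackElement and makeHaystackString for plain doubles.
    int hashBucket(double coord, double bucketSize) {
        return static_cast<int>((coord + 180) / bucketSize);
    }

    int main() {
        std::ostringstream ss;
        ss << hashBucket(-73.97, 1.0) << "_" << hashBucket(40.77, 1.0);
        std::cout << ss.str() << std::endl;  // prints "106_220"
        return 0;
    }
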
+
+void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
+ const BSONObj& keyPattern,
+ const S2IndexingParams& params,
+ BSONObjSet* keys) {
+ BSONObjSet keysToAdd;
+
+ // Does one of our documents have a geo field?
+ bool haveGeoField = false;
+
+ // We output keys in the same order as the fields we index.
+ BSONObjIterator i(keyPattern);
+ while (i.more()) {
+ BSONElement e = i.next();
+
+ // First, we get the keys that this field adds. Either they're added literally from
+ // the value of the field, or they're transformed if the field is geo.
+ BSONElementSet fieldElements;
+ // 'false' means: do not expand the last array.
+ obj.getFieldsDotted(e.fieldName(), fieldElements, false);
+
+ BSONObjSet keysForThisField;
+ if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
+ if (S2_INDEX_VERSION_2 == params.indexVersion) {
+ // For V2,
+ // geo: null,
+ // geo: undefined
+ // geo: []
+ // should all behave like there is no geo field. So we look for these cases and
+ // throw out the field elements if we find them.
+ if (1 == fieldElements.size()) {
+ BSONElement elt = *fieldElements.begin();
+ // Get the :null and :undefined cases.
+ if (elt.isNull() || Undefined == elt.type()) {
+ fieldElements.clear();
+ } else if (elt.isABSONObj()) {
+ // And this is the :[] case.
+ BSONObj obj = elt.Obj();
+ if (0 == obj.nFields()) {
fieldElements.clear();
}
- else if (elt.isABSONObj()) {
- // And this is the :[] case.
- BSONObj obj = elt.Obj();
- if (0 == obj.nFields()) {
- fieldElements.clear();
- }
- }
- }
-
- // V2 2dsphere indices require that at least one geo field to be present in a
- // document in order to index it.
- if (fieldElements.size() > 0) {
- haveGeoField = true;
}
}
- getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
- } else {
- getS2LiteralKeys(fieldElements, &keysForThisField);
+ // V2 2dsphere indices require at least one geo field to be present in a
+ // document in order to index it.
+ if (fieldElements.size() > 0) {
+ haveGeoField = true;
+ }
}
- // We expect there to be the missing field element present in the keys if data is
- // missing. So, this should be non-empty.
- verify(!keysForThisField.empty());
+ getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
+ } else {
+ getS2LiteralKeys(fieldElements, &keysForThisField);
+ }
- // We take the Cartesian product of all of the keys. This requires that we have
- // some keys to take the Cartesian product with. If keysToAdd.empty(), we
- // initialize it.
- if (keysToAdd.empty()) {
- keysToAdd = keysForThisField;
- continue;
- }
+ // Even when the field is missing from the document, the generators above emit a
+ // "missing" key element, so this set should never be empty.
+ verify(!keysForThisField.empty());
- BSONObjSet updatedKeysToAdd;
- for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end();
- ++it) {
- for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
- newIt!= keysForThisField.end(); ++newIt) {
- BSONObjBuilder b;
- b.appendElements(*it);
- b.append(newIt->firstElement());
- updatedKeysToAdd.insert(b.obj());
- }
- }
- keysToAdd = updatedKeysToAdd;
+ // We take the Cartesian product of all of the keys. This requires that we have
+ // some keys to take the Cartesian product with. If keysToAdd.empty(), we
+ // initialize it.
+ if (keysToAdd.empty()) {
+ keysToAdd = keysForThisField;
+ continue;
}
- // Make sure that if we're V2 there's at least one geo field present in the doc.
- if (S2_INDEX_VERSION_2 == params.indexVersion) {
- if (!haveGeoField) {
- return;
+ BSONObjSet updatedKeysToAdd;
+ for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
+ for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
+ newIt != keysForThisField.end();
+ ++newIt) {
+ BSONObjBuilder b;
+ b.appendElements(*it);
+ b.append(newIt->firstElement());
+ updatedKeysToAdd.insert(b.obj());
}
}
+ keysToAdd = updatedKeysToAdd;
+ }
- if (keysToAdd.size() > params.maxKeysPerInsert) {
- warning() << "Insert of geo object generated a high number of keys."
- << " num keys: " << keysToAdd.size()
- << " obj inserted: " << obj;
+ // Make sure that if we're V2 there's at least one geo field present in the doc.
+ if (S2_INDEX_VERSION_2 == params.indexVersion) {
+ if (!haveGeoField) {
+ return;
}
+ }
- *keys = keysToAdd;
+ if (keysToAdd.size() > params.maxKeysPerInsert) {
+ warning() << "Insert of geo object generated a high number of keys."
+ << " num keys: " << keysToAdd.size() << " obj inserted: " << obj;
}
+ *keys = keysToAdd;
+}
+
} // namespace mongo
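
For a compound 2dsphere index the per-field key sets multiply: a geometry covered by three cells alongside a two-element trailing array produces six keys. A standalone sketch of the product step above (std::string stands in for BSON keys; values illustrative):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        // Per-field key sets: e.g. covering cells, then literal keys.
        std::vector<std::set<std::string>> perField = {{"c1", "c2", "c3"},
                                                       {"a", "b"}};
        std::set<std::string> keysToAdd;
        for (const std::set<std::string>& field : perField) {
            if (keysToAdd.empty()) {
                keysToAdd = field;  // the first field seeds the product
                continue;
            }
            std::set<std::string> updated;
            for (const std::string& k : keysToAdd)
                for (const std::string& f : field)
                    updated.insert(k + "|" + f);
            keysToAdd = updated;
        }
        std::cout << keysToAdd.size() << std::endl;  // prints 6
        return 0;
    }
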
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h
index 6e3fb1ea9a0..9aba295b579 100644
--- a/src/mongo/db/index/expression_keys_private.h
+++ b/src/mongo/db/index/expression_keys_private.h
@@ -36,95 +36,94 @@
namespace mongo {
- struct TwoDIndexingParams;
- struct S2IndexingParams;
+struct TwoDIndexingParams;
+struct S2IndexingParams;
- namespace fts {
+namespace fts {
- class FTSSpec;
+class FTSSpec;
- } // namespace fts
+} // namespace fts
+
+/**
+ * Do not use this class or any of its methods directly. The key generation of btree-indexed
+ * expression indices is kept outside of the access method for testing and for upgrade
+ * compatibility checking.
+ */
+class ExpressionKeysPrivate {
+public:
+ //
+ // 2d
+ //
+
+ static void get2DKeys(const BSONObj& obj,
+ const TwoDIndexingParams& params,
+ BSONObjSet* keys,
+ std::vector<BSONObj>* locs);
+
+ //
+ // FTS
+ //
+
+ static void getFTSKeys(const BSONObj& obj, const fts::FTSSpec& ftsSpec, BSONObjSet* keys);
+
+ //
+ // Hash
+ //
+
+ /**
+ * Generates keys for hash access method.
+ */
+ static void getHashKeys(const BSONObj& obj,
+ const std::string& hashedField,
+ HashSeed seed,
+ int hashVersion,
+ bool isSparse,
+ BSONObjSet* keys);
/**
- * Do not use this class or any of its methods directly. The key generation of btree-indexed
- * expression indices is kept outside of the access method for testing and for upgrade
- * compatibility checking.
+ * Hashing function used by both getHashKeys and the cursors we create.
+ * Exposed for testing in dbtests/namespacetests.cpp and
+ * so mongo/db/index_legacy.cpp can use it.
*/
- class ExpressionKeysPrivate {
- public:
-
- //
- // 2d
- //
-
- static void get2DKeys(const BSONObj &obj,
- const TwoDIndexingParams& params,
- BSONObjSet* keys,
- std::vector<BSONObj>* locs);
-
- //
- // FTS
- //
-
- static void getFTSKeys(const BSONObj &obj, const fts::FTSSpec& ftsSpec, BSONObjSet* keys);
-
- //
- // Hash
- //
-
- /**
- * Generates keys for hash access method.
- */
- static void getHashKeys(const BSONObj& obj,
- const std::string& hashedField,
- HashSeed seed,
- int hashVersion,
- bool isSparse,
+ static long long int makeSingleHashKey(const BSONElement& e, HashSeed seed, int v);
+
+ //
+ // Haystack
+ //
+
+ /**
+ * Generates keys for haystack access method.
+ */
+ static void getHaystackKeys(const BSONObj& obj,
+ const std::string& geoField,
+ const std::vector<std::string>& otherFields,
+ double bucketSize,
BSONObjSet* keys);
- /**
- * Hashing function used by both getHashKeys and the cursors we create.
- * Exposed for testing in dbtests/namespacetests.cpp and
- * so mongo/db/index_legacy.cpp can use it.
- */
- static long long int makeSingleHashKey(const BSONElement& e, HashSeed seed, int v);
-
- //
- // Haystack
- //
-
- /**
- * Generates keys for haystack access method.
- */
- static void getHaystackKeys(const BSONObj& obj,
- const std::string& geoField,
- const std::vector<std::string>& otherFields,
- double bucketSize,
- BSONObjSet* keys);
-
- /**
- * Returns a hash of a BSON element.
- * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
- */
- static int hashHaystackElement(const BSONElement& e, double bucketSize);
-
- /**
- * Joins two strings using underscore as separator.
- * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
- */
- static std::string makeHaystackString(int hashedX, int hashedY);
-
- //
- // S2
- //
-
- /**
- * Generates keys for S2 access method.
- */
- static void getS2Keys(const BSONObj& obj,
- const BSONObj& keyPattern,
- const S2IndexingParams& params,
- BSONObjSet* keys);
- };
+ /**
+ * Returns a hash of a BSON element.
+ * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
+ */
+ static int hashHaystackElement(const BSONElement& e, double bucketSize);
+
+ /**
+ * Joins two strings using underscore as separator.
+ * Used by getHaystackKeys and HaystackAccessMethod::searchCommand.
+ */
+ static std::string makeHaystackString(int hashedX, int hashedY);
+
+ //
+ // S2
+ //
+
+ /**
+ * Generates keys for S2 access method.
+ */
+ static void getS2Keys(const BSONObj& obj,
+ const BSONObj& keyPattern,
+ const S2IndexingParams& params,
+ BSONObjSet* keys);
+};
} // namespace mongo
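
All of these generators share one shape: document in, flat BSONObjSet of keys out. A usage sketch against this header, tying together the haystack helpers above (assumes the mongo codebase; names and values illustrative):

    // Sketch only.
    BSONObjSet keys;
    std::vector<std::string> otherFields(1, "type");
    ExpressionKeysPrivate::getHaystackKeys(
        BSON("pos" << BSON_ARRAY(-73.97 << 40.77) << "type" << "cafe"),
        "pos", otherFields, 1.0 /* bucketSize */, &keys);
    // keys holds one entry of the form { "": "106_220", "": "cafe" }.
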
diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h
index e7b190f5eb9..a7613b027d3 100644
--- a/src/mongo/db/index/expression_params.h
+++ b/src/mongo/db/index/expression_params.h
@@ -34,150 +34,151 @@
namespace mongo {
- class ExpressionParams {
- public:
- static void parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out) {
- BSONObjIterator i(infoObj.getObjectField("key"));
-
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_2D == e.valuestr()) {
- uassert(16800, "can't have 2 geo fields", out->geo.size() == 0);
- uassert(16801, "2d has to be first in index", out->other.size() == 0);
- out->geo = e.fieldName();
- } else {
- int order = 1;
- if (e.isNumber()) {
- order = static_cast<int>(e.Number());
- }
- out->other.push_back(std::make_pair(e.fieldName(), order));
+class ExpressionParams {
+public:
+ static void parseTwoDParams(const BSONObj& infoObj, TwoDIndexingParams* out) {
+ BSONObjIterator i(infoObj.getObjectField("key"));
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_2D == e.valuestr()) {
+ uassert(16800, "can't have 2 geo fields", out->geo.size() == 0);
+ uassert(16801, "2d has to be first in index", out->other.size() == 0);
+ out->geo = e.fieldName();
+ } else {
+ int order = 1;
+ if (e.isNumber()) {
+ order = static_cast<int>(e.Number());
}
+ out->other.push_back(std::make_pair(e.fieldName(), order));
}
-
- uassert(16802, "no geo field specified", out->geo.size());
-
- GeoHashConverter::Parameters hashParams;
- Status paramStatus = GeoHashConverter::parseParameters(infoObj, &hashParams);
- uassertStatusOK(paramStatus);
-
- out->geoHashConverter.reset(new GeoHashConverter(hashParams));
}
- static void parseHashParams(const BSONObj& infoObj,
- HashSeed* seedOut,
- int* versionOut,
- std::string* fieldOut) {
-
- // Default _seed to DEFAULT_HASH_SEED if "seed" is not included in the index spec
- // or if the value of "seed" is not a number
-
- // *** WARNING ***
- // Choosing non-default seeds will invalidate hashed sharding
- // Changing the seed default will break existing indexes and sharded collections
- if (infoObj["seed"].eoo()) {
- *seedOut = BSONElementHasher::DEFAULT_HASH_SEED;
- }
- else {
- *seedOut = infoObj["seed"].numberInt();
- }
-
- // In case we have hashed indexes based on other hash functions in the future, we store
- // a hashVersion number. If hashVersion changes, "makeSingleHashKey" will need to change
- // accordingly. Defaults to 0 if "hashVersion" is not included in the index spec or if
- // the value of "hashversion" is not a number
- *versionOut = infoObj["hashVersion"].numberInt();
-
- // Get the hashfield name
- BSONElement firstElt = infoObj.getObjectField("key").firstElement();
- massert(16765, "error: no hashed index field",
- firstElt.str().compare(IndexNames::HASHED) == 0);
- *fieldOut = firstElt.fieldName();
+ uassert(16802, "no geo field specified", out->geo.size());
+
+ GeoHashConverter::Parameters hashParams;
+ Status paramStatus = GeoHashConverter::parseParameters(infoObj, &hashParams);
+ uassertStatusOK(paramStatus);
+
+ out->geoHashConverter.reset(new GeoHashConverter(hashParams));
+ }
+
+ static void parseHashParams(const BSONObj& infoObj,
+ HashSeed* seedOut,
+ int* versionOut,
+ std::string* fieldOut) {
+ // Default _seed to DEFAULT_HASH_SEED if "seed" is not included in the index spec
+ // or if the value of "seed" is not a number
+
+ // *** WARNING ***
+ // Choosing non-default seeds will invalidate hashed sharding
+ // Changing the seed default will break existing indexes and sharded collections
+ if (infoObj["seed"].eoo()) {
+ *seedOut = BSONElementHasher::DEFAULT_HASH_SEED;
+ } else {
+ *seedOut = infoObj["seed"].numberInt();
}
- static void parseHaystackParams(const BSONObj& infoObj,
- std::string* geoFieldOut,
- std::vector<std::string>* otherFieldsOut,
- double* bucketSizeOut) {
-
- BSONElement e = infoObj["bucketSize"];
- uassert(16777, "need bucketSize", e.isNumber());
- *bucketSizeOut = e.numberDouble();
- uassert(16769, "bucketSize cannot be zero", *bucketSizeOut != 0.0);
-
- // Example:
- // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
- BSONObjIterator i(infoObj.getObjectField("key"));
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_HAYSTACK == e.valuestr()) {
- uassert(16770, "can't have more than one geo field", geoFieldOut->size() == 0);
- uassert(16771, "the geo field has to be first in index",
- otherFieldsOut->size() == 0);
- *geoFieldOut = e.fieldName();
- } else {
- uassert(16772, "geoSearch can only have 1 non-geo field for now",
- otherFieldsOut->size() == 0);
- otherFieldsOut->push_back(e.fieldName());
- }
+ // In case we have hashed indexes based on other hash functions in the future, we store
+ // a hashVersion number. If hashVersion changes, "makeSingleHashKey" will need to change
+ // accordingly. Defaults to 0 if "hashVersion" is not included in the index spec or if
+ // the value of "hashVersion" is not a number.
+ *versionOut = infoObj["hashVersion"].numberInt();
+
+ // Get the hashfield name
+ BSONElement firstElt = infoObj.getObjectField("key").firstElement();
+ massert(
+ 16765, "error: no hashed index field", firstElt.str().compare(IndexNames::HASHED) == 0);
+ *fieldOut = firstElt.fieldName();
+ }
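
Both knobs default when absent from the spec, so the common case is just a key pattern. A sketch (assumes the mongo codebase; spec illustrative):

    // Sketch only.
    HashSeed seed;
    int version;
    std::string field;
    ExpressionParams::parseHashParams(
        BSON("key" << BSON("a" << "hashed")), &seed, &version, &field);
    // seed == BSONElementHasher::DEFAULT_HASH_SEED, version == 0, field == "a".
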
+
+ static void parseHaystackParams(const BSONObj& infoObj,
+ std::string* geoFieldOut,
+ std::vector<std::string>* otherFieldsOut,
+ double* bucketSizeOut) {
+ BSONElement e = infoObj["bucketSize"];
+ uassert(16777, "need bucketSize", e.isNumber());
+ *bucketSizeOut = e.numberDouble();
+ uassert(16769, "bucketSize cannot be zero", *bucketSizeOut != 0.0);
+
+ // Example:
+ // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ BSONObjIterator i(infoObj.getObjectField("key"));
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_HAYSTACK == e.valuestr()) {
+ uassert(16770, "can't have more than one geo field", geoFieldOut->size() == 0);
+ uassert(
+ 16771, "the geo field has to be first in index", otherFieldsOut->size() == 0);
+ *geoFieldOut = e.fieldName();
+ } else {
+ uassert(16772,
+ "geoSearch can only have 1 non-geo field for now",
+ otherFieldsOut->size() == 0);
+ otherFieldsOut->push_back(e.fieldName());
}
}
-
- static void parse2dsphereParams(const BSONObj& infoObj,
- S2IndexingParams* out) {
- // Set up basic params.
- out->maxKeysPerInsert = 200;
-
- // This is advisory.
- out->maxCellsInCovering = 50;
-
- // Near distances are specified in meters...sometimes.
- out->radius = kRadiusOfEarthInMeters;
-
- // These are not advisory.
- out->finestIndexedLevel = configValueWithDefaultInt(infoObj,
- "finestIndexedLevel",
- S2::kAvgEdge.GetClosestLevel(500.0 / out->radius));
-
- out->coarsestIndexedLevel = configValueWithDefaultInt(infoObj,
- "coarsestIndexedLevel",
- S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / out->radius));
-
- static const std::string kIndexVersionFieldName("2dsphereIndexVersion");
-
- // Determine which version of this index we're using. If none was set in the descriptor,
- // assume S2_INDEX_VERSION_1 (alas, the first version predates the existence of the version
- // field).
- out->indexVersion = static_cast<S2IndexVersion>(configValueWithDefaultInt(infoObj,
- kIndexVersionFieldName,
- S2_INDEX_VERSION_1));
-
- uassert(16747, "coarsestIndexedLevel must be >= 0", out->coarsestIndexedLevel >= 0);
- uassert(16748, "finestIndexedLevel must be <= 30", out->finestIndexedLevel <= 30);
- uassert(16749, "finestIndexedLevel must be >= coarsestIndexedLevel",
- out->finestIndexedLevel >= out->coarsestIndexedLevel);
-
- massert(17395,
- mongoutils::str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << out->indexVersion << " }, only support versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << "]",
- out->indexVersion == S2_INDEX_VERSION_2 || out->indexVersion == S2_INDEX_VERSION_1);
- }
-
- private:
- static double configValueWithDefaultDouble(const BSONObj& infoObj,
- const std::string& name,
- double def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberDouble(); }
- return def;
+ }
+
+ static void parse2dsphereParams(const BSONObj& infoObj, S2IndexingParams* out) {
+ // Set up basic params.
+ out->maxKeysPerInsert = 200;
+
+ // This is advisory.
+ out->maxCellsInCovering = 50;
+
+ // Near distances are specified in meters...sometimes.
+ out->radius = kRadiusOfEarthInMeters;
+
+ // These are not advisory.
+ out->finestIndexedLevel = configValueWithDefaultInt(
+ infoObj, "finestIndexedLevel", S2::kAvgEdge.GetClosestLevel(500.0 / out->radius));
+
+ out->coarsestIndexedLevel =
+ configValueWithDefaultInt(infoObj,
+ "coarsestIndexedLevel",
+ S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / out->radius));
+
+ static const std::string kIndexVersionFieldName("2dsphereIndexVersion");
+
+ // Determine which version of this index we're using. If none was set in the descriptor,
+ // assume S2_INDEX_VERSION_1 (alas, the first version predates the existence of the version
+ // field).
+ out->indexVersion = static_cast<S2IndexVersion>(
+ configValueWithDefaultInt(infoObj, kIndexVersionFieldName, S2_INDEX_VERSION_1));
+
+ uassert(16747, "coarsestIndexedLevel must be >= 0", out->coarsestIndexedLevel >= 0);
+ uassert(16748, "finestIndexedLevel must be <= 30", out->finestIndexedLevel <= 30);
+ uassert(16749,
+ "finestIndexedLevel must be >= coarsestIndexedLevel",
+ out->finestIndexedLevel >= out->coarsestIndexedLevel);
+
+ massert(17395,
+ mongoutils::str::stream() << "unsupported geo index version { "
+ << kIndexVersionFieldName << " : " << out->indexVersion
+ << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "]",
+ out->indexVersion == S2_INDEX_VERSION_2 || out->indexVersion == S2_INDEX_VERSION_1);
+ }
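
Concretely, a bare spec picks up every default above; only the version usually needs stating. A sketch (assumes the mongo codebase; spec illustrative):

    // Sketch only.
    S2IndexingParams params;
    ExpressionParams::parse2dsphereParams(
        BSON("key" << BSON("geo" << "2dsphere") << "2dsphereIndexVersion" << 2),
        &params);
    // params.indexVersion == S2_INDEX_VERSION_2; the level defaults target
    // roughly 500 m cells at the finest and 100 km at the coarsest.
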
+
+private:
+ static double configValueWithDefaultDouble(const BSONObj& infoObj,
+ const std::string& name,
+ double def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberDouble();
}
+ return def;
+ }
- static int configValueWithDefaultInt(const BSONObj& infoObj, const std::string& name, int def) {
- BSONElement e = infoObj[name];
- if (e.isNumber()) { return e.numberInt(); }
- return def;
+ static int configValueWithDefaultInt(const BSONObj& infoObj, const std::string& name, int def) {
+ BSONElement e = infoObj[name];
+ if (e.isNumber()) {
+ return e.numberInt();
}
-
- };
+ return def;
+ }
+};
} // namespace mongo
diff --git a/src/mongo/db/index/external_key_generator.cpp b/src/mongo/db/index/external_key_generator.cpp
index 4173880d00b..9f93bd6f439 100644
--- a/src/mongo/db/index/external_key_generator.cpp
+++ b/src/mongo/db/index/external_key_generator.cpp
@@ -42,75 +42,68 @@
namespace mongo {
namespace {
- void getKeysForUpgradeChecking(const BSONObj& infoObj,
- const BSONObj& doc,
- BSONObjSet* keys) {
-
- BSONObj keyPattern = infoObj.getObjectField("key");
-
- string type = IndexNames::findPluginName(keyPattern);
-
- if (IndexNames::GEO_2D == type) {
- TwoDIndexingParams params;
- ExpressionParams::parseTwoDParams(infoObj, &params);
- ExpressionKeysPrivate::get2DKeys(doc, params, keys, NULL);
- }
- else if (IndexNames::GEO_HAYSTACK == type) {
- string geoField;
- vector<string> otherFields;
- double bucketSize;
- ExpressionParams::parseHaystackParams(infoObj, &geoField, &otherFields, &bucketSize);
- ExpressionKeysPrivate::getHaystackKeys(doc, geoField, otherFields, bucketSize, keys);
- }
- else if (IndexNames::GEO_2DSPHERE == type) {
- S2IndexingParams params;
- ExpressionParams::parse2dsphereParams(infoObj, &params);
- ExpressionKeysPrivate::getS2Keys(doc, keyPattern, params, keys);
+void getKeysForUpgradeChecking(const BSONObj& infoObj, const BSONObj& doc, BSONObjSet* keys) {
+ BSONObj keyPattern = infoObj.getObjectField("key");
+
+ string type = IndexNames::findPluginName(keyPattern);
+
+ if (IndexNames::GEO_2D == type) {
+ TwoDIndexingParams params;
+ ExpressionParams::parseTwoDParams(infoObj, &params);
+ ExpressionKeysPrivate::get2DKeys(doc, params, keys, NULL);
+ } else if (IndexNames::GEO_HAYSTACK == type) {
+ string geoField;
+ vector<string> otherFields;
+ double bucketSize;
+ ExpressionParams::parseHaystackParams(infoObj, &geoField, &otherFields, &bucketSize);
+ ExpressionKeysPrivate::getHaystackKeys(doc, geoField, otherFields, bucketSize, keys);
+ } else if (IndexNames::GEO_2DSPHERE == type) {
+ S2IndexingParams params;
+ ExpressionParams::parse2dsphereParams(infoObj, &params);
+ ExpressionKeysPrivate::getS2Keys(doc, keyPattern, params, keys);
+ } else if (IndexNames::TEXT == type) {
+ fts::FTSSpec spec(infoObj);
+ ExpressionKeysPrivate::getFTSKeys(doc, spec, keys);
+ } else if (IndexNames::HASHED == type) {
+ HashSeed seed;
+ int version;
+ string field;
+ ExpressionParams::parseHashParams(infoObj, &seed, &version, &field);
+ ExpressionKeysPrivate::getHashKeys(
+ doc, field, seed, version, infoObj["sparse"].trueValue(), keys);
+ } else {
+ invariant(IndexNames::BTREE == type);
+
+ std::vector<const char*> fieldNames;
+ std::vector<BSONElement> fixed;
+ BSONObjIterator keyIt(keyPattern);
+ while (keyIt.more()) {
+ BSONElement patternElt = keyIt.next();
+ fieldNames.push_back(patternElt.fieldName());
+ fixed.push_back(BSONElement());
}
- else if (IndexNames::TEXT == type) {
- fts::FTSSpec spec(infoObj);
- ExpressionKeysPrivate::getFTSKeys(doc, spec, keys);
- }
- else if (IndexNames::HASHED == type) {
- HashSeed seed;
- int version;
- string field;
- ExpressionParams::parseHashParams(infoObj, &seed, &version, &field);
- ExpressionKeysPrivate::getHashKeys(doc, field, seed, version, infoObj["sparse"].trueValue(), keys);
- }
- else {
- invariant(IndexNames::BTREE == type);
-
- std::vector<const char *> fieldNames;
- std::vector<BSONElement> fixed;
- BSONObjIterator keyIt(keyPattern);
- while (keyIt.more()) {
- BSONElement patternElt = keyIt.next();
- fieldNames.push_back(patternElt.fieldName());
- fixed.push_back(BSONElement());
- }
- // XXX: do we care about version
- BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, infoObj["sparse"].trueValue());
+ // XXX: do we care about the index version here?
+ BtreeKeyGeneratorV1 keyGen(fieldNames, fixed, infoObj["sparse"].trueValue());
- keyGen.getKeys(doc, keys);
- }
+ keyGen.getKeys(doc, keys);
}
-
- // cloned from key.cpp to build the below set
- const int binDataCodeLengths[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32};
- const std::set<int> acceptableBinDataLengths(
- binDataCodeLengths,
- binDataCodeLengths + (sizeof(binDataCodeLengths) / sizeof(int)));
-
- // modified version of the KeyV1Owned constructor that returns the would-be-key's datasize()
- int keyV1Size(const BSONObj& obj) {
- BSONObj::iterator i(obj);
- int size = 0;
- const int traditionalSize = obj.objsize() + 1;
- while (i.more()) {
- BSONElement e = i.next();
- switch (e.type()) {
+}
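
The dispatch keys off the plugin name embedded in the key pattern's first special value. A sketch (assumes the mongo codebase; patterns illustrative):

    // Sketch only.
    std::string type = IndexNames::findPluginName(BSON("loc" << "2dsphere"));
    // type == IndexNames::GEO_2DSPHERE. Plain patterns such as { a: 1, b: -1 }
    // map to IndexNames::BTREE, the invariant'd fallback above.
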
+
+// cloned from key.cpp to build the below set
+const int binDataCodeLengths[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32};
+const std::set<int> acceptableBinDataLengths(binDataCodeLengths,
+ binDataCodeLengths +
+ (sizeof(binDataCodeLengths) / sizeof(int)));
+
+// modified version of the KeyV1Owned constructor that returns the would-be-key's datasize()
+int keyV1Size(const BSONObj& obj) {
+ BSONObj::iterator i(obj);
+ int size = 0;
+ const int traditionalSize = obj.objsize() + 1;
+ while (i.more()) {
+ BSONElement e = i.next();
+ switch (e.type()) {
case MinKey:
case jstNULL:
case MaxKey:
@@ -121,86 +114,82 @@ namespace {
size += 1;
size += OID::kOIDSize;
break;
- case BinData:
- {
- int t = e.binDataType();
- // 0-7 and 0x80 to 0x87 are supported by Key
- if( (t & 0x78) == 0 && t != ByteArrayDeprecated ) {
- int len;
- e.binData(len);
- if (acceptableBinDataLengths.count(len)) {
- size += 1;
- size += 1;
- size += len;
- break;
- }
+ case BinData: {
+ int t = e.binDataType();
+ // 0-7 and 0x80 to 0x87 are supported by Key
+ if ((t & 0x78) == 0 && t != ByteArrayDeprecated) {
+ int len;
+ e.binData(len);
+ if (acceptableBinDataLengths.count(len)) {
+ size += 1;
+ size += 1;
+ size += len;
+ break;
}
- return traditionalSize;
}
+ return traditionalSize;
+ }
case Date:
size += 1;
size += sizeof(Date_t);
break;
- case String:
- {
- size += 1;
- // note we do not store the terminating null, to save space.
- unsigned x = (unsigned) e.valuestrsize() - 1;
- if (x > 255) {
- return traditionalSize;
- }
- size += 1;
- size += x;
- break;
+ case String: {
+ size += 1;
+ // note we do not store the terminating null, to save space.
+ unsigned x = (unsigned)e.valuestrsize() - 1;
+ if (x > 255) {
+ return traditionalSize;
}
+ size += 1;
+ size += x;
+ break;
+ }
case NumberInt:
size += 1;
size += sizeof(double);
break;
- case NumberLong:
- {
- long long n = e._numberLong();
- long long m = 2LL << 52;
- if( n >= m || n <= -m ) {
- // can't represent exactly as a double
- return traditionalSize;
- }
- size += 1;
- size += sizeof(double);
- break;
+ case NumberLong: {
+ long long n = e._numberLong();
+ long long m = 2LL << 52;
+ if (n >= m || n <= -m) {
+ // can't represent exactly as a double
+ return traditionalSize;
}
- case NumberDouble:
- {
- double d = e._numberDouble();
- if (std::isnan(d)) {
- return traditionalSize;
- }
- size += 1;
- size += sizeof(double);
- break;
+ size += 1;
+ size += sizeof(double);
+ break;
+ }
+ case NumberDouble: {
+ double d = e._numberDouble();
+ if (std::isnan(d)) {
+ return traditionalSize;
}
+ size += 1;
+ size += sizeof(double);
+ break;
+ }
default:
// if other types involved, store as traditional BSON
return traditionalSize;
- }
}
- return size;
}
+ return size;
+}
-} // namespace
-
- bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc) {
- BSONObjSet keys;
- getKeysForUpgradeChecking(index, doc, &keys);
+} // namespace
- int largestKeySize = 0;
+bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc) {
+ BSONObjSet keys;
+ getKeysForUpgradeChecking(index, doc, &keys);
- for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
- largestKeySize = std::max(largestKeySize, keyV1Size(*it));
- }
+ int largestKeySize = 0;
- // BtreeData_V1::KeyMax is 1024
- return largestKeySize > 1024;
+ for (BSONObjSet::const_iterator it = keys.begin(); it != keys.end(); ++it) {
+ largestKeySize = std::max(largestKeySize, keyV1Size(*it));
}
+ // BtreeData_V1::KeyMax is 1024
+ return largestKeySize > 1024;
+}
+
} // namespace mongo
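
keyV1Size falls back to the full BSON size whenever a value cannot be stored compactly, e.g. a NumberLong that a double cannot represent exactly. A standalone sketch of that particular cutoff (values illustrative):

    #include <iostream>

    int main() {
        long long m = 2LL << 52;        // 2^53, the double-precision bound
        long long n = (2LL << 52) + 1;  // one past it
        // keyV1Size stores n as 1 type byte + an 8-byte double only when it
        // round-trips; otherwise the whole object is kept (traditionalSize).
        std::cout << ((n >= m || n <= -m) ? "traditional" : "compact")
                  << std::endl;  // prints "traditional"
        return 0;
    }
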
diff --git a/src/mongo/db/index/external_key_generator.h b/src/mongo/db/index/external_key_generator.h
index a340fd56e0e..9dad28f7413 100644
--- a/src/mongo/db/index/external_key_generator.h
+++ b/src/mongo/db/index/external_key_generator.h
@@ -40,7 +40,7 @@
*/
namespace mongo {
- // Returns whether or not the largest key the index will generate for the document is too large.
- bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc);
+// Returns whether or not the largest key the index will generate for the document is too large.
+bool isAnyIndexKeyTooLarge(const BSONObj& index, const BSONObj& doc);
} // namespace mongo
diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp
index 52bc3424518..9676fcbec45 100644
--- a/src/mongo/db/index/fts_access_method.cpp
+++ b/src/mongo/db/index/fts_access_method.cpp
@@ -31,11 +31,11 @@
namespace mongo {
- FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree )
- : IndexAccessMethod(btreeState, btree), _ftsSpec(btreeState->descriptor()->infoObj()) { }
+FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree), _ftsSpec(btreeState->descriptor()->infoObj()) {}
- void FTSAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getFTSKeys(obj, _ftsSpec, keys);
- }
+void FTSAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getFTSKeys(obj, _ftsSpec, keys);
+}
} // namespace mongo
diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h
index 9cf3e825c89..794d1efe360 100644
--- a/src/mongo/db/index/fts_access_method.h
+++ b/src/mongo/db/index/fts_access_method.h
@@ -36,17 +36,19 @@
namespace mongo {
- class FTSAccessMethod : public IndexAccessMethod {
- public:
- FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+class FTSAccessMethod : public IndexAccessMethod {
+public:
+ FTSAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- const fts::FTSSpec& getSpec() const { return _ftsSpec; }
+ const fts::FTSSpec& getSpec() const {
+ return _ftsSpec;
+ }
- private:
- // Implemented:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ // Implemented:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- fts::FTSSpec _ftsSpec;
- };
+ fts::FTSSpec _ftsSpec;
+};
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index 47504706590..8262c9019ab 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -33,26 +33,25 @@
namespace mongo {
- HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- // We can change these if the single-field limitation is lifted later.
- uassert(16763, "Currently only single field hashed index supported.",
- 1 == descriptor->getNumFields());
-
- uassert(16764, "Currently hashed indexes cannot guarantee uniqueness. Use a regular index.",
- !descriptor->unique());
-
- ExpressionParams::parseHashParams(descriptor->infoObj(),
- &_seed,
- &_hashVersion,
- &_hashedField);
- }
-
- void HashAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getHashKeys(obj, _hashedField, _seed, _hashVersion, _descriptor->isSparse(), keys);
- }
+HashAccessMethod::HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ // We can change these if the single-field limitation is lifted later.
+ uassert(16763,
+ "Currently only single field hashed index supported.",
+ 1 == descriptor->getNumFields());
+
+ uassert(16764,
+ "Currently hashed indexes cannot guarantee uniqueness. Use a regular index.",
+ !descriptor->unique());
+
+ ExpressionParams::parseHashParams(descriptor->infoObj(), &_seed, &_hashVersion, &_hashedField);
+}
+
+void HashAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getHashKeys(
+ obj, _hashedField, _seed, _hashVersion, _descriptor->isSparse(), keys);
+}
} // namespace mongo
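
The two uasserts in the constructor reject the compound and unique variants up front; in shell terms (collection name illustrative):

    // Accepted:  db.foo.ensureIndex({ a: "hashed" })
    // Rejected:  db.foo.ensureIndex({ a: "hashed", b: 1 })             // 16763
    // Rejected:  db.foo.ensureIndex({ a: "hashed" }, { unique: true }) // 16764
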
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 3e986cbc1b0..ea3f36bb647 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -38,26 +38,26 @@
namespace mongo {
- /**
- * This is the access method for "hashed" indices.
- */
- class HashAccessMethod : public IndexAccessMethod {
- public:
- HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+/**
+ * This is the access method for "hashed" indices.
+ */
+class HashAccessMethod : public IndexAccessMethod {
+public:
+ HashAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- // Only one of our fields is hashed. This is the field name for it.
- std::string _hashedField;
+ // Only one of our fields is hashed. This is the field name for it.
+ std::string _hashedField;
- // _seed defaults to zero.
- HashSeed _seed;
+ // _seed defaults to zero.
+ HashSeed _seed;
- // _hashVersion defaults to zero.
- int _hashVersion;
+ // _hashVersion defaults to zero.
+ int _hashVersion;
- BSONObj _missingKey;
- };
+ BSONObj _missingKey;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
index 263568af37a..66f1b09e813 100644
--- a/src/mongo/db/index/haystack_access_method.cpp
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -44,94 +44,97 @@
namespace mongo {
- using std::unique_ptr;
-
- HaystackAccessMethod::HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
+using std::unique_ptr;
+
+HaystackAccessMethod::HaystackAccessMethod(IndexCatalogEntry* btreeState,
+ SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parseHaystackParams(
+ descriptor->infoObj(), &_geoField, &_otherFields, &_bucketSize);
+
+ uassert(16773, "no geo field specified", _geoField.size());
+ uassert(16774, "no non-geo fields specified", _otherFields.size());
+}
+
+void HaystackAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
+}
+
+void HaystackAccessMethod::searchCommand(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& nearObj,
+ double maxDistance,
+ const BSONObj& search,
+ BSONObjBuilder* result,
+ unsigned limit) {
+ Timer t;
+
+ LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance << " search: " << search
+ << endl;
+ int x, y;
+ {
+ BSONObjIterator i(nearObj);
+ x = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
+ y = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
+ }
+ int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
- const IndexDescriptor* descriptor = btreeState->descriptor();
+ GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
- ExpressionParams::parseHaystackParams(descriptor->infoObj(),
- &_geoField,
- &_otherFields,
- &_bucketSize);
+ long long btreeMatches = 0;
- uassert(16773, "no geo field specified", _geoField.size());
- uassert(16774, "no non-geo fields specified", _otherFields.size());
- }
-
- void HaystackAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getHaystackKeys(obj, _geoField, _otherFields, _bucketSize, keys);
- }
+ for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
+ for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
+ BSONObjBuilder bb;
+ bb.append("", ExpressionKeysPrivate::makeHaystackString(x + a, y + b));
- void HaystackAccessMethod::searchCommand(OperationContext* txn, Collection* collection,
- const BSONObj& nearObj, double maxDistance,
- const BSONObj& search, BSONObjBuilder* result,
- unsigned limit) {
- Timer t;
-
- LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance
- << " search: " << search << endl;
- int x, y;
- {
- BSONObjIterator i(nearObj);
- x = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
- y = ExpressionKeysPrivate::hashHaystackElement(i.next(), _bucketSize);
- }
- int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
+ for (unsigned i = 0; i < _otherFields.size(); i++) {
+ // See if the non-geo field we're indexing on is in the provided search term.
+ BSONElement e = search.getFieldDotted(_otherFields[i]);
+ if (e.eoo())
+ bb.appendNull("");
+ else
+ bb.appendAs(e, "");
+ }
- GeoHaystackSearchHopper hopper(txn, nearObj, maxDistance, limit, _geoField, collection);
+ BSONObj key = bb.obj();
- long long btreeMatches = 0;
+ unordered_set<RecordId, RecordId::Hasher> thisPass;
- for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
- for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
- BSONObjBuilder bb;
- bb.append("", ExpressionKeysPrivate::makeHaystackString(x + a, y + b));
- for (unsigned i = 0; i < _otherFields.size(); i++) {
- // See if the non-geo field we're indexing on is in the provided search term.
- BSONElement e = search.getFieldDotted(_otherFields[i]);
- if (e.eoo())
- bb.appendNull("");
- else
- bb.appendAs(e, "");
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::indexScan(txn, collection, _descriptor, key, key, true));
+ PlanExecutor::ExecState state;
+ RecordId loc;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
+ if (hopper.limitReached()) {
+ break;
}
-
- BSONObj key = bb.obj();
-
- unordered_set<RecordId, RecordId::Hasher> thisPass;
-
-
- unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn, collection,
- _descriptor, key, key, true));
- PlanExecutor::ExecState state;
- RecordId loc;
- while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
- if (hopper.limitReached()) { break; }
- pair<unordered_set<RecordId, RecordId::Hasher>::iterator, bool> p
- = thisPass.insert(loc);
- // If a new element was inserted (haven't seen the RecordId before), p.second
- // is true.
- if (p.second) {
- hopper.consider(loc);
- btreeMatches++;
- }
+ pair<unordered_set<RecordId, RecordId::Hasher>::iterator, bool> p =
+ thisPass.insert(loc);
+ // If a new element was inserted (haven't seen the RecordId before), p.second
+ // is true.
+ if (p.second) {
+ hopper.consider(loc);
+ btreeMatches++;
}
}
}
+ }
- BSONArrayBuilder arr(result->subarrayStart("results"));
- int num = hopper.appendResultsTo(&arr);
- arr.done();
+ BSONArrayBuilder arr(result->subarrayStart("results"));
+ int num = hopper.appendResultsTo(&arr);
+ arr.done();
- {
- BSONObjBuilder b(result->subobjStart("stats"));
- b.append("time", t.millis());
- b.appendNumber("btreeMatches", btreeMatches);
- b.append("n", num);
- b.done();
- }
+ {
+ BSONObjBuilder b(result->subobjStart("stats"));
+ b.append("time", t.millis());
+ b.appendNumber("btreeMatches", btreeMatches);
+ b.append("n", num);
+ b.done();
}
+}
} // namespace mongo
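
searchCommand walks a (2 * scale + 1)^2 grid of buckets around the query point, running one index scan per bucket, so wide radii cost quadratically more scans. A standalone sketch of the cell count (values illustrative):

    #include <cmath>
    #include <iostream>

    int main() {
        double maxDistance = 3.5, bucketSize = 1.0;
        int scale = static_cast<int>(std::ceil(maxDistance / bucketSize));  // 4
        std::cout << (2 * scale + 1) * (2 * scale + 1) << std::endl;  // prints 81
        return 0;
    }
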
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
index 1aac55a25e8..d79de3bfffc 100644
--- a/src/mongo/db/index/haystack_access_method.h
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -35,41 +35,45 @@
namespace mongo {
- class Collection;
- class OperationContext;
+class Collection;
+class OperationContext;
- /**
- * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
- * Examines all documents in a given radius of a given point.
- * Returns all documents that match a given search restriction.
- * See http://dochub.mongodb.org/core/haystackindexes
- *
- * Use when you want to look for restaurants within 25 miles with a certain name.
- * Don't use when you want to find the closest open restaurants; see 2d.cpp for that.
- *
- * Usage:
- * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
- * pos is the name of the field to be indexed that has lat/lng data in an array.
- * type is the name of the secondary field to be indexed.
- * bucketSize specifies the dimension of the square bucket for the data in pos.
- * ALL fields are mandatory.
- */
- class HaystackAccessMethod : public IndexAccessMethod {
- public:
- HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+/**
+ * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
+ * Examines all documents in a given radius of a given point.
+ * Returns all documents that match a given search restriction.
+ * See http://dochub.mongodb.org/core/haystackindexes
+ *
+ * Use when you want to look for restaurants within 25 miles with a certain name.
+ * Don't use when you want to find the closest open restaurants; see 2d.cpp for that.
+ *
+ * Usage:
+ * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ * pos is the name of the field to be indexed that has lat/lng data in an array.
+ * type is the name of the secondary field to be indexed.
+ * bucketSize specifies the dimension of the square bucket for the data in pos.
+ * ALL fields are mandatory.
+ */
+class HaystackAccessMethod : public IndexAccessMethod {
+public:
+ HaystackAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- protected:
- friend class GeoHaystackSearchCommand;
- void searchCommand(OperationContext* txn, Collection* collection,
- const BSONObj& nearObj, double maxDistance, const BSONObj& search,
- BSONObjBuilder* result, unsigned limit);
+protected:
+ friend class GeoHaystackSearchCommand;
+ void searchCommand(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& nearObj,
+ double maxDistance,
+ const BSONObj& search,
+ BSONObjBuilder* result,
+ unsigned limit);
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
- std::string _geoField;
- std::vector<std::string> _otherFields;
- double _bucketSize;
- };
+ std::string _geoField;
+ std::vector<std::string> _otherFields;
+ double _bucketSize;
+};
} // namespace mongo
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
index 2f068bfc15f..0ab511f42e4 100644
--- a/src/mongo/db/index/haystack_access_method_internal.h
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -35,58 +35,60 @@
namespace mongo {
- class GeoHaystackSearchHopper {
- public:
- /**
- * Constructed with a point, a max distance from that point, and a max number of
- * matched points to store.
- * @param n The centroid that we're searching
- * @param maxDistance The maximum distance to consider from that point
- * @param limit The maximum number of results to return
- * @param geoField Which field in the provided RecordId has the point to test.
- */
- GeoHaystackSearchHopper(OperationContext* txn,
- const BSONObj& nearObj,
- double maxDistance,
- unsigned limit,
- const std::string& geoField,
- const Collection* collection)
- : _txn(txn),
- _collection(collection),
- _near(nearObj),
- _maxDistance(maxDistance),
- _limit(limit),
- _geoField(geoField) { }
+class GeoHaystackSearchHopper {
+public:
+ /**
+ * Constructed with a point, a max distance from that point, and a max number of
+ * matched points to store.
+ * @param n The centroid that we're searching
+ * @param maxDistance The maximum distance to consider from that point
+ * @param limit The maximum number of results to return
+ * @param geoField Which field in the provided RecordId has the point to test.
+ */
+ GeoHaystackSearchHopper(OperationContext* txn,
+ const BSONObj& nearObj,
+ double maxDistance,
+ unsigned limit,
+ const std::string& geoField,
+ const Collection* collection)
+ : _txn(txn),
+ _collection(collection),
+ _near(nearObj),
+ _maxDistance(maxDistance),
+ _limit(limit),
+ _geoField(geoField) {}
- // Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
- // it)
- void consider(const RecordId& loc) {
- if (limitReached()) return;
- Point p(_collection->docFor(_txn, loc).value().getFieldDotted(_geoField));
- if (distance(_near, p) > _maxDistance)
- return;
- _locs.push_back(loc);
- }
+ // Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
+ // it)
+ void consider(const RecordId& loc) {
+ if (limitReached())
+ return;
+ Point p(_collection->docFor(_txn, loc).value().getFieldDotted(_geoField));
+ if (distance(_near, p) > _maxDistance)
+ return;
+ _locs.push_back(loc);
+ }
- int appendResultsTo(BSONArrayBuilder* b) {
- for (unsigned i = 0; i <_locs.size(); i++)
- b->append(_collection->docFor(_txn, _locs[i]).value());
- return _locs.size();
- }
+ int appendResultsTo(BSONArrayBuilder* b) {
+ for (unsigned i = 0; i < _locs.size(); i++)
+ b->append(_collection->docFor(_txn, _locs[i]).value());
+ return _locs.size();
+ }
- // Have we stored as many points as we can?
- bool limitReached() const {
- return _locs.size() >= _limit;
- }
- private:
- OperationContext* _txn;
- const Collection* _collection;
+ // Have we stored as many points as we can?
+ bool limitReached() const {
+ return _locs.size() >= _limit;
+ }
- Point _near;
- double _maxDistance;
- unsigned _limit;
- const std::string _geoField;
- std::vector<RecordId> _locs;
- };
+private:
+ OperationContext* _txn;
+ const Collection* _collection;
+
+ Point _near;
+ double _maxDistance;
+ unsigned _limit;
+ const std::string _geoField;
+ std::vector<RecordId> _locs;
+};
} // namespace mongo
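The hopper above interleaves MongoDB types (OperationContext, Collection, RecordId) with a simple filter-and-collect loop. The loop itself is easier to see in isolation; below is a hedged standalone reduction with plain points in place of RecordId lookups. All names here (Point2, MiniHopper) are invented for the sketch, and it compares squared distances where the real code calls distance().

#include <cstddef>
#include <vector>

struct Point2 {
    double x, y;
};

// Standalone reduction of GeoHaystackSearchHopper: keep a candidate point if
// there is still room under the limit and it lies within maxDistance of the
// search centroid.
class MiniHopper {
public:
    MiniHopper(Point2 center, double maxDistance, std::size_t limit)
        : _center(center), _maxDistance(maxDistance), _limit(limit) {}

    void consider(const Point2& p) {
        if (_hits.size() >= _limit)
            return;  // analogous to limitReached()
        const double dx = p.x - _center.x;
        const double dy = p.y - _center.y;
        if (dx * dx + dy * dy > _maxDistance * _maxDistance)
            return;  // farther than maxDistance from the centroid
        _hits.push_back(p);
    }

    const std::vector<Point2>& hits() const {
        return _hits;
    }

private:
    Point2 _center;
    double _maxDistance;
    std::size_t _limit;
    std::vector<Point2> _hits;
};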
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 2e1490f1f4d..18765949789 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -49,408 +49,397 @@
namespace mongo {
- using std::endl;
- using std::set;
- using std::vector;
-
- MONGO_EXPORT_SERVER_PARAMETER(failIndexKeyTooLong, bool, true);
-
- //
- // Comparison for external sorter interface
- //
-
- // Defined in db/structure/btree/key.cpp
- // XXX TODO: rename to something more descriptive, etc. etc.
- int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o);
-
- class BtreeExternalSortComparison {
- public:
- BtreeExternalSortComparison(const BSONObj& ordering, int version)
- : _ordering(Ordering::make(ordering)),
- _version(version) {
- invariant(version == 1 || version == 0);
- }
+using std::endl;
+using std::set;
+using std::vector;
- typedef std::pair<BSONObj, RecordId> Data;
+MONGO_EXPORT_SERVER_PARAMETER(failIndexKeyTooLong, bool, true);
- int operator() (const Data& l, const Data& r) const {
- int x = (_version == 1
- ? l.first.woCompare(r.first, _ordering, /*considerfieldname*/false)
- : oldCompare(l.first, r.first, _ordering));
- if (x) { return x; }
- return l.second.compare(r.second);
- }
- private:
- const Ordering _ordering;
- const int _version;
- };
-
- IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState,
- SortedDataInterface* btree)
- : _btreeState(btreeState),
- _descriptor(btreeState->descriptor()),
- _newInterface(btree) {
- verify(0 == _descriptor->version() || 1 == _descriptor->version());
- }
+//
+// Comparison for external sorter interface
+//
- bool IndexAccessMethod::ignoreKeyTooLong(OperationContext *txn) {
- // Ignore this error if we're on a secondary or if the user requested it
- return !txn->isPrimaryFor(_btreeState->ns()) || !failIndexKeyTooLong;
+// Defined in db/structure/btree/key.cpp
+// XXX TODO: rename to something more descriptive, etc. etc.
+int oldCompare(const BSONObj& l, const BSONObj& r, const Ordering& o);
+
+class BtreeExternalSortComparison {
+public:
+ BtreeExternalSortComparison(const BSONObj& ordering, int version)
+ : _ordering(Ordering::make(ordering)), _version(version) {
+ invariant(version == 1 || version == 0);
}
- // Find the keys for obj, put them in the tree pointing to loc
- Status IndexAccessMethod::insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted) {
- *numInserted = 0;
+ typedef std::pair<BSONObj, RecordId> Data;
- BSONObjSet keys;
- // Delegate to the subclass.
- getKeys(obj, &keys);
+ int operator()(const Data& l, const Data& r) const {
+ int x = (_version == 1 ? l.first.woCompare(r.first, _ordering, /*considerfieldname*/ false)
+ : oldCompare(l.first, r.first, _ordering));
+ if (x) {
+ return x;
+ }
+ return l.second.compare(r.second);
+ }
- Status ret = Status::OK();
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);
+private:
+ const Ordering _ordering;
+ const int _version;
+};
+
+IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : _btreeState(btreeState), _descriptor(btreeState->descriptor()), _newInterface(btree) {
+ verify(0 == _descriptor->version() || 1 == _descriptor->version());
+}
+
+bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
+ // Ignore this error if we're on a secondary or if the user requested it
+ return !txn->isPrimaryFor(_btreeState->ns()) || !failIndexKeyTooLong;
+}
+
+// Find the keys for obj, put them in the tree pointing to loc
+Status IndexAccessMethod::insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted) {
+ *numInserted = 0;
+
+ BSONObjSet keys;
+ // Delegate to the subclass.
+ getKeys(obj, &keys);
+
+ Status ret = Status::OK();
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ Status status = _newInterface->insert(txn, *i, loc, options.dupsAllowed);
+
+ // Everything's OK, carry on.
+ if (status.isOK()) {
+ ++*numInserted;
+ continue;
+ }
- // Everything's OK, carry on.
- if (status.isOK()) {
- ++*numInserted;
- continue;
- }
+ // Error cases.
- // Error cases.
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ continue;
+ }
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (status.code() == ErrorCodes::DuplicateKeyValue) {
+ // A document might be indexed multiple times during a background index build
+ // if it moves ahead of the collection scan cursor (e.g. via an update).
+ if (!_btreeState->isReady(txn)) {
+ LOG(3) << "key " << *i << " already in index during background indexing (ok)";
continue;
}
-
- if (status.code() == ErrorCodes::DuplicateKeyValue) {
- // A document might be indexed multiple times during a background index build
- // if it moves ahead of the collection scan cursor (e.g. via an update).
- if (!_btreeState->isReady(txn)) {
- LOG(3) << "key " << *i << " already in index during background indexing (ok)";
- continue;
- }
- }
-
- // Clean up after ourselves.
- for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
- removeOneKey(txn, *j, loc, options.dupsAllowed);
- *numInserted = 0;
- }
-
- return status;
}
- if (*numInserted > 1) {
- _btreeState->setMultikey( txn );
+ // Clean up after ourselves.
+ for (BSONObjSet::const_iterator j = keys.begin(); j != i; ++j) {
+ removeOneKey(txn, *j, loc, options.dupsAllowed);
+ *numInserted = 0;
}
- return ret;
+ return status;
}
- void IndexAccessMethod::removeOneKey(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed) {
- try {
- _newInterface->unindex(txn, key, loc, dupsAllowed);
- } catch (AssertionException& e) {
- log() << "Assertion failure: _unindex failed "
- << _descriptor->indexNamespace() << endl;
- log() << "Assertion failure: _unindex failed: " << e.what()
- << " key:" << key.toString()
- << " dl:" << loc;
- logContext();
- }
+ if (*numInserted > 1) {
+ _btreeState->setMultikey(txn);
}
- std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(
- OperationContext* txn,
- bool isForward) const {
- return _newInterface->newCursor(txn, isForward);
- }
+ return ret;
+}
- // Remove the provided doc from the index.
- Status IndexAccessMethod::remove(OperationContext* txn,
- const BSONObj &obj,
+void IndexAccessMethod::removeOneKey(OperationContext* txn,
+ const BSONObj& key,
const RecordId& loc,
- const InsertDeleteOptions &options,
- int64_t* numDeleted) {
-
- BSONObjSet keys;
- getKeys(obj, &keys);
- *numDeleted = 0;
-
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- removeOneKey(txn, *i, loc, options.dupsAllowed);
- ++*numDeleted;
- }
-
- return Status::OK();
+ bool dupsAllowed) {
+ try {
+ _newInterface->unindex(txn, key, loc, dupsAllowed);
+ } catch (AssertionException& e) {
+ log() << "Assertion failure: _unindex failed " << _descriptor->indexNamespace() << endl;
+ log() << "Assertion failure: _unindex failed: " << e.what() << " key:" << key.toString()
+ << " dl:" << loc;
+ logContext();
+ }
+}
+
+std::unique_ptr<SortedDataInterface::Cursor> IndexAccessMethod::newCursor(OperationContext* txn,
+ bool isForward) const {
+ return _newInterface->newCursor(txn, isForward);
+}
+
+// Remove the provided doc from the index.
+Status IndexAccessMethod::remove(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numDeleted) {
+ BSONObjSet keys;
+ getKeys(obj, &keys);
+ *numDeleted = 0;
+
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ removeOneKey(txn, *i, loc, options.dupsAllowed);
+ ++*numDeleted;
}
- // Return keys in l that are not in r.
- // Lifted basically verbatim from elsewhere.
- static void setDifference(const BSONObjSet &l, const BSONObjSet &r, vector<BSONObj*> *diff) {
- // l and r must use the same ordering spec.
- verify(l.key_comp().order() == r.key_comp().order());
- BSONObjSet::const_iterator i = l.begin();
- BSONObjSet::const_iterator j = r.begin();
- while ( 1 ) {
- if ( i == l.end() )
- break;
- while ( j != r.end() && j->woCompare( *i ) < 0 )
- j++;
- if ( j == r.end() || i->woCompare(*j) != 0 ) {
- const BSONObj *jo = &*i;
- diff->push_back( (BSONObj *) jo );
- }
- i++;
+ return Status::OK();
+}
+
+// Return keys in l that are not in r.
+// Lifted basically verbatim from elsewhere.
+static void setDifference(const BSONObjSet& l, const BSONObjSet& r, vector<BSONObj*>* diff) {
+ // l and r must use the same ordering spec.
+ verify(l.key_comp().order() == r.key_comp().order());
+ BSONObjSet::const_iterator i = l.begin();
+ BSONObjSet::const_iterator j = r.begin();
+ while (1) {
+ if (i == l.end())
+ break;
+ while (j != r.end() && j->woCompare(*i) < 0)
+ j++;
+ if (j == r.end() || i->woCompare(*j) != 0) {
+ const BSONObj* jo = &*i;
+ diff->push_back((BSONObj*)jo);
}
+ i++;
}
+}
- Status IndexAccessMethod::initializeAsEmpty(OperationContext* txn) {
- return _newInterface->initAsEmpty(txn);
- }
+Status IndexAccessMethod::initializeAsEmpty(OperationContext* txn) {
+ return _newInterface->initAsEmpty(txn);
+}
- Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
- BSONObjSet keys;
- getKeys(obj, &keys);
+Status IndexAccessMethod::touch(OperationContext* txn, const BSONObj& obj) {
+ BSONObjSet keys;
+ getKeys(obj, &keys);
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
- for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
- cursor->seekExact(*i);
- }
-
- return Status::OK();
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ for (BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i) {
+ cursor->seekExact(*i);
}
+ return Status::OK();
+}
- Status IndexAccessMethod::touch( OperationContext* txn ) const {
- return _newInterface->touch(txn);
- }
- RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
- std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
- const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
- : SortedDataInterface::Cursor::kWantLoc;
- if (auto kv = cursor->seekExact(key, requestedInfo)) {
- // StorageEngine should guarantee these.
- dassert(!kv->loc.isNull());
- dassert(kv->key.woCompare(key, /*order*/BSONObj(), /*considerFieldNames*/false) == 0);
+Status IndexAccessMethod::touch(OperationContext* txn) const {
+ return _newInterface->touch(txn);
+}
- return kv->loc;
- }
-
- return RecordId();
- }
+RecordId IndexAccessMethod::findSingle(OperationContext* txn, const BSONObj& key) const {
+ std::unique_ptr<SortedDataInterface::Cursor> cursor(_newInterface->newCursor(txn));
+ const auto requestedInfo = kDebugBuild ? SortedDataInterface::Cursor::kKeyAndLoc
+ : SortedDataInterface::Cursor::kWantLoc;
+ if (auto kv = cursor->seekExact(key, requestedInfo)) {
+ // StorageEngine should guarantee these.
+ dassert(!kv->loc.isNull());
+ dassert(kv->key.woCompare(key, /*order*/ BSONObj(), /*considerFieldNames*/ false) == 0);
- Status IndexAccessMethod::validate(OperationContext* txn, bool full, int64_t* numKeys,
- BSONObjBuilder* output) {
- // XXX: long long vs int64_t
- long long keys = 0;
- _newInterface->fullValidate(txn, full, &keys, output);
- *numKeys = keys;
- return Status::OK();
+ return kv->loc;
}
- bool IndexAccessMethod::appendCustomStats(OperationContext* txn,
- BSONObjBuilder* output,
- double scale) const {
- return _newInterface->appendCustomStats(txn, output, scale);
+ return RecordId();
+}
+
+Status IndexAccessMethod::validate(OperationContext* txn,
+ bool full,
+ int64_t* numKeys,
+ BSONObjBuilder* output) {
+ // XXX: long long vs int64_t
+ long long keys = 0;
+ _newInterface->fullValidate(txn, full, &keys, output);
+ *numKeys = keys;
+ return Status::OK();
+}
+
+bool IndexAccessMethod::appendCustomStats(OperationContext* txn,
+ BSONObjBuilder* output,
+ double scale) const {
+ return _newInterface->appendCustomStats(txn, output, scale);
+}
+
+long long IndexAccessMethod::getSpaceUsedBytes(OperationContext* txn) const {
+ return _newInterface->getSpaceUsedBytes(txn);
+}
+
+Status IndexAccessMethod::validateUpdate(OperationContext* txn,
+ const BSONObj& from,
+ const BSONObj& to,
+ const RecordId& record,
+ const InsertDeleteOptions& options,
+ UpdateTicket* ticket,
+ const MatchExpression* indexFilter) {
+ if (indexFilter == NULL || indexFilter->matchesBSON(from))
+ getKeys(from, &ticket->oldKeys);
+ if (indexFilter == NULL || indexFilter->matchesBSON(to))
+ getKeys(to, &ticket->newKeys);
+ ticket->loc = record;
+ ticket->dupsAllowed = options.dupsAllowed;
+
+ setDifference(ticket->oldKeys, ticket->newKeys, &ticket->removed);
+ setDifference(ticket->newKeys, ticket->oldKeys, &ticket->added);
+
+ ticket->_isValid = true;
+
+ return Status::OK();
+}
+
+Status IndexAccessMethod::update(OperationContext* txn,
+ const UpdateTicket& ticket,
+ int64_t* numUpdated) {
+ if (!ticket._isValid) {
+ return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
}
- long long IndexAccessMethod::getSpaceUsedBytes( OperationContext* txn ) const {
- return _newInterface->getSpaceUsedBytes( txn );
+ if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1) {
+ _btreeState->setMultikey(txn);
}
- Status IndexAccessMethod::validateUpdate(OperationContext* txn,
- const BSONObj &from,
- const BSONObj &to,
- const RecordId &record,
- const InsertDeleteOptions &options,
- UpdateTicket* ticket,
- const MatchExpression* indexFilter) {
-
- if (indexFilter == NULL || indexFilter->matchesBSON(from))
- getKeys(from, &ticket->oldKeys);
- if (indexFilter == NULL || indexFilter->matchesBSON(to))
- getKeys(to, &ticket->newKeys);
- ticket->loc = record;
- ticket->dupsAllowed = options.dupsAllowed;
-
- setDifference(ticket->oldKeys, ticket->newKeys, &ticket->removed);
- setDifference(ticket->newKeys, ticket->oldKeys, &ticket->added);
-
- ticket->_isValid = true;
-
- return Status::OK();
+ for (size_t i = 0; i < ticket.removed.size(); ++i) {
+ _newInterface->unindex(txn, *ticket.removed[i], ticket.loc, ticket.dupsAllowed);
}
- Status IndexAccessMethod::update(OperationContext* txn,
- const UpdateTicket& ticket,
- int64_t* numUpdated) {
- if (!ticket._isValid) {
- return Status(ErrorCodes::InternalError, "Invalid UpdateTicket in update");
- }
-
- if (ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size() > 1) {
- _btreeState->setMultikey( txn );
- }
-
- for (size_t i = 0; i < ticket.removed.size(); ++i) {
- _newInterface->unindex(txn,
- *ticket.removed[i],
- ticket.loc,
- ticket.dupsAllowed);
- }
-
- for (size_t i = 0; i < ticket.added.size(); ++i) {
- Status status = _newInterface->insert(txn,
- *ticket.added[i],
- ticket.loc,
- ticket.dupsAllowed);
- if ( !status.isOK() ) {
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
- // Ignore.
- continue;
- }
-
- return status;
+ for (size_t i = 0; i < ticket.added.size(); ++i) {
+ Status status =
+ _newInterface->insert(txn, *ticket.added[i], ticket.loc, ticket.dupsAllowed);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ // Ignore.
+ continue;
}
- }
-
- *numUpdated = ticket.added.size();
- return Status::OK();
+ return status;
+ }
}
- std::unique_ptr<IndexAccessMethod::BulkBuilder> IndexAccessMethod::initiateBulk() {
-
- return std::unique_ptr<BulkBuilder>(new BulkBuilder(this, _descriptor));
+ *numUpdated = ticket.added.size();
+
+ return Status::OK();
+}
+
+std::unique_ptr<IndexAccessMethod::BulkBuilder> IndexAccessMethod::initiateBulk() {
+ return std::unique_ptr<BulkBuilder>(new BulkBuilder(this, _descriptor));
+}
+
+IndexAccessMethod::BulkBuilder::BulkBuilder(const IndexAccessMethod* index,
+ const IndexDescriptor* descriptor)
+ : _sorter(Sorter::make(
+ SortOptions()
+ .TempDir(storageGlobalParams.dbpath + "/_tmp")
+ .ExtSortAllowed()
+ .MaxMemoryUsageBytes(100 * 1024 * 1024),
+ BtreeExternalSortComparison(descriptor->keyPattern(), descriptor->version()))),
+ _real(index) {}
+
+Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted) {
+ BSONObjSet keys;
+ _real->getKeys(obj, &keys);
+
+ _isMultiKey = _isMultiKey || (keys.size() > 1);
+
+ for (BSONObjSet::iterator it = keys.begin(); it != keys.end(); ++it) {
+ _sorter->add(*it, loc);
+ _keysInserted++;
}
- IndexAccessMethod::BulkBuilder::BulkBuilder(const IndexAccessMethod* index,
- const IndexDescriptor* descriptor)
- : _sorter(Sorter::make(SortOptions().TempDir(storageGlobalParams.dbpath + "/_tmp")
- .ExtSortAllowed()
- .MaxMemoryUsageBytes(100*1024*1024),
- BtreeExternalSortComparison(descriptor->keyPattern(),
- descriptor->version())))
- , _real(index) {
+ if (NULL != numInserted) {
+ *numInserted += keys.size();
}
- Status IndexAccessMethod::BulkBuilder::insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted) {
- BSONObjSet keys;
- _real->getKeys(obj, &keys);
-
- _isMultiKey = _isMultiKey || (keys.size() > 1);
-
- for (BSONObjSet::iterator it = keys.begin(); it != keys.end(); ++it) {
- _sorter->add(*it, loc);
- _keysInserted++;
- }
+ return Status::OK();
+}
- if (NULL != numInserted) {
- *numInserted += keys.size();
- }
- return Status::OK();
- }
+Status IndexAccessMethod::commitBulk(OperationContext* txn,
+ std::unique_ptr<BulkBuilder> bulk,
+ bool mayInterrupt,
+ bool dupsAllowed,
+ set<RecordId>* dupsToDrop) {
+ Timer timer;
+ std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
- Status IndexAccessMethod::commitBulk(OperationContext* txn,
- std::unique_ptr<BulkBuilder> bulk,
- bool mayInterrupt,
- bool dupsAllowed,
- set<RecordId>* dupsToDrop) {
+ stdx::unique_lock<Client> lk(*txn->getClient());
+ ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
+ "Index: (2/3) BTree Bottom Up Progress",
+ bulk->_keysInserted,
+ 10));
+ lk.unlock();
- Timer timer;
+ std::unique_ptr<SortedDataBuilderInterface> builder;
- std::unique_ptr<BulkBuilder::Sorter::Iterator> i(bulk->_sorter->done());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(txn);
- stdx::unique_lock<Client> lk(*txn->getClient());
- ProgressMeterHolder pm(*txn->setMessage_inlock("Index Bulk Build: (2/3) btree bottom up",
- "Index: (2/3) BTree Bottom Up Progress",
- bulk->_keysInserted,
- 10));
- lk.unlock();
+ if (bulk->_isMultiKey) {
+ _btreeState->setMultikey(txn);
+ }
- std::unique_ptr<SortedDataBuilderInterface> builder;
+ builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(txn);
+ while (i->more()) {
+ if (mayInterrupt) {
+ txn->checkForInterrupt();
+ }
- if (bulk->_isMultiKey) {
- _btreeState->setMultikey( txn );
- }
+ WriteUnitOfWork wunit(txn);
+ // Improve performance in the btree-building phase by disabling rollback tracking.
+ // This avoids copying all the written bytes to a buffer that is only used to roll back.
+ // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
+ // up by the index system.
+ txn->recoveryUnit()->setRollbackWritesDisabled();
- builder.reset(_newInterface->getBulkBuilder(txn, dupsAllowed));
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "setting index multikey flag", "");
+ // Get the next datum and add it to the builder.
+ BulkBuilder::Sorter::Data d = i->next();
+ Status status = builder->addKey(d.first, d.second);
- while (i->more()) {
- if (mayInterrupt) {
- txn->checkForInterrupt();
+ if (!status.isOK()) {
+ // Overlong key that's OK to skip?
+ if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ continue;
}
- WriteUnitOfWork wunit(txn);
- // Improve performance in the btree-building phase by disabling rollback tracking.
- // This avoids copying all the written bytes to a buffer that is only used to roll back.
- // Note that this is safe to do, as this entire index-build-in-progress will be cleaned
- // up by the index system.
- txn->recoveryUnit()->setRollbackWritesDisabled();
+ // Check if this is a duplicate that's OK to skip
+ if (status.code() == ErrorCodes::DuplicateKey) {
+ invariant(!dupsAllowed); // shouldn't be getting DupKey errors if dupsAllowed.
- // Get the next datum and add it to the builder.
- BulkBuilder::Sorter::Data d = i->next();
- Status status = builder->addKey(d.first, d.second);
-
- if (!status.isOK()) {
- // Overlong key that's OK to skip?
- if (status.code() == ErrorCodes::KeyTooLong && ignoreKeyTooLong(txn)) {
+ if (dupsToDrop) {
+ dupsToDrop->insert(d.second);
continue;
}
-
- // Check if this is a duplicate that's OK to skip
- if (status.code() == ErrorCodes::DuplicateKey) {
- invariant(!dupsAllowed); // shouldn't be getting DupKey errors if dupsAllowed.
-
- if (dupsToDrop) {
- dupsToDrop->insert(d.second);
- continue;
- }
- }
-
- return status;
}
- // If we're here either it's a dup and we're cool with it or the addKey went just
- // fine.
- pm.hit();
- wunit.commit();
+ return status;
}
- pm.finished();
-
- {
- stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
- "Index: (3/3) BTree Middle Progress");
- }
+ // If we're here either it's a dup and we're cool with it or the addKey went just
+ // fine.
+ pm.hit();
+ wunit.commit();
+ }
- LOG(timer.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit";
+ pm.finished();
- builder->commit(mayInterrupt);
- return Status::OK();
+ {
+ stdx::lock_guard<Client> lk(*txn->getClient());
+ CurOp::get(txn)->setMessage_inlock("Index Bulk Build: (3/3) btree-middle",
+ "Index: (3/3) BTree Middle Progress");
}
+ LOG(timer.seconds() > 10 ? 0 : 1) << "\t done building bottom layer, going to commit";
+
+ builder->commit(mayInterrupt);
+ return Status::OK();
+}
+
} // namespace mongo
#include "mongo/db/sorter/sorter.cpp"
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 3f311128a2e..5dc88d30d80 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -40,250 +40,250 @@
namespace mongo {
- class BSONObjBuilder;
- class MatchExpression;
- class UpdateTicket;
- struct InsertDeleteOptions;
+class BSONObjBuilder;
+class MatchExpression;
+class UpdateTicket;
+struct InsertDeleteOptions;
+
+/**
+ * An IndexAccessMethod is the interface through which all the mutation, lookup, and
+ * traversal of index entries is done. The class is designed so that the underlying index
+ * data structure is opaque to the caller.
+ *
+ * IndexAccessMethods for existing indices are obtained through the system catalog.
+ *
+ * We assume the caller holds whatever locks are required. This interface is not thread-safe.
+ *
+ */
+class IndexAccessMethod {
+ MONGO_DISALLOW_COPYING(IndexAccessMethod);
+
+public:
+ IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+ virtual ~IndexAccessMethod() {}
+
+ //
+ // Lookup, traversal, and mutation support
+ //
/**
- * An IndexAccessMethod is the interface through which all the mutation, lookup, and
- * traversal of index entries is done. The class is designed so that the underlying index
- * data structure is opaque to the caller.
- *
- * IndexAccessMethods for existing indices are obtained through the system catalog.
- *
- * We assume the caller has whatever locks required. This interface is not thread safe.
+ * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k ->
+ * 'loc') into the index. 'obj' is the object at the location 'loc'. If not NULL,
+ * 'numInserted' will be set to the number of keys added to the index for the document. If
+ * there is more than one key for 'obj', either all keys will be inserted or none will.
*
+ * The behavior of the insertion can be specified through 'options'.
*/
- class IndexAccessMethod {
- MONGO_DISALLOW_COPYING(IndexAccessMethod);
- public:
+ Status insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted);
- IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
- virtual ~IndexAccessMethod() { }
+ /**
+ * Analogous to above, but remove the records instead of inserting them. If not NULL,
+ * numDeleted will be set to the number of keys removed from the index for the document.
+ */
+ Status remove(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numDeleted);
- //
- // Lookup, traversal, and mutation support
- //
+ /**
+ * Checks whether the index entries for the document 'from', which is placed at location
+ * 'loc' on disk, can be changed to the index entries for the doc 'to'. Provides a ticket
+ * for actually performing the update.
+ *
+ * Returns an error if the update is invalid. The ticket will also be marked as invalid.
+ * Returns OK if the update should proceed without error. The ticket is marked as valid.
+ *
+ * There is no obligation to perform the update after performing validation.
+ */
+ Status validateUpdate(OperationContext* txn,
+ const BSONObj& from,
+ const BSONObj& to,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ UpdateTicket* ticket,
+ const MatchExpression* indexFilter);
- /**
- * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k ->
- * 'loc') into the index. 'obj' is the object at the location 'loc'. If not NULL,
- * 'numInserted' will be set to the number of keys added to the index for the document. If
- * there is more than one key for 'obj', either all keys will be inserted or none will.
- *
- * The behavior of the insertion can be specified through 'options'.
- */
- Status insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted);
+ /**
+ * Perform a validated update. The keys for the 'from' object will be removed, and the keys
+ * for the object 'to' will be added. Returns OK if the update succeeded, failure if it did
+ * not. If an update does not succeed, the index will be unmodified, and the keys for
+ * 'from' will remain. Assumes that the index has not changed since validateUpdate was
+ * called. If the index was changed, we may return an error, as our ticket may have been
+ * invalidated.
+ */
+ Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated);
- /**
- * Analogous to above, but remove the records instead of inserting them. If not NULL,
- * numDeleted will be set to the number of keys removed from the index for the document.
- */
- Status remove(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numDeleted);
+ /**
+ * Returns an unpositioned cursor over 'this' index.
+ */
+ std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
+ bool isForward = true) const;
- /**
- * Checks whether the index entries for the document 'from', which is placed at location
- * 'loc' on disk, can be changed to the index entries for the doc 'to'. Provides a ticket
- * for actually performing the update.
- *
- * Returns an error if the update is invalid. The ticket will also be marked as invalid.
- * Returns OK if the update should proceed without error. The ticket is marked as valid.
- *
- * There is no obligation to perform the update after performing validation.
- */
- Status validateUpdate(OperationContext* txn,
- const BSONObj& from,
- const BSONObj& to,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- UpdateTicket* ticket,
- const MatchExpression* indexFilter);
+ // ------ index level operations ------
- /**
- * Perform a validated update. The keys for the 'from' object will be removed, and the keys
- * for the object 'to' will be added. Returns OK if the update succeeded, failure if it did
- * not. If an update does not succeed, the index will be unmodified, and the keys for
- * 'from' will remain. Assumes that the index has not changed since validateUpdate was
- * called. If the index was changed, we may return an error, as our ticket may have been
- * invalidated.
- */
- Status update(OperationContext* txn, const UpdateTicket& ticket, int64_t* numUpdated);
- /**
- * Returns an unpositioned cursor over 'this' index.
- */
- std::unique_ptr<SortedDataInterface::Cursor> newCursor(OperationContext* txn,
- bool isForward = true) const;
+ /**
+ * Initializes this index. This is called only once for the lifetime of the index;
+ * calling it multiple times is an error.
+ */
+ Status initializeAsEmpty(OperationContext* txn);
- // ------ index level operations ------
+ /**
+ * Try to page-in the pages that contain the keys generated from 'obj'.
+ * This can be used to speed up future accesses to an index by trying to ensure the
+ * appropriate pages are not swapped out.
+ * See prefetch.cpp.
+ */
+ Status touch(OperationContext* txn, const BSONObj& obj);
+ /**
+ * Pages in the entire index.
+ */
+ Status touch(OperationContext* txn) const;
- /**
- * initializes this index
- * only called once for the lifetime of the index
- * if called multiple times, is an error
- */
- Status initializeAsEmpty(OperationContext* txn);
+ /**
+ * Walk the entire index, checking the internal structure for consistency.
+ * Set numKeys to the number of keys in the index.
+ *
+ * 'output' is used to store results of validate when 'full' is true.
+ * If 'full' is false, 'output' may be NULL.
+ *
+ * Return OK if the index is valid.
+ *
+ * Currently wasserts if the index is invalid. This could/should be changed in
+ * the future to return a Status.
+ */
+ Status validate(OperationContext* txn, bool full, int64_t* numKeys, BSONObjBuilder* output);
- /**
- * Try to page-in the pages that contain the keys generated from 'obj'.
- * This can be used to speed up future accesses to an index by trying to ensure the
- * appropriate pages are not swapped out.
- * See prefetch.cpp.
- */
- Status touch(OperationContext* txn, const BSONObj& obj);
+ /**
+ * Add custom statistics about this index to BSON object builder, for display.
+ *
+ * 'scale' is a scaling factor to apply to all byte statistics.
+ *
+ * Returns true if stats were appended.
+ */
+ bool appendCustomStats(OperationContext* txn, BSONObjBuilder* result, double scale) const;
- /**
- * this pages in the entire index
- */
- Status touch(OperationContext* txn) const;
+ /**
+ * @return The number of bytes consumed by this index.
+ * Exactly what is counted is not precisely defined; it varies with padding, re-use, etc.
+ */
+ long long getSpaceUsedBytes(OperationContext* txn) const;
- /**
- * Walk the entire index, checking the internal structure for consistency.
- * Set numKeys to the number of keys in the index.
- *
- * 'output' is used to store results of validate when 'full' is true.
- * If 'full' is false, 'output' may be NULL.
- *
- * Return OK if the index is valid.
- *
- * Currently wasserts that the index is invalid. This could/should be changed in
- * the future to return a Status.
- */
- Status validate(OperationContext* txn, bool full, int64_t* numKeys, BSONObjBuilder* output);
+ RecordId findSingle(OperationContext* txn, const BSONObj& key) const;
- /**
- * Add custom statistics about this index to BSON object builder, for display.
- *
- * 'scale' is a scaling factor to apply to all byte statistics.
- *
- * Returns true if stats were appended.
- */
- bool appendCustomStats(OperationContext* txn, BSONObjBuilder* result, double scale) const;
+ //
+ // Bulk operations support
+ //
+ class BulkBuilder {
+ public:
/**
- * @return The number of bytes consumed by this index.
- * Exactly what is counted is not defined based on padding, re-use, etc...
+ * Insert into the BulkBuilder as-if inserting into an IndexAccessMethod.
*/
- long long getSpaceUsedBytes( OperationContext* txn ) const;
-
- RecordId findSingle( OperationContext* txn, const BSONObj& key ) const;
-
- //
- // Bulk operations support
- //
-
- class BulkBuilder {
- public:
- /**
- * Insert into the BulkBuilder as-if inserting into an IndexAccessMethod.
- */
- Status insert(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- const InsertDeleteOptions& options,
- int64_t* numInserted);
+ Status insert(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ const InsertDeleteOptions& options,
+ int64_t* numInserted);
- private:
- friend class IndexAccessMethod;
+ private:
+ friend class IndexAccessMethod;
- using Sorter = mongo::Sorter<BSONObj, RecordId>;
+ using Sorter = mongo::Sorter<BSONObj, RecordId>;
- BulkBuilder(const IndexAccessMethod* index, const IndexDescriptor* descriptor);
+ BulkBuilder(const IndexAccessMethod* index, const IndexDescriptor* descriptor);
- std::unique_ptr<Sorter> _sorter;
- const IndexAccessMethod* _real;
- int64_t _keysInserted = 0;
- bool _isMultiKey = false;
- };
+ std::unique_ptr<Sorter> _sorter;
+ const IndexAccessMethod* _real;
+ int64_t _keysInserted = 0;
+ bool _isMultiKey = false;
+ };
- /**
- * Starts a bulk operation.
- * You work on the returned BulkBuilder and then call commitBulk.
- * This can return NULL, meaning bulk mode is not available.
- *
- * It is only legal to initiate bulk when the index is new and empty.
- */
- std::unique_ptr<BulkBuilder> initiateBulk();
+ /**
+ * Starts a bulk operation.
+ * You work on the returned BulkBuilder and then call commitBulk.
+ * This can return NULL, meaning bulk mode is not available.
+ *
+ * It is only legal to initiate bulk when the index is new and empty.
+ */
+ std::unique_ptr<BulkBuilder> initiateBulk();
- /**
- * Call this when you are ready to finish your bulk work.
- * Pass in the BulkBuilder returned from initiateBulk.
- * @param bulk - something created from initiateBulk
- * @param mayInterrupt - is this commit interruptable (will cancel)
- * @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
- * @param dups - if NULL, error out on dups if not allowed
- * if not NULL, put the bad RecordIds there
- */
- Status commitBulk(OperationContext* txn,
- std::unique_ptr<BulkBuilder> bulk,
- bool mayInterrupt,
- bool dupsAllowed,
- std::set<RecordId>* dups);
+ /**
+ * Call this when you are ready to finish your bulk work.
+ * Pass in the BulkBuilder returned from initiateBulk.
+ * @param bulk - the BulkBuilder created by initiateBulk
+ * @param mayInterrupt - whether this commit is interruptible (an interrupt cancels it)
+ * @param dupsAllowed - if false, error or fill 'dups' if any duplicate values are found
+ * @param dups - if NULL, error out on dups if not allowed
+ * if not NULL, put the bad RecordIds there
+ */
+ Status commitBulk(OperationContext* txn,
+ std::unique_ptr<BulkBuilder> bulk,
+ bool mayInterrupt,
+ bool dupsAllowed,
+ std::set<RecordId>* dups);
- /**
- * Fills 'keys' with the keys that should be generated for 'obj' on this index.
- */
- virtual void getKeys(const BSONObj &obj, BSONObjSet *keys) const = 0;
+ /**
+ * Fills 'keys' with the keys that should be generated for 'obj' on this index.
+ */
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const = 0;
- protected:
- // Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
- bool ignoreKeyTooLong(OperationContext* txn);
+protected:
+ // Determines whether it's OK to ignore ErrorCodes::KeyTooLong for this OperationContext
+ bool ignoreKeyTooLong(OperationContext* txn);
- IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
- const IndexDescriptor* _descriptor;
+ IndexCatalogEntry* _btreeState; // owned by IndexCatalogEntry
+ const IndexDescriptor* _descriptor;
- private:
- void removeOneKey(OperationContext* txn,
- const BSONObj& key,
- const RecordId& loc,
- bool dupsAllowed);
+private:
+ void removeOneKey(OperationContext* txn,
+ const BSONObj& key,
+ const RecordId& loc,
+ bool dupsAllowed);
- const std::unique_ptr<SortedDataInterface> _newInterface;
- };
+ const std::unique_ptr<SortedDataInterface> _newInterface;
+};
- /**
- * Updates are two steps: verify that it's a valid update, and perform it.
- * validateUpdate fills out the UpdateStatus and update actually applies it.
- */
- class UpdateTicket {
- // No public interface
- private:
- friend class IndexAccessMethod;
+/**
+ * Updates are two steps: verify that it's a valid update, and perform it.
+ * validateUpdate fills out the UpdateStatus and update actually applies it.
+ */
+class UpdateTicket {
+ // No public interface
+private:
+ friend class IndexAccessMethod;
- bool _isValid;
+ bool _isValid;
- BSONObjSet oldKeys;
- BSONObjSet newKeys;
+ BSONObjSet oldKeys;
+ BSONObjSet newKeys;
- // These point into the sets oldKeys and newKeys.
- std::vector<BSONObj*> removed;
- std::vector<BSONObj*> added;
+ // These point into the sets oldKeys and newKeys.
+ std::vector<BSONObj*> removed;
+ std::vector<BSONObj*> added;
- RecordId loc;
- bool dupsAllowed;
- };
+ RecordId loc;
+ bool dupsAllowed;
+};
- /**
- * Flags we can set for inserts and deletes (and updates, which are kind of both).
- */
- struct InsertDeleteOptions {
- InsertDeleteOptions() : logIfError(false), dupsAllowed(false) { }
+/**
+ * Flags we can set for inserts and deletes (and updates, which are kind of both).
+ */
+struct InsertDeleteOptions {
+ InsertDeleteOptions() : logIfError(false), dupsAllowed(false) {}
- // If there's an error, log() it.
- bool logIfError;
+ // If there's an error, log() it.
+ bool logIfError;
- // Are duplicate keys allowed in the index?
- bool dupsAllowed;
- };
+ // Are duplicate keys allowed in the index?
+ bool dupsAllowed;
+};
} // namespace mongo
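Pieced together from the declarations above, a typical bulk build is: initiateBulk(), a loop of BulkBuilder::insert() calls, then commitBulk(). The sketch below shows that call sequence only; the function name buildIndexSketch and the docs vector are invented, and real callers iterate a collection scan rather than an in-memory vector.

// Sketch under the assumptions stated above; error handling is minimal.
Status buildIndexSketch(OperationContext* txn,
                        IndexAccessMethod* iam,
                        const std::vector<std::pair<BSONObj, RecordId>>& docs) {
    std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk = iam->initiateBulk();
    if (!bulk)
        return Status(ErrorCodes::InternalError, "bulk mode not available");

    InsertDeleteOptions options;
    options.dupsAllowed = false;

    for (const auto& doc : docs) {
        int64_t numInserted = 0;
        Status status = bulk->insert(txn, doc.first, doc.second, options, &numInserted);
        if (!status.isOK())
            return status;
    }

    // Pass a set so duplicates are collected rather than treated as fatal.
    std::set<RecordId> dupsToDrop;
    return iam->commitBulk(
        txn, std::move(bulk), /*mayInterrupt*/ true, /*dupsAllowed*/ false, &dupsToDrop);
}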
diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp
index 523b778163d..9d0de13b4d1 100644
--- a/src/mongo/db/index/index_descriptor.cpp
+++ b/src/mongo/db/index/index_descriptor.cpp
@@ -35,61 +35,54 @@
namespace mongo {
- namespace {
- void populateOptionsMap( std::map<StringData, BSONElement>& theMap,
- const BSONObj& spec ) {
-
- BSONObjIterator it( spec );
- while ( it.more() ) {
- const BSONElement e = it.next();
-
- StringData fieldName = e.fieldNameStringData();
- if ( fieldName == "key" ||
- fieldName == "ns" ||
- fieldName == "name" ||
- fieldName == "v" || // not considered for equivalence
- fieldName == "textIndexVersion" || // same as "v"
- fieldName == "2dsphereIndexVersion" || // same as "v"
- fieldName == "background" || // this is a creation time option only
- fieldName == "dropDups" || // this is now ignored
- fieldName == "sparse" || // checked specially
- fieldName == "unique" // check specially
- ) {
- continue;
- }
- theMap[ fieldName ] = e;
- }
+namespace {
+void populateOptionsMap(std::map<StringData, BSONElement>& theMap, const BSONObj& spec) {
+ BSONObjIterator it(spec);
+ while (it.more()) {
+ const BSONElement e = it.next();
+
+ StringData fieldName = e.fieldNameStringData();
+ if (fieldName == "key" || fieldName == "ns" || fieldName == "name" ||
+ fieldName == "v" || // not considered for equivalence
+ fieldName == "textIndexVersion" || // same as "v"
+ fieldName == "2dsphereIndexVersion" || // same as "v"
+ fieldName == "background" || // this is a creation time option only
+ fieldName == "dropDups" || // this is now ignored
+ fieldName == "sparse" || // checked specially
+ fieldName == "unique" // check specially
+ ) {
+ continue;
}
+ theMap[fieldName] = e;
}
+}
+}  // namespace
- bool IndexDescriptor::areIndexOptionsEquivalent( const IndexDescriptor* other ) const {
-
- if ( isSparse() != other->isSparse() ) {
- return false;
- }
-
- if ( !isIdIndex() &&
- unique() != other->unique() ) {
- // Note: { _id: 1 } or { _id: -1 } implies unique: true.
- return false;
- }
+bool IndexDescriptor::areIndexOptionsEquivalent(const IndexDescriptor* other) const {
+ if (isSparse() != other->isSparse()) {
+ return false;
+ }
- // Then compare the rest of the options.
+ if (!isIdIndex() && unique() != other->unique()) {
+ // Note: { _id: 1 } or { _id: -1 } implies unique: true.
+ return false;
+ }
- std::map<StringData, BSONElement> existingOptionsMap;
- populateOptionsMap( existingOptionsMap, infoObj() );
+ // Then compare the rest of the options.
- std::map<StringData, BSONElement> newOptionsMap;
- populateOptionsMap( newOptionsMap, other->infoObj() );
+ std::map<StringData, BSONElement> existingOptionsMap;
+ populateOptionsMap(existingOptionsMap, infoObj());
- return existingOptionsMap == newOptionsMap;
- }
+ std::map<StringData, BSONElement> newOptionsMap;
+ populateOptionsMap(newOptionsMap, other->infoObj());
- void IndexDescriptor::_checkOk() const {
- if ( _magic == 123987 )
- return;
- log() << "uh oh: " << (void*)(this) << " " << _magic;
- invariant(0);
- }
+ return existingOptionsMap == newOptionsMap;
+}
+void IndexDescriptor::_checkOk() const {
+ if (_magic == 123987)
+ return;
+ log() << "uh oh: " << (void*)(this) << " " << _magic;
+ invariant(0);
+}
}
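Restated, areIndexOptionsEquivalent drops the identity and creation-time fields, handles sparse and unique separately, and compares whatever options remain. A toy model of that comparison over flat string maps instead of BSON (the function name optionsEquivalent is invented):

#include <map>
#include <set>
#include <string>

// Toy model only: compare two index specs after removing the fields that
// populateOptionsMap skips. The real code compares BSONElements, not strings.
bool optionsEquivalent(const std::map<std::string, std::string>& a,
                       const std::map<std::string, std::string>& b) {
    static const std::set<std::string> ignored = {
        "key", "ns", "name", "v", "textIndexVersion", "2dsphereIndexVersion",
        "background", "dropDups", "sparse", "unique"};  // sparse/unique checked separately

    std::map<std::string, std::string> fa, fb;
    for (const auto& kv : a)
        if (!ignored.count(kv.first))
            fa.insert(kv);
    for (const auto& kv : b)
        if (!ignored.count(kv.first))
            fb.insert(kv);
    return fa == fb;
}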
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 742e5a3ea16..2bceb9aeb0a 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -39,181 +39,215 @@
namespace mongo {
- class IndexCatalog;
- class IndexCatalogEntry;
- class IndexCatalogEntryContainer;
+class IndexCatalog;
+class IndexCatalogEntry;
+class IndexCatalogEntryContainer;
+/**
+ * A cache of information computed from the memory-mapped per-index data (OnDiskIndexData).
+ * Contains accessors for the various immutable index parameters, and an accessor for the
+ * mutable "head" pointer which is index-specific.
+ *
+ * All synchronization is the responsibility of the caller.
+ */
+class IndexDescriptor {
+public:
/**
- * A cache of information computed from the memory-mapped per-index data (OnDiskIndexData).
- * Contains accessors for the various immutable index parameters, and an accessor for the
- * mutable "head" pointer which is index-specific.
- *
- * All synchronization is the responsibility of the caller.
+ * OnDiskIndexData is a pointer to the memory-mapped per-index data.
+ * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
*/
- class IndexDescriptor {
- public:
- /**
- * OnDiskIndexData is a pointer to the memory mapped per-index data.
- * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
- */
- IndexDescriptor(Collection* collection, const std::string& accessMethodName, BSONObj infoObj)
- : _magic(123987),
- _collection(collection),
- _accessMethodName(accessMethodName),
- _infoObj(infoObj.getOwned()),
- _numFields(infoObj.getObjectField("key").nFields()),
- _keyPattern(infoObj.getObjectField("key").getOwned()),
- _indexName(infoObj.getStringField("name")),
- _parentNS(infoObj.getStringField("ns")),
- _isIdIndex(isIdIndexPattern( _keyPattern )),
- _sparse(infoObj["sparse"].trueValue()),
- _unique( _isIdIndex || infoObj["unique"].trueValue() ),
- _partial(!infoObj["partialFilterExpression"].eoo()),
- _cachedEntry( NULL )
- {
- _indexNamespace = makeIndexNamespace( _parentNS, _indexName );
-
- _version = 0;
- BSONElement e = _infoObj["v"];
- if ( e.isNumber() ) {
- _version = e.numberInt();
- }
- }
-
- ~IndexDescriptor() {
- _magic = 555;
- }
-
- //
- // Information about the key pattern.
- //
-
- /**
- * Return the user-provided index key pattern.
- * Example: {geo: "2dsphere", nonGeo: 1}
- * Example: {foo: 1, bar: -1}
- */
- const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; }
-
- // How many fields do we index / are in the key pattern?
- int getNumFields() const { _checkOk(); return _numFields; }
-
- //
- // Information about the index's namespace / collection.
- //
-
- // Return the name of the index.
- const std::string& indexName() const { _checkOk(); return _indexName; }
-
- // Return the name of the indexed collection.
- const std::string& parentNS() const { return _parentNS; }
-
- // Return the name of this index's storage area (database.table.$index)
- const std::string& indexNamespace() const { return _indexNamespace; }
-
- // Return the name of the access method we must use to access this index's data.
- const std::string& getAccessMethodName() const { return _accessMethodName; }
-
- //
- // Properties every index has
- //
-
- // Return what version of index this is.
- int version() const { return _version; }
-
- // May each key only occur once?
- bool unique() const { return _unique; }
-
- // Is this index sparse?
- bool isSparse() const { return _sparse; }
-
- // Is this a partial index?
- bool isPartial() const { return _partial; }
-
- // Is this index multikey?
- bool isMultikey( OperationContext* txn ) const {
- _checkOk();
- return _collection->getIndexCatalog()->isMultikey( txn, this );
+ IndexDescriptor(Collection* collection, const std::string& accessMethodName, BSONObj infoObj)
+ : _magic(123987),
+ _collection(collection),
+ _accessMethodName(accessMethodName),
+ _infoObj(infoObj.getOwned()),
+ _numFields(infoObj.getObjectField("key").nFields()),
+ _keyPattern(infoObj.getObjectField("key").getOwned()),
+ _indexName(infoObj.getStringField("name")),
+ _parentNS(infoObj.getStringField("ns")),
+ _isIdIndex(isIdIndexPattern(_keyPattern)),
+ _sparse(infoObj["sparse"].trueValue()),
+ _unique(_isIdIndex || infoObj["unique"].trueValue()),
+ _partial(!infoObj["partialFilterExpression"].eoo()),
+ _cachedEntry(NULL) {
+ _indexNamespace = makeIndexNamespace(_parentNS, _indexName);
+
+ _version = 0;
+ BSONElement e = _infoObj["v"];
+ if (e.isNumber()) {
+ _version = e.numberInt();
}
+ }
- bool isIdIndex() const { _checkOk(); return _isIdIndex; }
-
- //
- // Properties that are Index-specific.
- //
-
- // Allow access to arbitrary fields in the per-index info object. Some indices stash
- // index-specific data there.
- BSONElement getInfoElement(const std::string& name) const { return _infoObj[name]; }
+ ~IndexDescriptor() {
+ _magic = 555;
+ }
- //
- // "Internals" of accessing the index, used by IndexAccessMethod(s).
- //
+ //
+ // Information about the key pattern.
+ //
- // Return a (rather compact) std::string representation.
- std::string toString() const { _checkOk(); return _infoObj.toString(); }
-
- // Return the info object.
- const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
-
- // Both the collection and the catalog must outlive the IndexDescriptor
- const Collection* getCollection() const { return _collection; }
- const IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
-
- bool areIndexOptionsEquivalent( const IndexDescriptor* other ) const;
-
- static bool isIdIndexPattern( const BSONObj &pattern ) {
- BSONObjIterator i(pattern);
- BSONElement e = i.next();
- //_id index must have form exactly {_id : 1} or {_id : -1}.
- //Allows an index of form {_id : "hashed"} to exist but
- //do not consider it to be the primary _id index
- if(! ( strcmp(e.fieldName(), "_id") == 0
- && (e.numberInt() == 1 || e.numberInt() == -1)))
- return false;
- return i.next().eoo();
- }
-
- static std::string makeIndexNamespace( StringData ns,
- StringData name ) {
- return ns.toString() + ".$" + name.toString();
- }
-
- private:
-
- void _checkOk() const;
-
- int _magic;
-
- // Related catalog information of the parent collection
- Collection* _collection;
-
- // What access method should we use for this index?
- std::string _accessMethodName;
-
- // The BSONObj describing the index. Accessed through the various members above.
- const BSONObj _infoObj;
-
- // --- cached data from _infoObj
-
- int64_t _numFields; // How many fields are indexed?
- BSONObj _keyPattern;
- std::string _indexName;
- std::string _parentNS;
- std::string _indexNamespace;
- bool _isIdIndex;
- bool _sparse;
- bool _unique;
- bool _partial;
- int _version;
-
- // only used by IndexCatalogEntryContainer to do caching for perf
- // users not allowed to touch, and not part of API
- IndexCatalogEntry* _cachedEntry;
-
- friend class IndexCatalog;
- friend class IndexCatalogEntry;
- friend class IndexCatalogEntryContainer;
- };
+ /**
+ * Return the user-provided index key pattern.
+ * Example: {geo: "2dsphere", nonGeo: 1}
+ * Example: {foo: 1, bar: -1}
+ */
+ const BSONObj& keyPattern() const {
+ _checkOk();
+ return _keyPattern;
+ }
+
+ // How many fields do we index / are in the key pattern?
+ int getNumFields() const {
+ _checkOk();
+ return _numFields;
+ }
+
+ //
+ // Information about the index's namespace / collection.
+ //
+
+ // Return the name of the index.
+ const std::string& indexName() const {
+ _checkOk();
+ return _indexName;
+ }
+
+ // Return the name of the indexed collection.
+ const std::string& parentNS() const {
+ return _parentNS;
+ }
+
+ // Return the name of this index's storage area (database.table.$index)
+ const std::string& indexNamespace() const {
+ return _indexNamespace;
+ }
+
+ // Return the name of the access method we must use to access this index's data.
+ const std::string& getAccessMethodName() const {
+ return _accessMethodName;
+ }
+
+ //
+ // Properties every index has
+ //
+
+ // Return what version of index this is.
+ int version() const {
+ return _version;
+ }
+
+ // May each key only occur once?
+ bool unique() const {
+ return _unique;
+ }
+
+ // Is this index sparse?
+ bool isSparse() const {
+ return _sparse;
+ }
+
+ // Is this a partial index?
+ bool isPartial() const {
+ return _partial;
+ }
+
+ // Is this index multikey?
+ bool isMultikey(OperationContext* txn) const {
+ _checkOk();
+ return _collection->getIndexCatalog()->isMultikey(txn, this);
+ }
+
+ bool isIdIndex() const {
+ _checkOk();
+ return _isIdIndex;
+ }
+
+ //
+ // Properties that are Index-specific.
+ //
+
+ // Allow access to arbitrary fields in the per-index info object. Some indices stash
+ // index-specific data there.
+ BSONElement getInfoElement(const std::string& name) const {
+ return _infoObj[name];
+ }
+
+ //
+ // "Internals" of accessing the index, used by IndexAccessMethod(s).
+ //
+
+ // Return a (rather compact) std::string representation.
+ std::string toString() const {
+ _checkOk();
+ return _infoObj.toString();
+ }
+
+ // Return the info object.
+ const BSONObj& infoObj() const {
+ _checkOk();
+ return _infoObj;
+ }
+
+ // Both the collection and the catalog must outlive the IndexDescriptor
+ const Collection* getCollection() const {
+ return _collection;
+ }
+ const IndexCatalog* getIndexCatalog() const {
+ return _collection->getIndexCatalog();
+ }
+
+ bool areIndexOptionsEquivalent(const IndexDescriptor* other) const;
+
+ static bool isIdIndexPattern(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ // The _id index must have exactly the form {_id: 1} or {_id: -1}.
+ // An index of the form {_id: "hashed"} is allowed to exist, but it is
+ // not considered to be the primary _id index.
+ if (!(strcmp(e.fieldName(), "_id") == 0 && (e.numberInt() == 1 || e.numberInt() == -1)))
+ return false;
+ return i.next().eoo();
+ }
+
+ static std::string makeIndexNamespace(StringData ns, StringData name) {
+ return ns.toString() + ".$" + name.toString();
+ }
+
+private:
+ void _checkOk() const;
+
+ int _magic;
+
+ // Related catalog information of the parent collection
+ Collection* _collection;
+
+ // What access method should we use for this index?
+ std::string _accessMethodName;
+
+ // The BSONObj describing the index. Accessed through the various members above.
+ const BSONObj _infoObj;
+
+ // --- cached data from _infoObj
+
+ int64_t _numFields; // How many fields are indexed?
+ BSONObj _keyPattern;
+ std::string _indexName;
+ std::string _parentNS;
+ std::string _indexNamespace;
+ bool _isIdIndex;
+ bool _sparse;
+ bool _unique;
+ bool _partial;
+ int _version;
+
+ // only used by IndexCatalogEntryContainer to do caching for perf
+ // users not allowed to touch, and not part of API
+ IndexCatalogEntry* _cachedEntry;
+
+ friend class IndexCatalog;
+ friend class IndexCatalogEntry;
+ friend class IndexCatalogEntryContainer;
+};
} // namespace mongo
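The isIdIndexPattern rule above is worth restating: the pattern must contain the single field _id with direction exactly 1 or -1; {_id: "hashed"} fails because numberInt() on a string yields 0. Below is a standalone restatement with the key pattern modeled as (field, direction) pairs, mapping non-numeric values to 0 as the BSON accessor does; the representation is invented for the sketch.

#include <string>
#include <utility>
#include <vector>

// Standalone restatement of the _id-index pattern rule. A direction of 0
// models non-numeric values such as "hashed".
bool isIdIndexPatternSketch(const std::vector<std::pair<std::string, int>>& pattern) {
    if (pattern.empty())
        return false;
    const auto& first = pattern.front();
    if (first.first != "_id" || (first.second != 1 && first.second != -1))
        return false;
    return pattern.size() == 1;  // _id must be the only field
}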
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index a9c9630ef67..3fcf4d013a1 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -44,67 +44,66 @@
namespace mongo {
- static const string kIndexVersionFieldName("2dsphereIndexVersion");
-
- S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
- : IndexAccessMethod(btreeState, btree) {
-
- const IndexDescriptor* descriptor = btreeState->descriptor();
-
- ExpressionParams::parse2dsphereParams(descriptor->infoObj(),
- &_params);
-
- int geoFields = 0;
-
- // Categorize the fields we're indexing and make sure we have a geo field.
- BSONObjIterator i(descriptor->keyPattern());
- while (i.more()) {
- BSONElement e = i.next();
- if (e.type() == String && IndexNames::GEO_2DSPHERE == e.String() ) {
- ++geoFields;
- }
- else {
- // We check for numeric in 2d, so that's the check here
- uassert( 16823, (string)"Cannot use " + IndexNames::GEO_2DSPHERE +
- " index with other special index types: " + e.toString(),
- e.isNumber() );
- }
- }
-
- uassert(16750, "Expect at least one geo field, spec=" + descriptor->keyPattern().toString(),
- geoFields >= 1);
-
- if (descriptor->isSparse()) {
- warning() << "Sparse option ignored for index spec "
- << descriptor->keyPattern().toString() << "\n";
+static const string kIndexVersionFieldName("2dsphereIndexVersion");
+
+S2AccessMethod::S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree)
+ : IndexAccessMethod(btreeState, btree) {
+ const IndexDescriptor* descriptor = btreeState->descriptor();
+
+ ExpressionParams::parse2dsphereParams(descriptor->infoObj(), &_params);
+
+ int geoFields = 0;
+
+ // Categorize the fields we're indexing and make sure we have a geo field.
+ BSONObjIterator i(descriptor->keyPattern());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && IndexNames::GEO_2DSPHERE == e.String()) {
+ ++geoFields;
+ } else {
+ // 2d indexes require the other fields to be numeric, so apply the same check here.
+ uassert(16823,
+ (string) "Cannot use " + IndexNames::GEO_2DSPHERE +
+ " index with other special index types: " + e.toString(),
+ e.isNumber());
}
}
- // static
- BSONObj S2AccessMethod::fixSpec(const BSONObj& specObj) {
- // If the spec object has the field "2dsphereIndexVersion", validate it. If it doesn't, add
- // {2dsphereIndexVersion: 2}, which is the default for newly-built indexes.
-
- BSONElement indexVersionElt = specObj[kIndexVersionFieldName];
- if (indexVersionElt.eoo()) {
- BSONObjBuilder bob;
- bob.appendElements(specObj);
- bob.append(kIndexVersionFieldName, S2_INDEX_VERSION_2);
- return bob.obj();
- }
+ uassert(16750,
+ "Expect at least one geo field, spec=" + descriptor->keyPattern().toString(),
+ geoFields >= 1);
- const int indexVersion = indexVersionElt.numberInt();
- uassert(17394,
- str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only support versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << "]",
- indexVersionElt.isNumber() && (indexVersion == S2_INDEX_VERSION_2
- || indexVersion == S2_INDEX_VERSION_1));
- return specObj;
+ if (descriptor->isSparse()) {
+ warning() << "Sparse option ignored for index spec " << descriptor->keyPattern().toString()
+ << "\n";
}
-
- void S2AccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
- ExpressionKeysPrivate::getS2Keys(obj, _descriptor->keyPattern(), _params, keys);
+}
+
+// static
+BSONObj S2AccessMethod::fixSpec(const BSONObj& specObj) {
+ // If the spec object has the field "2dsphereIndexVersion", validate it. If it doesn't, add
+ // {2dsphereIndexVersion: 2}, which is the default for newly-built indexes.
+
+ BSONElement indexVersionElt = specObj[kIndexVersionFieldName];
+ if (indexVersionElt.eoo()) {
+ BSONObjBuilder bob;
+ bob.appendElements(specObj);
+ bob.append(kIndexVersionFieldName, S2_INDEX_VERSION_2);
+ return bob.obj();
}
+ const int indexVersion = indexVersionElt.numberInt();
+ uassert(17394,
+ str::stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
+ << indexVersionElt << " }, only support versions: [" << S2_INDEX_VERSION_1
+ << "," << S2_INDEX_VERSION_2 << "]",
+ indexVersionElt.isNumber() &&
+ (indexVersion == S2_INDEX_VERSION_2 || indexVersion == S2_INDEX_VERSION_1));
+ return specObj;
+}
+
+void S2AccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) const {
+ ExpressionKeysPrivate::getS2Keys(obj, _descriptor->keyPattern(), _params, keys);
+}
+
} // namespace mongo
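Aside from brace and wrapping changes, the fixSpec() hunk above is behavior-neutral:
a spec with no 2dsphereIndexVersion field gains {2dsphereIndexVersion: 2}, and any
version other than 1 or 2 trips uassert(17394). A standalone sketch of that
defaulting/validation rule, with a std::map standing in for the BSON spec object
(illustrative only, not the MongoDB API):

    #include <map>
    #include <stdexcept>
    #include <string>

    std::map<std::string, int> fixSpecSketch(std::map<std::string, int> spec) {
        auto it = spec.find("2dsphereIndexVersion");
        if (it == spec.end()) {
            // Missing version: default to 2, the version for newly built indexes.
            spec["2dsphereIndexVersion"] = 2;
            return spec;
        }
        if (it->second != 1 && it->second != 2) {
            // Mirrors the uassert(17394) rejection of unsupported versions.
            throw std::invalid_argument("unsupported geo index version");
        }
        return spec;
    }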
diff --git a/src/mongo/db/index/s2_access_method.h b/src/mongo/db/index/s2_access_method.h
index 9ce655e47f7..15e0c3a773e 100644
--- a/src/mongo/db/index/s2_access_method.h
+++ b/src/mongo/db/index/s2_access_method.h
@@ -36,22 +36,22 @@
namespace mongo {
- class S2AccessMethod : public IndexAccessMethod {
- public:
- S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
-
- /**
- * Takes an index spec object for this index and returns a copy tweaked to conform to the
- * expected format. When an index build is initiated, this function is called on the spec
- * object the user provides, and the return value of this function is the final spec object
- * that gets saved in the index catalog. Throws a UserException if 'specObj' is invalid.
- */
- static BSONObj fixSpec(const BSONObj& specObj);
-
- private:
- virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
-
- S2IndexingParams _params;
- };
+class S2AccessMethod : public IndexAccessMethod {
+public:
+ S2AccessMethod(IndexCatalogEntry* btreeState, SortedDataInterface* btree);
+
+ /**
+ * Takes an index spec object for this index and returns a copy tweaked to conform to the
+ * expected format. When an index build is initiated, this function is called on the spec
+ * object the user provides, and the return value of this function is the final spec object
+ * that gets saved in the index catalog. Throws a UserException if 'specObj' is invalid.
+ */
+ static BSONObj fixSpec(const BSONObj& specObj);
+
+private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys) const;
+
+ S2IndexingParams _params;
+};
} // namespace mongo
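Per the header comment, fixSpec() runs on the user-supplied spec when an index build
is initiated, and its return value is what gets saved in the index catalog. A hedged
call-site sketch (the helper function is hypothetical, not actual catalog code;
BSONObjBuilder and fixSpec are the real APIs from the sources above, include paths
assumed):

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/db/index/s2_access_method.h"

    namespace mongo {
    // Hypothetical helper: normalize a user spec before persisting it.
    BSONObj normalizeUserSpec() {
        BSONObjBuilder bob;
        bob.append("loc", "2dsphere");  // user omitted 2dsphereIndexVersion
        // fixSpec throws a UserException on an invalid version; here it
        // simply appends {2dsphereIndexVersion: 2}.
        return S2AccessMethod::fixSpec(bob.obj());
    }
    }  // namespace mongo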
diff --git a/src/mongo/db/index/s2_common.h b/src/mongo/db/index/s2_common.h
index 3df3c833694..fad4087bb89 100644
--- a/src/mongo/db/index/s2_common.h
+++ b/src/mongo/db/index/s2_common.h
@@ -38,52 +38,52 @@
namespace mongo {
- // An enum describing the version of an S2 index.
- enum S2IndexVersion {
- // The first version of the S2 index, introduced in MongoDB 2.4.0. Compatible with MongoDB
- // 2.4.0 and later. Supports the following GeoJSON objects: Point, LineString, Polygon.
- S2_INDEX_VERSION_1 = 1,
+// An enum describing the version of an S2 index.
+enum S2IndexVersion {
+ // The first version of the S2 index, introduced in MongoDB 2.4.0. Compatible with MongoDB
+ // 2.4.0 and later. Supports the following GeoJSON objects: Point, LineString, Polygon.
+ S2_INDEX_VERSION_1 = 1,
- // The current version of the S2 index, introduced in MongoDB 2.6.0. Compatible with
- // MongoDB 2.6.0 and later. Introduced support for the following GeoJSON objects:
- // MultiPoint, MultiLineString, MultiPolygon, GeometryCollection.
- S2_INDEX_VERSION_2 = 2
- };
+ // The current version of the S2 index, introduced in MongoDB 2.6.0. Compatible with
+ // MongoDB 2.6.0 and later. Introduced support for the following GeoJSON objects:
+ // MultiPoint, MultiLineString, MultiPolygon, GeometryCollection.
+ S2_INDEX_VERSION_2 = 2
+};
- struct S2IndexingParams {
- // Since we take the cartesian product when we generate keys for an insert,
- // we need a cap.
- size_t maxKeysPerInsert;
- // This is really an advisory parameter that we pass to the cover generator. The
- // finest/coarsest index level determine the required # of cells.
- int maxCellsInCovering;
- // What's the finest grained level that we'll index? When we query for a point
- // we start at that -- we index nothing finer than this.
- int finestIndexedLevel;
- // And, what's the coarsest? When we search in larger coverings we know we
- // can stop here -- we index nothing coarser than this.
- int coarsestIndexedLevel;
- // Version of this index (specific to the index type).
- S2IndexVersion indexVersion;
+struct S2IndexingParams {
+    // Since we take the Cartesian product when generating keys for an insert,
+    // we need a cap.
+ size_t maxKeysPerInsert;
+    // This is really an advisory parameter that we pass to the cover generator. The
+    // finest/coarsest index levels determine the required number of cells.
+ int maxCellsInCovering;
+    // What's the finest-grained level that we'll index? When we query for a point,
+    // we start there -- we index nothing finer than this.
+    int finestIndexedLevel;
+    // And what's the coarsest? When we search larger coverings we know we can
+    // stop here -- we index nothing coarser than this.
+    int coarsestIndexedLevel;
+ // Version of this index (specific to the index type).
+ S2IndexVersion indexVersion;
- double radius;
+ double radius;
- std::string toString() const {
- std::stringstream ss;
- ss << "maxKeysPerInsert: " << maxKeysPerInsert << std::endl;
- ss << "maxCellsInCovering: " << maxCellsInCovering << std::endl;
- ss << "finestIndexedLevel: " << finestIndexedLevel << std::endl;
- ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << std::endl;
- ss << "indexVersion: " << indexVersion << std::endl;
- return ss.str();
- }
+ std::string toString() const {
+ std::stringstream ss;
+ ss << "maxKeysPerInsert: " << maxKeysPerInsert << std::endl;
+ ss << "maxCellsInCovering: " << maxCellsInCovering << std::endl;
+ ss << "finestIndexedLevel: " << finestIndexedLevel << std::endl;
+ ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << std::endl;
+ ss << "indexVersion: " << indexVersion << std::endl;
+ return ss.str();
+ }
- void configureCoverer(S2RegionCoverer *coverer) const {
- coverer->set_min_level(coarsestIndexedLevel);
- coverer->set_max_level(finestIndexedLevel);
- // This is advisory; the two above are strict.
- coverer->set_max_cells(maxCellsInCovering);
- }
- };
+ void configureCoverer(S2RegionCoverer* coverer) const {
+ coverer->set_min_level(coarsestIndexedLevel);
+ coverer->set_max_level(finestIndexedLevel);
+ // This is advisory; the two above are strict.
+ coverer->set_max_cells(maxCellsInCovering);
+ }
+};
} // namespace mongo
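One subtlety in configureCoverer() above is the naming flip: S2 cell levels grow
finer as the number increases, so coarsestIndexedLevel becomes the coverer's
min_level and finestIndexedLevel its max_level. A hedged usage sketch (field values
are illustrative, not the server defaults; the s2 include path is assumed):

    #include "third_party/s2/s2regioncoverer.h"  // include path assumed

    #include "mongo/db/index/s2_common.h"

    namespace mongo {
    void coverExample() {
        S2IndexingParams params;
        params.maxKeysPerInsert = 200;    // illustrative values only
        params.maxCellsInCovering = 50;
        params.coarsestIndexedLevel = 0;  // level 0 = largest cells
        params.finestIndexedLevel = 23;   // higher level = finer cells
        params.indexVersion = S2_INDEX_VERSION_2;

        S2RegionCoverer coverer;
        params.configureCoverer(&coverer);
        // The coverer now emits cells only at levels [0, 23]; the 50-cell
        // cap is advisory, per the comment in configureCoverer().
    }
    }  // namespace mongo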