author     Hari Khalsa <hkhalsa@10gen.com>   2013-04-11 16:22:55 -0400
committer  Hari Khalsa <hkhalsa@10gen.com>   2013-04-12 16:40:38 -0400
commit     ef3d06fb6c5402d56a90ad60fd61c761021b92a7 (patch)
tree       4097ad3c2fa71297977faddae8bc6c8028c3997e
parent     7797f459758a98bce90593e583f90c858a287a1c (diff)
download   mongo-ef3d06fb6c5402d56a90ad60fd61c761021b92a7.tar.gz
migrate fts/haystack + add/del logic SERVER-8791 SERVER-9164 SERVER-9165
-rw-r--r--  src/mongo/SConscript                                  |   2
-rw-r--r--  src/mongo/db/btreecursor.cpp                          |   3
-rw-r--r--  src/mongo/db/geo/haystack.cpp                         |  28
-rw-r--r--  src/mongo/db/index/btree_access_method.cpp            |  15
-rw-r--r--  src/mongo/db/index/catalog_hack.h                     |  19
-rw-r--r--  src/mongo/db/index/emulated_cursor.h                  |   6
-rw-r--r--  src/mongo/db/index/fts_access_method.cpp              |  33
-rw-r--r--  src/mongo/db/index/fts_access_method.h                |  42
-rw-r--r--  src/mongo/db/index/hash_access_method.h               |   1
-rw-r--r--  src/mongo/db/index/haystack_access_method.cpp         | 207
-rw-r--r--  src/mongo/db/index/haystack_access_method.h           |  71
-rw-r--r--  src/mongo/db/index/haystack_access_method_internal.h  |  69
-rw-r--r--  src/mongo/db/index/index_access_method.h              |   7
-rw-r--r--  src/mongo/db/index/index_descriptor.h                 |  13
-rw-r--r--  src/mongo/db/index_update.cpp                         | 335
-rw-r--r--  src/mongo/db/index_update.h                           |   4
-rw-r--r--  src/mongo/db/keypattern.cpp                           |  11
-rw-r--r--  src/mongo/db/keypattern.h                             |   6
-rw-r--r--  src/mongo/db/pdfile.cpp                               |  51
-rw-r--r--  src/mongo/db/query_plan.cpp                           |   7
20 files changed, 685 insertions, 245 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index c026ce969c2..64ffefe720f 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -398,8 +398,10 @@ serverOnlyFiles = [ "db/curop.cpp",
"db/index/btree_index_cursor.cpp",
"db/index/btree_interface.cpp",
"db/index/btree_key_generator.cpp",
+ "db/index/fts_access_method.cpp",
"db/index/hash_access_method.cpp",
"db/index/hash_index_cursor.cpp",
+ "db/index/haystack_access_method.cpp",
"db/index/s2_access_method.cpp",
"db/index/s2_index_cursor.cpp",
"db/index/s2_near_cursor.cpp",
diff --git a/src/mongo/db/btreecursor.cpp b/src/mongo/db/btreecursor.cpp
index 20ca81eb2e9..3347d488168 100644
--- a/src/mongo/db/btreecursor.cpp
+++ b/src/mongo/db/btreecursor.cpp
@@ -66,7 +66,8 @@ namespace mongo {
_order = indexDetails.keyPattern();
}
- void BtreeCursor::init( const BSONObj& sk, const BSONObj& ek, bool endKeyInclusive, int direction ) {
+ void BtreeCursor::init(const BSONObj& sk, const BSONObj& ek, bool endKeyInclusive,
+ int direction ) {
_finishConstructorInit();
startKey = sk;
endKey = ek;
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
index 30c3abc1fd6..d5ad671064c 100644
--- a/src/mongo/db/geo/haystack.cpp
+++ b/src/mongo/db/geo/haystack.cpp
@@ -1,3 +1,4 @@
+// XXX THIS FILE IS DEPRECATED. PLEASE DON'T MODIFY.
// db/geo/haystack.cpp
/**
@@ -23,6 +24,10 @@
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
+#include "mongo/db/index/catalog_hack.h"
+#include "mongo/db/index/haystack_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/namespace-inl.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/index.h"
@@ -343,13 +348,6 @@ namespace mongo {
return false;
}
- int idxNum = idxs[0];
-
- IndexDetails& id = nsd->idx(idxNum);
- GeoHaystackSearchIndex *si =
- static_cast<GeoHaystackSearchIndex*>(id.getSpec().getType());
- verify(&id == si->getDetails());
-
BSONElement nearElt = cmdObj["near"];
BSONElement maxDistance = cmdObj["maxDistance"];
BSONElement search = cmdObj["search"];
@@ -362,8 +360,20 @@ namespace mongo {
if (cmdObj["limit"].isNumber())
limit = static_cast<unsigned>(cmdObj["limit"].numberInt());
- si->searchCommand(nsd, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
- result, limit);
+ int idxNum = idxs[0];
+ IndexDetails& id = nsd->idx(idxNum);
+ if (CatalogHack::testIndexMigration()) {
+ auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(nsd, idxNum));
+ auto_ptr<HaystackAccessMethod> ham(new HaystackAccessMethod(desc.get()));
+ ham->searchCommand(nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
+ &result, limit);
+ } else {
+ GeoHaystackSearchIndex *si =
+ static_cast<GeoHaystackSearchIndex*>(id.getSpec().getType());
+ verify(&id == si->getDetails());
+ si->searchCommand(nsd, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(),
+ result, limit);
+ }
return 1;
}
} nameSearchCommand;
diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp
index 0455fc65c4a..5eb217181cd 100644
--- a/src/mongo/db/index/btree_access_method.cpp
+++ b/src/mongo/db/index/btree_access_method.cpp
@@ -52,8 +52,9 @@ namespace mongo {
options.dupsAllowed, _descriptor->getOnDisk(), true);
++*numInserted;
} catch (AssertionException& e) {
- if (10287 == e.getCode() && options.dupsAllowed) {
- // Duplicate key, but our options say to ignore it.
+ if (10287 == e.getCode() && _descriptor->isBackgroundIndex()) {
+ // This is the duplicate key exception. We ignore it for some reason in BG
+ // indexing.
DEV log() << "info: key already in index during bg indexing (ok)\n";
} else if (!options.dupsAllowed) {
// Assuming it's a duplicate key exception. Clean up any inserted keys.
@@ -71,6 +72,10 @@ namespace mongo {
}
}
+ if (*numInserted > 1) {
+ _descriptor->setMultikey();
+ }
+
return ret;
}
@@ -161,8 +166,6 @@ namespace mongo {
data->loc = record;
data->dupsAllowed = options.dupsAllowed;
- status->_isMultiKey = data->newKeys.size() > 1;
-
setDifference(data->oldKeys, data->newKeys, &data->removed);
setDifference(data->newKeys, data->oldKeys, &data->added);
@@ -193,6 +196,10 @@ namespace mongo {
BtreeBasedPrivateUpdateData* data =
static_cast<BtreeBasedPrivateUpdateData*>(ticket._indexSpecificUpdateData.get());
+ if (data->oldKeys.size() + data->added.size() - data->removed.size() > 1) {
+ _descriptor->setMultikey();
+ }
+
for (size_t i = 0; i < data->added.size(); ++i) {
_interface->bt_insert(_descriptor->getHead(), data->loc, *data->added[i], _ordering,
data->dupsAllowed, _descriptor->getOnDisk(), true);
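The two btree_access_method.cpp hunks above move multikey bookkeeping into the access method: an index is flagged multikey the first time one document yields more than one key, and on update the post-image key count is |oldKeys| + |added| - |removed|. A minimal standalone sketch of that arithmetic, with std::set standing in for BSONObjSet (all names below are illustrative, not from the patch):

    #include <algorithm>
    #include <iterator>
    #include <set>
    #include <string>

    typedef std::set<std::string> KeySet;

    // a - b, the analogue of setDifference() in the patch.
    static KeySet difference(const KeySet& a, const KeySet& b) {
        KeySet out;
        std::set_difference(a.begin(), a.end(), b.begin(), b.end(),
                            std::inserter(out, out.begin()));
        return out;
    }

    // Mirrors the update() hunk: after applying the change, the document has
    // |oldKeys| + |added| - |removed| == |newKeys| entries in this index.
    static bool becomesMultikey(const KeySet& oldKeys, const KeySet& newKeys) {
        KeySet added = difference(newKeys, oldKeys);
        KeySet removed = difference(oldKeys, newKeys);
        return oldKeys.size() + added.size() - removed.size() > 1;
    }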
diff --git a/src/mongo/db/index/catalog_hack.h b/src/mongo/db/index/catalog_hack.h
index 57730422296..4c70dcfbb8e 100644
--- a/src/mongo/db/index/catalog_hack.h
+++ b/src/mongo/db/index/catalog_hack.h
@@ -15,7 +15,9 @@
*/
#include "mongo/db/index/btree_access_method.h"
+#include "mongo/db/index/fts_access_method.h"
#include "mongo/db/index/hash_access_method.h"
+#include "mongo/db/index/haystack_access_method.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/s2_access_method.h"
@@ -35,15 +37,28 @@ namespace mongo {
static bool isIndexMigrated(const BSONObj& keyPattern) {
string type = KeyPattern::findPluginName(keyPattern);
- return "hashed" == type || "2dsphere" == type;
+ return "" == type || "fts" == type || "hashed" == type || "2dsphere" == type
+ || "geoHaystack" == type;
}
- static IndexAccessMethod* getSpecialIndex(IndexDescriptor* desc) {
+ // If true, use EmulatedCursor + IndexCursor when answering "special" queries.
+ static bool testCursorMigration() { return true; }
+
+ // If true, use IndexAccessMethod for insert/delete when possible.
+ static bool testIndexMigration() { return true; }
+
+ static IndexAccessMethod* getIndex(IndexDescriptor* desc) {
string type = KeyPattern::findPluginName(desc->keyPattern());
if ("hashed" == type) {
return new HashAccessMethod(desc);
} else if ("2dsphere" == type) {
return new S2AccessMethod(desc);
+ } else if ("fts" == type) {
+ return new FTSAccessMethod(desc);
+ } else if ("geoHaystack" == type) {
+ return new HaystackAccessMethod(desc);
+ } else if ("" == type) {
+ return new BtreeAccessMethod(desc);
} else {
verify(0);
return NULL;
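Condensed, the gate that catalog_hack.h now exposes, and that every read and write path in this patch consults, is two checks ANDed together: a global kill switch plus a per-plugin allow list. A stub sketch (plain functions, not the real CatalogHack class):

    #include <string>

    // Per-plugin allow list, mirroring isIndexMigrated() above; the empty
    // plugin name denotes a plain btree index.
    static bool isIndexMigrated(const std::string& plugin) {
        return plugin == "" || plugin == "fts" || plugin == "hashed"
            || plugin == "2dsphere" || plugin == "geoHaystack";
    }

    // Global kill switches: one gates the read (cursor) path, one the
    // insert/delete path.
    static bool testCursorMigration() { return true; }
    static bool testIndexMigration()  { return true; }

    // Callers take the new layer only when both checks pass:
    //   if (testIndexMigration() && isIndexMigrated(plugin)) { /* new IndexAccessMethod */ }
    //   else                                                 { /* legacy code, DEPRECATED */ }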
diff --git a/src/mongo/db/index/emulated_cursor.h b/src/mongo/db/index/emulated_cursor.h
index d47576ae48d..386d2415b2f 100644
--- a/src/mongo/db/index/emulated_cursor.h
+++ b/src/mongo/db/index/emulated_cursor.h
@@ -50,12 +50,12 @@ namespace mongo {
int numWanted,
const BSONObj& keyPattern) {
- EmulatedCursor* ret = new EmulatedCursor(descriptor, indexAccessMethod,
- order, numWanted, keyPattern);
+ auto_ptr<EmulatedCursor> ret(new EmulatedCursor(descriptor, indexAccessMethod,
+ order, numWanted, keyPattern));
// Why do we have to do this? No reading from disk is allowed in constructors,
// and seeking involves reading from disk.
ret->seek(query);
- return ret;
+ return ret.release();
}
// We defer everything we can to the underlying cursor.
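The EmulatedCursor::make() change above is the standard factory exception-safety fix: park the freshly allocated object in a smart pointer across the member call that can throw, and release ownership only on success. The same pattern with stand-in types (std::unique_ptr here; the 2013 code predates C++11 and uses auto_ptr):

    #include <memory>

    struct Cursor {
        void seek() { /* reads from disk and may throw */ }
    };

    Cursor* makeCursor() {
        std::unique_ptr<Cursor> ret(new Cursor());
        ret->seek();           // if this throws, unique_ptr frees the cursor
        return ret.release();  // success: hand raw ownership to the caller
    }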
diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp
new file mode 100644
index 00000000000..bbb24ef18ef
--- /dev/null
+++ b/src/mongo/db/index/fts_access_method.cpp
@@ -0,0 +1,33 @@
+/**
+* Copyright (C) 2013 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "mongo/db/index/fts_access_method.h"
+#include "mongo/db/fts/fts_index_format.h"
+
+namespace mongo {
+
+ FTSAccessMethod::FTSAccessMethod(IndexDescriptor* descriptor)
+ : BtreeBasedAccessMethod(descriptor), _ftsSpec(descriptor->infoObj()) { }
+
+ void FTSAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) {
+ fts::FTSIndexFormat::getKeys(_ftsSpec, obj, keys);
+ }
+
+ Status FTSAccessMethod::newCursor(IndexCursor** out) {
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h
new file mode 100644
index 00000000000..9909924df87
--- /dev/null
+++ b/src/mongo/db/index/fts_access_method.h
@@ -0,0 +1,42 @@
+/**
+* Copyright (C) 2013 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "mongo/base/status.h"
+#include "mongo/db/fts/fts_spec.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index/btree_access_method_internal.h"
+#include "mongo/db/jsobj.h"
+
+namespace mongo {
+
+ class FTSAccessMethod : public BtreeBasedAccessMethod {
+ public:
+ FTSAccessMethod(IndexDescriptor* descriptor);
+ virtual ~FTSAccessMethod() { }
+
+ // Not implemented:
+ virtual Status newCursor(IndexCursor** out);
+
+ private:
+ // Implemented:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
+
+ fts::FTSSpec _ftsSpec;
+ };
+
+} //namespace mongo
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 9fe10dc463c..53455abb69f 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -35,6 +35,7 @@ namespace mongo {
using BtreeBasedAccessMethod::_ordering;
HashAccessMethod(IndexDescriptor* descriptor);
+ virtual ~HashAccessMethod() { }
virtual Status newCursor(IndexCursor** out);
diff --git a/src/mongo/db/index/haystack_access_method.cpp b/src/mongo/db/index/haystack_access_method.cpp
new file mode 100644
index 00000000000..ca44366378d
--- /dev/null
+++ b/src/mongo/db/index/haystack_access_method.cpp
@@ -0,0 +1,207 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mongo/db/index/haystack_access_method.h"
+
+#include "mongo/base/status.h"
+#include "mongo/db/btreecursor.h"
+#include "mongo/db/geo/hash.h"
+#include "mongo/db/index/haystack_access_method_internal.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/pdfile.h"
+
+namespace mongo {
+
+ static const string GEOSEARCHNAME = "geoHaystack";
+
+ HaystackAccessMethod::HaystackAccessMethod(IndexDescriptor* descriptor)
+ : BtreeBasedAccessMethod(descriptor) {
+
+ BSONElement e = descriptor->getInfoElement("bucketSize");
+ uassert(16777, "need bucketSize", e.isNumber());
+ _bucketSize = e.numberDouble();
+ uassert(16769, "bucketSize cannot be zero", _bucketSize != 0.0);
+
+ // Example:
+ // db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ BSONObjIterator i(descriptor->keyPattern());
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (e.type() == String && GEOSEARCHNAME == e.valuestr()) {
+ uassert(16770, "can't have more than one geo field", _geoField.size() == 0);
+ uassert(16771, "the geo field has to be first in index",
+ _otherFields.size() == 0);
+ _geoField = e.fieldName();
+ } else {
+ uassert(16772, "geoSearch can only have 1 non-geo field for now",
+ _otherFields.size() == 0);
+ _otherFields.push_back(e.fieldName());
+ }
+ }
+
+ uassert(16773, "no geo field specified", _geoField.size());
+ uassert(16774, "no non-geo fields specified", _otherFields.size());
+ }
+
+ void HaystackAccessMethod::getKeys(const BSONObj& obj, BSONObjSet* keys) {
+ BSONElement loc = obj.getFieldDotted(_geoField);
+
+ if (loc.eoo()) { return; }
+
+ uassert(16775, "latlng not an array", loc.isABSONObj());
+ string root;
+ {
+ BSONObjIterator i(loc.Obj());
+ BSONElement x = i.next();
+ BSONElement y = i.next();
+ root = makeString(hash(x), hash(y));
+ }
+
+ verify(_otherFields.size() == 1);
+
+ BSONElementSet all;
+
+ // This is getFieldsDotted (plural not singular) since the object we're indexing
+ // may be an array.
+ obj.getFieldsDotted(_otherFields[0], all);
+
+ if (all.size() == 0) {
+ // We're indexing a document that doesn't have the secondary non-geo field present.
+            // XXX: do we want to add this even if all.size() > 0? (result: empty search
+            // terms match everything instead of only things w/ empty search terms)
+ addKey(root, BSONElement(), keys);
+ } else {
+            // Ex: If our secondary field is type: "foo" or type: {a:"foo", b:"bar"},
+ // all.size()==1. We can query on the complete field.
+ // Ex: If our secondary field is type: ["A", "B"] all.size()==2 and all has values
+ // "A" and "B". The query looks for any of the fields in the array.
+ for (BSONElementSet::iterator i = all.begin(); i != all.end(); ++i) {
+ addKey(root, *i, keys);
+ }
+ }
+ }
+
+ int HaystackAccessMethod::hash(const BSONElement& e) const {
+ uassert(16776, "geo field is not a number", e.isNumber());
+ double d = e.numberDouble();
+ d += 180;
+ d /= _bucketSize;
+ return static_cast<int>(d);
+ }
+
+ string HaystackAccessMethod::makeString(int hashedX, int hashedY) const {
+ stringstream ss;
+ ss << hashedX << "_" << hashedY;
+ return ss.str();
+ }
+
+ // Build a new BSONObj with root in it. If e is non-empty, append that to the key. Insert
+ // the BSONObj into keys.
+ void HaystackAccessMethod::addKey(const string& root, const BSONElement& e,
+ BSONObjSet* keys) const {
+ BSONObjBuilder buf;
+ buf.append("", root);
+
+ if (e.eoo())
+ buf.appendNull("");
+ else
+ buf.appendAs(e, "");
+
+ keys->insert(buf.obj());
+ }
+
+ Status HaystackAccessMethod::newCursor(IndexCursor** out) {
+ return Status(ErrorCodes::IllegalOperation, "Unimplemented seek called on Haystack");
+ }
+
+ void HaystackAccessMethod::searchCommand(const BSONObj& nearObj, double maxDistance,
+ const BSONObj& search, BSONObjBuilder* result,
+ unsigned limit) {
+ Timer t;
+
+ LOG(1) << "SEARCH near:" << nearObj << " maxDistance:" << maxDistance
+ << " search: " << search << endl;
+ int x, y;
+ {
+ BSONObjIterator i(nearObj);
+ x = hash(i.next());
+ y = hash(i.next());
+ }
+ int scale = static_cast<int>(ceil(maxDistance / _bucketSize));
+
+ GeoHaystackSearchHopper hopper(nearObj, maxDistance, limit, _geoField);
+
+ long long btreeMatches = 0;
+
+ for (int a = -scale; a <= scale && !hopper.limitReached(); ++a) {
+ for (int b = -scale; b <= scale && !hopper.limitReached(); ++b) {
+ BSONObjBuilder bb;
+ bb.append("", makeString(x + a, y + b));
+
+ for (unsigned i = 0; i < _otherFields.size(); i++) {
+ // See if the non-geo field we're indexing on is in the provided search term.
+ BSONElement e = search.getFieldDotted(_otherFields[i]);
+ if (e.eoo())
+ bb.appendNull("");
+ else
+ bb.appendAs(e, "");
+ }
+
+ BSONObj key = bb.obj();
+
+ GEOQUADDEBUG("KEY: " << key);
+
+ // TODO(hk): this keeps a set of all DiskLoc seen in this pass so that we don't
+ // consider the element twice. Do we want to instead store a hash of the set?
+ // Is this often big?
+ unordered_set<DiskLoc, DiskLoc::Hasher> thisPass;
+
+ scoped_ptr<BtreeCursor> cursor(BtreeCursor::make(nsdetails(_descriptor->parentNS()),
+ _descriptor->getOnDisk(),
+ key,
+ key,
+ true,
+ 1));
+ while (cursor->ok() && !hopper.limitReached()) {
+ pair<unordered_set<DiskLoc, DiskLoc::Hasher>::iterator, bool> p
+ = thisPass.insert(cursor->currLoc());
+ // If a new element was inserted (haven't seen the DiskLoc before), p.second
+ // is true.
+ if (p.second) {
+ hopper.consider(cursor->currLoc());
+ GEOQUADDEBUG("\t" << cursor->current());
+ btreeMatches++;
+ }
+ cursor->advance();
+ }
+ }
+ }
+
+ BSONArrayBuilder arr(result->subarrayStart("results"));
+ int num = hopper.appendResultsTo(&arr);
+ arr.done();
+
+ {
+ BSONObjBuilder b(result->subobjStart("stats"));
+ b.append("time", t.millis());
+ b.appendNumber("btreeMatches", btreeMatches);
+ b.append("n", num);
+ b.done();
+ }
+ }
+
+} // namespace mongo
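The bucket math used by getKeys() and searchCommand() above, in one standalone sketch: each coordinate is shifted by 180 (keeping valid lat/lng non-negative), divided by bucketSize, and truncated to an int; the pair is serialized as "x_y"; and a search probes the (2*scale+1)^2 grid of buckets around the center, where scale = ceil(maxDistance / bucketSize). The names hashCoord/bucketKey are illustrative:

    #include <cmath>
    #include <iostream>
    #include <sstream>
    #include <string>

    // One coordinate -> bucket number; truncation maps a bucketSize-wide
    // interval of values onto a single bucket.
    static int hashCoord(double d, double bucketSize) {
        return static_cast<int>((d + 180) / bucketSize);
    }

    // Same "x_y" layout as makeString() above.
    static std::string bucketKey(int hx, int hy) {
        std::ostringstream ss;
        ss << hx << "_" << hy;
        return ss.str();
    }

    int main() {
        double bucketSize = 1.0, maxDistance = 2.0;
        int hx = hashCoord(40.3, bucketSize);    // 220
        int hy = hashCoord(-73.9, bucketSize);   // 106
        std::cout << "indexed under " << bucketKey(hx, hy) << "\n";  // 220_106

        // searchCommand() walks every bucket within `scale` cells of the
        // center; the hopper then filters candidates by true distance.
        int scale = static_cast<int>(std::ceil(maxDistance / bucketSize));
        for (int a = -scale; a <= scale; ++a)
            for (int b = -scale; b <= scale; ++b)
                std::cout << bucketKey(hx + a, hy + b) << "\n";
        return 0;
    }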
diff --git a/src/mongo/db/index/haystack_access_method.h b/src/mongo/db/index/haystack_access_method.h
new file mode 100644
index 00000000000..1f7d200ed79
--- /dev/null
+++ b/src/mongo/db/index/haystack_access_method.h
@@ -0,0 +1,71 @@
+/**
+* Copyright (C) 2013 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "mongo/base/status.h"
+#include "mongo/db/index/btree_access_method_internal.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/jsobj.h"
+
+namespace mongo {
+
+ /**
+ * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
+ * Examines all documents in a given radius of a given point.
+ * Returns all documents that match a given search restriction.
+ * See http://dochub.mongodb.org/core/haystackindexes
+ *
+ * Use when you want to look for restaurants within 25 miles with a certain name.
+ * Don't use when you want to find the closest open restaurants; see 2d.cpp for that.
+ *
+ * Usage:
+ * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
+ * pos is the name of the field to be indexed that has lat/lng data in an array.
+ * type is the name of the secondary field to be indexed.
+ * bucketSize specifies the dimension of the square bucket for the data in pos.
+ * ALL fields are mandatory.
+ */
+ class HaystackAccessMethod : public BtreeBasedAccessMethod {
+ public:
+ using BtreeBasedAccessMethod::_descriptor;
+ using BtreeBasedAccessMethod::_interface;
+
+ HaystackAccessMethod(IndexDescriptor* descriptor);
+ virtual ~HaystackAccessMethod() { }
+
+ // Not implemented.
+ virtual Status newCursor(IndexCursor** out);
+
+ protected:
+ friend class GeoHaystackSearchCommand;
+ void searchCommand(const BSONObj& nearObj, double maxDistance, const BSONObj& search,
+ BSONObjBuilder* result, unsigned limit);
+
+ private:
+ virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
+
+ // Helper methods called by getKeys:
+ int hash(const BSONElement& e) const;
+ string makeString(int hashedX, int hashedY) const;
+ void addKey(const string& root, const BSONElement& e, BSONObjSet* keys) const;
+
+ string _geoField;
+ vector<string> _otherFields;
+ double _bucketSize;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/index/haystack_access_method_internal.h b/src/mongo/db/index/haystack_access_method_internal.h
new file mode 100644
index 00000000000..935f1ee4033
--- /dev/null
+++ b/src/mongo/db/index/haystack_access_method_internal.h
@@ -0,0 +1,69 @@
+/**
+ * Copyright (C) 2013 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <vector>
+
+#include "mongo/db/diskloc.h"
+#include "mongo/db/geo/core.h"
+#include "mongo/db/geo/shapes.h"
+
+namespace mongo {
+
+ class GeoHaystackSearchHopper {
+ public:
+ /**
+ * Constructed with a point, a max distance from that point, and a max number of
+ * matched points to store.
+ * @param n The centroid that we're searching
+ * @param maxDistance The maximum distance to consider from that point
+ * @param limit The maximum number of results to return
+ * @param geoField Which field in the provided DiskLoc has the point to test.
+ */
+ GeoHaystackSearchHopper(const BSONObj& nearObj, double maxDistance, unsigned limit,
+ const string& geoField)
+ : _near(nearObj), _maxDistance(maxDistance), _limit(limit), _geoField(geoField) { }
+
+ // Consider the point in loc, and keep it if it's within _maxDistance (and we have space for
+ // it)
+ void consider(const DiskLoc& loc) {
+ if (limitReached()) return;
+ Point p(loc.obj().getFieldDotted(_geoField));
+ if (distance(_near, p) > _maxDistance)
+ return;
+ _locs.push_back(loc);
+ }
+
+ int appendResultsTo(BSONArrayBuilder* b) {
+            for (unsigned i = 0; i < _locs.size(); i++)
+ b->append(_locs[i].obj());
+ return _locs.size();
+ }
+
+ // Have we stored as many points as we can?
+        bool limitReached() const {
+ return _locs.size() >= _limit;
+ }
+ private:
+ Point _near;
+ double _maxDistance;
+ unsigned _limit;
+ const string _geoField;
+ vector<DiskLoc> _locs;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index 8488b484f77..5f03e3cda68 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -122,11 +122,7 @@ namespace mongo {
*/
class UpdateTicket {
public:
- UpdateTicket() : _isValid(false), _isMultiKey(false) { }
-
- // Multikey is a bit set in the on-disk catalog. If an update is multi-key we have
- // to set that bit. We propagate this up so the caller can do that.
- bool isMultiKey() const { return _isMultiKey; }
+ UpdateTicket() : _isValid(false) { }
protected:
// These friends are the classes that actually fill out an UpdateStatus.
@@ -135,7 +131,6 @@ namespace mongo {
class PrivateUpdateData;
bool _isValid;
- bool _isMultiKey;
// This is meant to be filled out only by the friends above.
scoped_ptr<PrivateUpdateData> _indexSpecificUpdateData;
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 9e58cbb7c46..14595983408 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -130,6 +130,19 @@ namespace mongo {
// Return a (rather compact) string representation.
string toString() { return _infoObj.toString(); }
+ // Return the info object.
+ BSONObj infoObj() { return _infoObj; }
+
+ // Set multikey attribute. We never unset it.
+ void setMultikey() {
+ _namespaceDetails->setIndexIsMultikey(parentNS().c_str(), _indexNumber);
+ }
+
+ // Is this index being created in the background?
+ bool isBackgroundIndex() {
+ return _indexNumber >= _namespaceDetails->nIndexes;
+ }
+
private:
// Related catalog information.
NamespaceDetails* _namespaceDetails;
diff --git a/src/mongo/db/index_update.cpp b/src/mongo/db/index_update.cpp
index a6be38c6431..21ec3437a84 100644
--- a/src/mongo/db/index_update.cpp
+++ b/src/mongo/db/index_update.cpp
@@ -14,8 +14,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mongo/pch.h"
-
#include "mongo/db/index_update.h"
#include "mongo/client/dbclientinterface.h"
@@ -24,6 +22,7 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/extsort.h"
#include "mongo/db/index.h"
+#include "mongo/db/index/catalog_hack.h"
#include "mongo/db/kill_current_op.h"
#include "mongo/db/namespace_details.h"
#include "mongo/db/pdfile_private.h"
@@ -35,187 +34,151 @@
namespace mongo {
- /* unindex all keys in index for this record. */
- static void _unindexRecord(IndexDetails& id, BSONObj& obj, const DiskLoc& dl, bool logMissing = true) {
- BSONObjSet keys;
- id.getKeysFromObject(obj, keys);
- IndexInterface& ii = id.idxInterface();
- for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
- BSONObj j = *i;
-
- bool ok = false;
- try {
- ok = ii.unindex(id.head, id, j, dl);
- }
- catch (AssertionException& e) {
- problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
- out() << "Assertion failure: _unindex failed: " << e.what() << '\n';
- out() << " obj:" << obj.toString() << '\n';
- out() << " key:" << j.toString() << '\n';
- out() << " dl:" << dl.toString() << endl;
- logContext();
+ /**
+ * Remove the provided (obj, dl) pair from the provided index.
+ */
+ static void _unindexRecord(NamespaceDetails *d, int idxNo, const BSONObj& obj,
+ const DiskLoc& dl, bool logIfError = true) {
+ IndexDetails &id = d->idx(idxNo);
+
+ if (CatalogHack::testIndexMigration() && CatalogHack::isIndexMigrated(id.keyPattern())) {
+ auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(d, idxNo));
+ auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(desc.get()));
+ InsertDeleteOptions options;
+ options.logIfError = logIfError;
+
+ int64_t removed;
+ Status ret = iam->remove(obj, dl, options, &removed);
+ if (Status::OK() != ret) {
+ problem() << "Couldn't unindex record " << obj.toString() << " status: "
+ << ret.toString() << endl;
}
+ } else {
+ // DEPRECATED: this goes away when the migration finishes.
+ BSONObjSet keys;
+ id.getKeysFromObject(obj, keys);
+ IndexInterface& ii = id.idxInterface();
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ BSONObj j = *i;
+
+ bool ok = false;
+ try {
+ ok = ii.unindex(id.head, id, j, dl);
+ }
+ catch (AssertionException& e) {
+ problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
+ out() << "Assertion failure: _unindex failed: " << e.what() << '\n';
+ out() << " obj:" << obj.toString() << '\n';
+ out() << " key:" << j.toString() << '\n';
+ out() << " dl:" << dl.toString() << endl;
+ logContext();
+ }
- if ( !ok && logMissing ) {
- log() << "unindex failed (key too big?) " << id.indexNamespace() << " key: " << j << " " << obj["_id"] << endl;
+ if ( !ok && logIfError ) {
+ log() << "unindex failed (key too big?) " << id.indexNamespace() << " key: " << j << " " << obj["_id"] << endl;
+ }
}
}
}
-//zzz
- /* unindex all keys in all indexes for this record. */
- void unindexRecord(NamespaceDetails *d,
- Record *todelete,
- const DiskLoc& dl,
+ /**
+ * Remove the provided (obj, dl) pair from all indices.
+ */
+ void unindexRecord(NamespaceDetails* nsd, Record* todelete, const DiskLoc& dl,
bool noWarn /* = false */) {
+
BSONObj obj = BSONObj::make(todelete);
- int n = d->nIndexes;
- for ( int i = 0; i < n; i++ )
- _unindexRecord(d->idx(i), obj, dl, !noWarn);
-
- for (int i = 0; i < d->indexBuildsInProgress; i++) { // background index
- // Always pass nowarn here, as this one may be missing for valid reasons as we are
- // concurrently building it
- _unindexRecord(d->idx(n+i), obj, dl, false);
- }
- }
+ int numIndices = nsd->getTotalIndexCount();
- /* step one of adding keys to index idxNo for a new record
- */
- void fetchIndexInserters(BSONObjSet & /*out*/keys,
- IndexInterface::IndexInserter &inserter,
- NamespaceDetails *d,
- int idxNo,
- const BSONObj& obj,
- DiskLoc recordLoc,
- const bool allowDups) {
- IndexDetails &idx = d->idx(idxNo);
- idx.getKeysFromObject(obj, keys);
- if( keys.empty() )
- return;
- bool dupsAllowed = !idx.unique() || allowDups;
- Ordering ordering = Ordering::make(idx.keyPattern());
-
- try {
- // we can't do the two step method with multi keys as insertion of one key changes the indexes
- // structure. however we can do the first key of the set so we go ahead and do that FWIW
- inserter.addInsertionContinuation(
- idx.idxInterface().beginInsertIntoIndex(
- idxNo, idx, recordLoc, *keys.begin(), ordering, dupsAllowed));
- }
- catch (AssertionException& e) {
- if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
- DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
- }
- else {
- throw;
- }
+ for (int i = 0; i < numIndices; i++) {
+ // If i >= d->nIndexes, it's a background index, and we DO NOT want to log anything.
+ bool logIfError = (i < nsd->nIndexes) ? !noWarn : false;
+ _unindexRecord(nsd, i, obj, dl, logIfError);
}
}
- /** add index keys for a newly inserted record
- done in two steps/phases to allow potential deferal of write lock portion in the future
- */
- void indexRecordUsingTwoSteps(const char *ns, NamespaceDetails *d, BSONObj obj,
- DiskLoc loc, bool shouldBeUnlocked) {
- vector<int> multi;
- vector<BSONObjSet> multiKeys;
-
- IndexInterface::IndexInserter inserter;
-
- // Step 1, read phase.
- int n = d->getTotalIndexCount();
- {
+ /**
+ * Add the provided (obj, dl) pair to the provided index.
+ */
+ static void addKeysToIndex(const char *ns, NamespaceDetails *d, int idxNo, const BSONObj& obj,
+ const DiskLoc &recordLoc, bool dupsAllowed) {
+ IndexDetails& id = d->idx(idxNo);
+ if (CatalogHack::testIndexMigration() && CatalogHack::isIndexMigrated(id.keyPattern())) {
+ auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(d, idxNo));
+ auto_ptr<IndexAccessMethod> iam(CatalogHack::getIndex(desc.get()));
+ InsertDeleteOptions options;
+ options.logIfError = false;
+ options.dupsAllowed = (!KeyPattern::isIdKeyPattern(id.keyPattern()) && !id.unique())
+ || ignoreUniqueIndex(id);
+
+ int64_t inserted;
+ Status ret = iam->insert(obj, recordLoc, options, &inserted);
+ if (Status::OK() != ret) {
+ uasserted(ret.location(), ret.reason());
+ }
+ } else {
+ // DEPRECATED: this goes away when the migration finishes.
BSONObjSet keys;
- for ( int i = 0; i < n; i++ ) {
- // this call throws on unique constraint violation. we haven't done any writes yet so that is fine.
- fetchIndexInserters(/*out*/keys,
- inserter,
- d,
- i,
- obj,
- loc,
- ignoreUniqueIndex(d->idx(i)));
- if( keys.size() > 1 ) {
- multi.push_back(i);
- multiKeys.push_back(BSONObjSet());
- multiKeys[multiKeys.size()-1].swap(keys);
+ id.getKeysFromObject(obj, keys);
+ if( keys.empty() )
+ return;
+ BSONObj order = id.keyPattern();
+ IndexInterface& ii = id.idxInterface();
+ Ordering ordering = Ordering::make(order);
+ int n = 0;
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ if( ++n == 2 ) {
+ d->setIndexIsMultikey(ns, idxNo);
}
- keys.clear();
- }
- }
-
- inserter.finishAllInsertions(); // Step 2, write phase.
-
- // now finish adding multikeys
- for( unsigned j = 0; j < multi.size(); j++ ) {
- unsigned i = multi[j];
- BSONObjSet& keys = multiKeys[j];
- IndexDetails& idx = d->idx(i);
- bool dupsAllowed = !idx.unique() || ignoreUniqueIndex(idx);
- IndexInterface& ii = idx.idxInterface();
- Ordering ordering = Ordering::make(idx.keyPattern());
- d->setIndexIsMultikey(ns, i);
- for( BSONObjSet::iterator k = ++keys.begin()/*skip 1*/; k != keys.end(); k++ ) {
+ verify( !recordLoc.isNull() );
try {
- ii.bt_insert(idx.head, loc, *k, ordering, dupsAllowed, idx);
- } catch (AssertionException& e) {
- if( e.getCode() == 10287 && (int) i >= d->nIndexes ) {
+ ii.bt_insert(id.head, recordLoc, *i, ordering, dupsAllowed, id);
+ }
+ catch (AssertionException& e) {
+ if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ continue;
}
- else {
- /* roll back previously added index entries
- note must do self index as it is multikey and could require some cleanup itself
- */
- for( int j = 0; j < n; j++ ) {
- try {
- _unindexRecord(d->idx(j), obj, loc, false);
- }
- catch(...) {
- LOG(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
- }
- }
+ if( !dupsAllowed ) {
+ // dup key exception, presumably.
throw;
}
+ problem() << " caught assertion addKeysToIndex " << id.indexNamespace() << " " << obj["_id"] << endl;
}
}
}
}
- /* add keys to index idxNo for a new record */
- static void addKeysToIndex(const char *ns, NamespaceDetails *d, int idxNo, BSONObj& obj,
- DiskLoc recordLoc, bool dupsAllowed) {
- IndexDetails& idx = d->idx(idxNo);
- BSONObjSet keys;
- idx.getKeysFromObject(obj, keys);
- if( keys.empty() )
- return;
- BSONObj order = idx.keyPattern();
- IndexInterface& ii = idx.idxInterface();
- Ordering ordering = Ordering::make(order);
- int n = 0;
- for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
- if( ++n == 2 ) {
- d->setIndexIsMultikey(ns, idxNo);
- }
- verify( !recordLoc.isNull() );
+ /**
+ * Add the provided (obj, loc) pair to all indices.
+ */
+ void indexRecord(const char *ns, NamespaceDetails *d, const BSONObj &obj, const DiskLoc &loc) {
+ int numIndices = d->getTotalIndexCount();
+
+ for (int i = 0; i < numIndices; ++i) {
+ IndexDetails &id = d->idx(i);
+
try {
- ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
- }
- catch (AssertionException& e) {
- if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
- DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
- continue;
- }
- if( !dupsAllowed ) {
- // dup key exception, presumably.
- throw;
+ addKeysToIndex(ns, d, i, obj, loc, !id.unique() || ignoreUniqueIndex(id));
+ } catch (AssertionException& e) {
+ // TODO: the new index layer indexes either all or no keys, so j <= i can be j < i.
+ for (int j = 0; j <= i; j++) {
+ try {
+ _unindexRecord(d, j, obj, loc, false);
+ } catch(...) {
+ LOG(3) << "unindex fails on rollback after unique "
+ "key constraint prevented insert\n";
+ }
}
- problem() << " caught assertion addKeysToIndex " << idx.indexNamespace() << " " << obj["_id"] << endl;
+ throw;
}
}
}
+ //
+ // Bulk index building
+ //
+
void addKeysToPhaseOne( const char* ns,
const IndexDetails& idx,
const BSONObj& order,
@@ -577,7 +540,7 @@ namespace mongo {
tlog() << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl;
}
- extern BSONObj id_obj; // { _id : 1 }
+ extern BSONObj id_obj; // { _id : 1 }
void ensureHaveIdIndex(const char* ns, bool mayInterrupt) {
NamespaceDetails *d = nsdetails(ns);
@@ -618,20 +581,7 @@ namespace mongo {
((tmp >> (x+1)) << x);
}
- class IndexUpdateTest : public StartupTest {
- public:
- void run() {
- verify( removeBit(1, 0) == 0 );
- verify( removeBit(2, 0) == 1 );
- verify( removeBit(2, 1) == 0 );
- verify( removeBit(255, 1) == 127 );
- verify( removeBit(21, 2) == 9 );
- verify( removeBit(0x4000000000000001ULL, 62) == 1 );
- }
- } iu_unittest;
-
- bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex ) {
-
+ bool dropIndexes(NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex) {
BackgroundOperation::assertNoBgOpInProgForNs(ns);
d = d->writingWithExtra();
@@ -703,4 +653,51 @@ namespace mongo {
return true;
}
-}
+ /**
+ * DEPRECATED -- only used by prefetch.cpp
+ * step one of adding keys to index idxNo for a new record
+ */
+ void fetchIndexInserters(BSONObjSet & /*out*/keys,
+ IndexInterface::IndexInserter &inserter,
+ NamespaceDetails *d,
+ int idxNo,
+ const BSONObj& obj,
+ DiskLoc recordLoc,
+ const bool allowDups) {
+ IndexDetails &idx = d->idx(idxNo);
+ idx.getKeysFromObject(obj, keys);
+ if( keys.empty() )
+ return;
+ bool dupsAllowed = !idx.unique() || allowDups;
+ Ordering ordering = Ordering::make(idx.keyPattern());
+
+ try {
+ // we can't do the two step method with multi keys as insertion of one key changes the indexes
+ // structure. however we can do the first key of the set so we go ahead and do that FWIW
+ inserter.addInsertionContinuation(
+ idx.idxInterface().beginInsertIntoIndex(
+ idxNo, idx, recordLoc, *keys.begin(), ordering, dupsAllowed));
+ }
+ catch (AssertionException& e) {
+ if( e.getCode() == 10287 && idxNo >= d->nIndexes ) {
+ DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ }
+ else {
+ throw;
+ }
+ }
+ }
+
+ class IndexUpdateTest : public StartupTest {
+ public:
+ void run() {
+ verify( removeBit(1, 0) == 0 );
+ verify( removeBit(2, 0) == 1 );
+ verify( removeBit(2, 1) == 0 );
+ verify( removeBit(255, 1) == 127 );
+ verify( removeBit(21, 2) == 9 );
+ verify( removeBit(0x4000000000000001ULL, 62) == 1 );
+ }
+ } iu_unittest;
+
+} // namespace mongo
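The rewritten indexRecord() above trades the two-step scheme for a plain loop with rollback: if insertion into index i throws, indices 0 through i inclusive are unindexed (i itself may have partially inserted keys) before the exception is rethrown. The control flow in isolation, with callables standing in for the per-index insert/remove (illustrative names):

    #include <cstddef>
    #include <functional>
    #include <vector>

    // insertKeys[i] / removeKeys[i] stand in for addKeysToIndex() and
    // _unindexRecord() on index i; remove must tolerate keys that were
    // never actually inserted.
    void indexRecord(const std::vector<std::function<void()> >& insertKeys,
                     const std::vector<std::function<void()> >& removeKeys) {
        for (std::size_t i = 0; i < insertKeys.size(); ++i) {
            try {
                insertKeys[i]();
            } catch (...) {
                // Roll back 0..i inclusive, then surface the original error.
                for (std::size_t j = 0; j <= i; ++j) {
                    try { removeKeys[j](); } catch (...) { /* best effort */ }
                }
                throw;
            }
        }
    }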
diff --git a/src/mongo/db/index_update.h b/src/mongo/db/index_update.h
index d0f3bd0608e..e32e028931f 100644
--- a/src/mongo/db/index_update.h
+++ b/src/mongo/db/index_update.h
@@ -37,9 +37,7 @@ namespace mongo {
bool mayInterrupt);
// add index keys for a newly inserted record
- // done in two steps/phases to allow potential deferal of write lock portion in the future
- void indexRecordUsingTwoSteps(const char *ns, NamespaceDetails *d, BSONObj obj,
- DiskLoc loc, bool shouldBeUnlocked);
+ void indexRecord(const char *ns, NamespaceDetails *d, const BSONObj& obj, const DiskLoc &loc);
// Given an object, populate "inserter" with information necessary to update indexes.
void fetchIndexInserters(BSONObjSet & /*out*/keys,
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 7379c69bd2e..6c1bb6d67e6 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -40,6 +40,17 @@ namespace mongo {
}
}
+ bool KeyPattern::isIdKeyPattern(const BSONObj& pattern) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ // _id index must have form exactly {_id : 1} or {_id : -1}.
+ // Allows an index of form {_id : "hashed"} to exist but
+ // do not consider it to be the primary _id index
+ return (0 == strcmp(e.fieldName(), "_id"))
+ && (e.numberInt() == 1 || e.numberInt() == -1)
+ && i.next().eoo();
+ }
+
string KeyPattern::findPluginName(const BSONObj& keyPattern) {
BSONObjIterator i(keyPattern);
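A standalone restatement of the isIdKeyPattern() predicate above, with an ordered (field, numeric value) list modeling the key pattern (Elem is an illustrative stand-in for BSONElement). {_id: "hashed"} fails because numberInt() is 0 for a string value, and compound patterns fail the single-element check:

    #include <string>
    #include <vector>

    struct Elem {
        std::string fieldName;
        int numberInt;   // 0 for non-numeric values such as "hashed"
    };

    static bool isIdKeyPattern(const std::vector<Elem>& pattern) {
        if (pattern.size() != 1) return false;   // mirrors i.next().eoo()
        const Elem& e = pattern[0];
        return e.fieldName == "_id" && (e.numberInt == 1 || e.numberInt == -1);
    }

    // isIdKeyPattern({{"_id",  1}})            -> true
    // isIdKeyPattern({{"_id", -1}})            -> true
    // isIdKeyPattern({{"_id",  0 /*hashed*/}}) -> false
    // isIdKeyPattern({{"_id", 1}, {"a", 1}})   -> false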
diff --git a/src/mongo/db/keypattern.h b/src/mongo/db/keypattern.h
index 4c52dac2c9d..da206e10b72 100644
--- a/src/mongo/db/keypattern.h
+++ b/src/mongo/db/keypattern.h
@@ -81,6 +81,12 @@ namespace mongo {
}
/**
+ * Is the provided key pattern the index over the ID field?
+ * The always required ID index is always {_id: 1} or {_id: -1}.
+ */
+ static bool isIdKeyPattern(const BSONObj& pattern);
+
+ /**
* Return the first string value in the provided object. For an index key pattern,
* a field with a non-string value indicates a "special" (not straight Btree) index.
*/
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
index 93a948ad666..e858ec7d16a 100644
--- a/src/mongo/db/pdfile.cpp
+++ b/src/mongo/db/pdfile.cpp
@@ -1677,47 +1677,15 @@ namespace mongo {
checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
}
- bool earlyIndex = true;
- DiskLoc loc;
- if( idToInsert.needed() || tableToIndex || d->isCapped() ) {
- // if need id, we don't do the early indexing. this is not the common case so that is sort of ok
- earlyIndex = false;
- loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
- }
- else {
- loc = d->allocWillBeAt(ns, lenWHdr);
- if( loc.isNull() ) {
- // need to get a new extent so we have to do the true alloc now (not common case)
- earlyIndex = false;
- loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
- }
- }
+ DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
+
if ( loc.isNull() ) {
- log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->isCapped() << endl;
+ log() << "insert: couldn't alloc space for object ns:" << ns
+ << " capped:" << d->isCapped() << endl;
verify(d->isCapped());
return DiskLoc();
}
- if( earlyIndex ) {
- // add record to indexes using two step method so we can do the reading outside a write lock
- if ( d->nIndexes ) {
- verify( obuf );
- BSONObj obj((const char *) obuf);
- try {
- indexRecordUsingTwoSteps(ns, d, obj, loc, true);
- }
- catch( AssertionException& ) {
- // should be a dup key error on _id index
- dassert( !tableToIndex && !d->isCapped() );
- // no need to delete/rollback the record as it was not added yet
- throw;
- }
- }
- // really allocate now
- DiskLoc real = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
- verify( real == loc );
- }
-
Record *r = loc.rec();
{
verify( r->lengthWithHeaders() >= lenWHdr );
@@ -1753,16 +1721,11 @@ namespace mongo {
}
/* add this record to our indexes */
- if ( !earlyIndex && d->nIndexes ) {
+ if (d->nIndexes) {
try {
BSONObj obj(r->data());
- // not sure which of these is better -- either can be used. oldIndexRecord may be faster,
- // but twosteps handles dup key errors more efficiently.
- //oldIndexRecord(d, obj, loc);
- indexRecordUsingTwoSteps(ns, d, obj, loc, false);
-
- }
- catch( AssertionException& e ) {
+ indexRecord(ns, d, obj, loc);
+ } catch( AssertionException& e ) {
// should be a dup key error on _id index
if( tableToIndex || d->isCapped() ) {
massert( 12583, "unexpected index insertion failure on capped collection", !d->isCapped() );
diff --git a/src/mongo/db/query_plan.cpp b/src/mongo/db/query_plan.cpp
index 18ee3ff6dd7..ebde6d81648 100644
--- a/src/mongo/db/query_plan.cpp
+++ b/src/mongo/db/query_plan.cpp
@@ -253,11 +253,10 @@ doneCheckOrder:
}
// TODO(hk): Migrate!
- bool testIndexMigrations = true;
-
- if (testIndexMigrations && CatalogHack::isIndexMigrated(_type->keyPattern())) {
+ if (CatalogHack::testCursorMigration()
+ && CatalogHack::isIndexMigrated(_type->keyPattern())) {
IndexDescriptor* descriptor = CatalogHack::getDescriptor(_d, _idxNo);
- IndexAccessMethod* iam = CatalogHack::getSpecialIndex(descriptor);
+ IndexAccessMethod* iam = CatalogHack::getIndex(descriptor);
return shared_ptr<Cursor>(EmulatedCursor::make(descriptor, iam, _originalQuery,
_order, numWanted,
descriptor->keyPattern()));