Diffstat (limited to 'src/mongo/db/catalog')
-rw-r--r--  src/mongo/db/catalog/apply_ops.cpp | 279
-rw-r--r--  src/mongo/db/catalog/apply_ops.h | 24
-rw-r--r--  src/mongo/db/catalog/capped_utils.cpp | 340
-rw-r--r--  src/mongo/db/catalog/capped_utils.h | 45
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 250
-rw-r--r--  src/mongo/db/catalog/coll_mod.h | 25
-rw-r--r--  src/mongo/db/catalog/collection.cpp | 1211
-rw-r--r--  src/mongo/db/catalog/collection.h | 681
-rw-r--r--  src/mongo/db/catalog/collection_catalog_entry.h | 107
-rw-r--r--  src/mongo/db/catalog/collection_compact.cpp | 240
-rw-r--r--  src/mongo/db/catalog/collection_info_cache.cpp | 204
-rw-r--r--  src/mongo/db/catalog/collection_info_cache.h | 108
-rw-r--r--  src/mongo/db/catalog/collection_options.cpp | 280
-rw-r--r--  src/mongo/db/catalog/collection_options.h | 127
-rw-r--r--  src/mongo/db/catalog/collection_options_test.cpp | 304
-rw-r--r--  src/mongo/db/catalog/create_collection.cpp | 83
-rw-r--r--  src/mongo/db/catalog/create_collection.h | 16
-rw-r--r--  src/mongo/db/catalog/cursor_manager.cpp | 785
-rw-r--r--  src/mongo/db/catalog/cursor_manager.h | 222
-rw-r--r--  src/mongo/db/catalog/database.cpp | 868
-rw-r--r--  src/mongo/db/catalog/database.h | 206
-rw-r--r--  src/mongo/db/catalog/database_catalog_entry.h | 103
-rw-r--r--  src/mongo/db/catalog/database_holder.cpp | 228
-rw-r--r--  src/mongo/db/catalog/database_holder.h | 94
-rw-r--r--  src/mongo/db/catalog/document_validation.cpp | 4
-rw-r--r--  src/mongo/db/catalog/document_validation.h | 62
-rw-r--r--  src/mongo/db/catalog/drop_collection.cpp | 75
-rw-r--r--  src/mongo/db/catalog/drop_collection.h | 22
-rw-r--r--  src/mongo/db/catalog/drop_database.cpp | 60
-rw-r--r--  src/mongo/db/catalog/drop_database.h | 12
-rw-r--r--  src/mongo/db/catalog/drop_indexes.cpp | 183
-rw-r--r--  src/mongo/db/catalog/drop_indexes.h | 26
-rw-r--r--  src/mongo/db/catalog/head_manager.h | 22
-rw-r--r--  src/mongo/db/catalog/index_catalog.cpp | 1905
-rw-r--r--  src/mongo/db/catalog/index_catalog.h | 481
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.cpp | 384
-rw-r--r--  src/mongo/db/catalog/index_catalog_entry.h | 197
-rw-r--r--  src/mongo/db/catalog/index_create.cpp | 540
-rw-r--r--  src/mongo/db/catalog/index_create.h | 348
-rw-r--r--  src/mongo/db/catalog/index_key_validate.cpp | 112
-rw-r--r--  src/mongo/db/catalog/index_key_validate.h | 12
-rw-r--r--  src/mongo/db/catalog/rename_collection.cpp | 326
-rw-r--r--  src/mongo/db/catalog/rename_collection.h | 26
43 files changed, 5695 insertions(+), 5932 deletions(-)
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
index 9292fbcbf32..623adeaf9d5 100644
--- a/src/mongo/db/catalog/apply_ops.cpp
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -47,162 +47,161 @@
#include "mongo/util/log.h"
namespace mongo {
- Status applyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd,
- BSONObjBuilder* result) {
- // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
- // ns used so locking individually requires more analysis
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while applying ops to database " << dbName);
- }
+Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result) {
+ // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
+ // ns used so locking individually requires more analysis
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while applying ops to database " << dbName);
+ }
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- BSONObj ops = applyOpCmd.firstElement().Obj();
- // Preconditions check reads the database state, so needs to be done locked
- if (applyOpCmd["preCondition"].type() == Array) {
- BSONObjIterator i(applyOpCmd["preCondition"].Obj());
- while (i.more()) {
- BSONObj f = i.next().Obj();
-
- DBDirectClient db(txn);
- BSONObj realres = db.findOne(f["ns"].String() , f["q"].Obj());
-
- // Apply-ops would never have a $where matcher, so use the default callback,
- // which will throw an error if $where is found.
- Matcher m(f["res"].Obj());
- if (! m.matches(realres)) {
- result->append("got" , realres);
- result->append("whatFailed" , f);
- txn->setReplicatedWrites(shouldReplicateWrites);
- return Status(ErrorCodes::BadValue, "pre-condition failed");
- }
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ BSONObj ops = applyOpCmd.firstElement().Obj();
+ // Preconditions check reads the database state, so needs to be done locked
+ if (applyOpCmd["preCondition"].type() == Array) {
+ BSONObjIterator i(applyOpCmd["preCondition"].Obj());
+ while (i.more()) {
+ BSONObj f = i.next().Obj();
+
+ DBDirectClient db(txn);
+ BSONObj realres = db.findOne(f["ns"].String(), f["q"].Obj());
+
+ // Apply-ops would never have a $where matcher, so use the default callback,
+ // which will throw an error if $where is found.
+ Matcher m(f["res"].Obj());
+ if (!m.matches(realres)) {
+ result->append("got", realres);
+ result->append("whatFailed", f);
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ return Status(ErrorCodes::BadValue, "pre-condition failed");
}
}
+ }
- // apply
- int num = 0;
- int errors = 0;
-
- BSONObjIterator i(ops);
- BSONArrayBuilder ab;
- const bool alwaysUpsert = applyOpCmd.hasField("alwaysUpsert") ?
- applyOpCmd["alwaysUpsert"].trueValue() : true;
-
- while (i.more()) {
- BSONElement e = i.next();
- const BSONObj& temp = e.Obj();
-
- // Ignore 'n' operations.
- const char *opType = temp["op"].valuestrsafe();
- if (*opType == 'n') continue;
-
- const std::string ns = temp["ns"].String();
-
- // Run operations under a nested lock as a hack to prevent yielding.
- //
- // The list of operations is supposed to be applied atomically; yielding
- // would break atomicity by allowing an interruption or a shutdown to occur
- // after only some operations are applied. We are already locked globally
- // at this point, so taking a DBLock on the namespace creates a nested lock,
- // and yields are disallowed for operations that hold a nested lock.
- //
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
- // commit to happen with a subset of ops applied.
- // TODO figure out what to do about this.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
-
- // Ensures that yielding will not happen (see the comment above).
- DEV {
- Locker::LockSnapshot lockSnapshot;
- invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
- };
-
- Status status(ErrorCodes::InternalError, "");
-
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- if (*opType == 'c') {
- status = repl::applyCommand_inlock(txn, temp);
- break;
- }
- else {
- OldClientContext ctx(txn, ns);
-
- status = repl::applyOperation_inlock(txn, ctx.db(), temp, alwaysUpsert);
- break;
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
-
- ab.append(status.isOK());
- if (!status.isOK()) {
- errors++;
+ // apply
+ int num = 0;
+ int errors = 0;
+
+ BSONObjIterator i(ops);
+ BSONArrayBuilder ab;
+ const bool alwaysUpsert =
+ applyOpCmd.hasField("alwaysUpsert") ? applyOpCmd["alwaysUpsert"].trueValue() : true;
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ const BSONObj& temp = e.Obj();
+
+ // Ignore 'n' operations.
+ const char* opType = temp["op"].valuestrsafe();
+ if (*opType == 'n')
+ continue;
+
+ const std::string ns = temp["ns"].String();
+
+ // Run operations under a nested lock as a hack to prevent yielding.
+ //
+ // The list of operations is supposed to be applied atomically; yielding
+ // would break atomicity by allowing an interruption or a shutdown to occur
+ // after only some operations are applied. We are already locked globally
+ // at this point, so taking a DBLock on the namespace creates a nested lock,
+ // and yields are disallowed for operations that hold a nested lock.
+ //
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
+ // commit to happen with a subset of ops applied.
+ // TODO figure out what to do about this.
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
+
+ // Ensures that yielding will not happen (see the comment above).
+ DEV {
+ Locker::LockSnapshot lockSnapshot;
+ invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
+ };
+
+ Status status(ErrorCodes::InternalError, "");
+
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ if (*opType == 'c') {
+ status = repl::applyCommand_inlock(txn, temp);
+ break;
+ } else {
+ OldClientContext ctx(txn, ns);
+
+ status = repl::applyOperation_inlock(txn, ctx.db(), temp, alwaysUpsert);
+ break;
}
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "applyOps", ns);
- num++;
-
- WriteUnitOfWork wuow(txn);
- logOpForDbHash(txn, ns.c_str());
- wuow.commit();
+ ab.append(status.isOK());
+ if (!status.isOK()) {
+ errors++;
}
- result->append("applied" , num);
- result->append("results" , ab.arr());
- txn->setReplicatedWrites(shouldReplicateWrites);
+ num++;
- if (txn->writesAreReplicated()) {
- // We want this applied atomically on slaves
- // so we re-wrap without the pre-condition for speed
+ WriteUnitOfWork wuow(txn);
+ logOpForDbHash(txn, ns.c_str());
+ wuow.commit();
+ }
- std::string tempNS = str::stream() << dbName << ".$cmd";
+ result->append("applied", num);
+ result->append("results", ab.arr());
+ txn->setReplicatedWrites(shouldReplicateWrites);
- // TODO: possibly use mutable BSON to remove preCondition field
- // once it is available
- BSONObjBuilder cmdBuilder;
+ if (txn->writesAreReplicated()) {
+ // We want this applied atomically on slaves
+ // so we re-wrap without the pre-condition for speed
- for (auto elem : applyOpCmd) {
- auto name = elem.fieldNameStringData();
- if (name == "preCondition") continue;
- if (name == "bypassDocumentValidation") continue;
- cmdBuilder.append(elem);
- }
+ std::string tempNS = str::stream() << dbName << ".$cmd";
- const BSONObj cmdRewritten = cmdBuilder.done();
-
- // We currently always logOp the command regardless of whether the individial ops
- // succeeded and rely on any failures to also happen on secondaries. This isn't
- // perfect, but it's what the command has always done and is part of its "correct"
- // behavior.
- while (true) {
- try {
- WriteUnitOfWork wunit(txn);
- getGlobalServiceContext()->getOpObserver()->onApplyOps(txn,
- tempNS,
- cmdRewritten);
- wunit.commit();
- break;
- }
- catch (const WriteConflictException& wce) {
- LOG(2) <<
- "WriteConflictException while logging applyOps command, retrying.";
- txn->recoveryUnit()->abandonSnapshot();
- continue;
- }
- }
+ // TODO: possibly use mutable BSON to remove preCondition field
+ // once it is available
+ BSONObjBuilder cmdBuilder;
+
+ for (auto elem : applyOpCmd) {
+ auto name = elem.fieldNameStringData();
+ if (name == "preCondition")
+ continue;
+ if (name == "bypassDocumentValidation")
+ continue;
+ cmdBuilder.append(elem);
}
- if (errors != 0) {
- return Status(ErrorCodes::UnknownError, "");
+ const BSONObj cmdRewritten = cmdBuilder.done();
+
+ // We currently always logOp the command regardless of whether the individial ops
+ // succeeded and rely on any failures to also happen on secondaries. This isn't
+ // perfect, but it's what the command has always done and is part of its "correct"
+ // behavior.
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(txn);
+ getGlobalServiceContext()->getOpObserver()->onApplyOps(txn, tempNS, cmdRewritten);
+ wunit.commit();
+ break;
+ } catch (const WriteConflictException& wce) {
+ LOG(2) << "WriteConflictException while logging applyOps command, retrying.";
+ txn->recoveryUnit()->abandonSnapshot();
+ continue;
+ }
}
+ }
- return Status::OK();
+ if (errors != 0) {
+ return Status(ErrorCodes::UnknownError, "");
}
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace mongo
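Both retry sites in apply_ops.cpp (the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/_END pair around each op, and the manual while (true) loop around onApplyOps) share one shape: run the unit of work, and on WriteConflictException abandon the snapshot, log-and-backoff, and try again. A minimal standalone sketch of that shape; WriteConflict, attemptWork, and logAndBackoff are illustrative stand-ins, not the real server types:

    #include <chrono>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    // Pretend unit of work: conflicts twice, then succeeds.
    void attemptWork(int attempt) {
        if (attempt < 3)
            throw WriteConflict();
    }

    void logAndBackoff(int attempt, const char* opName) {
        std::cout << opName << ": conflict on attempt " << attempt << ", backing off\n";
        std::this_thread::sleep_for(std::chrono::milliseconds(1 << attempt));
    }

    int main() {
        int attempt = 0;
        while (true) {
            try {
                attemptWork(++attempt);  // real code: WriteUnitOfWork + work + commit()
                break;                   // success leaves the loop
            } catch (const WriteConflict&) {
                logAndBackoff(attempt, "applyOps");
                // the real loop also calls txn->recoveryUnit()->abandonSnapshot() here
                continue;
            }
        }
        std::cout << "committed after " << attempt << " attempts\n";
    }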
diff --git a/src/mongo/db/catalog/apply_ops.h b/src/mongo/db/catalog/apply_ops.h
index 13639deb586..588d3bb370b 100644
--- a/src/mongo/db/catalog/apply_ops.h
+++ b/src/mongo/db/catalog/apply_ops.h
@@ -29,17 +29,17 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class OperationContext;
+class BSONObj;
+class BSONObjBuilder;
+class OperationContext;
- /**
- * Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
- * user.
- */
- Status applyOps(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& applyOpCmd,
- BSONObjBuilder* result);
+/**
+ * Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
+ * user.
+ */
+Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result);
-} // namespace mongo
+} // namespace mongo
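A hedged caller sketch for the declaration above, assuming the in-tree BSON helpers (via the umbrella header mongo/db/jsobj.h) and an OperationContext already in hand; the namespace and documents are illustrative. Each op follows the oplog shape {op, ns, o}, and each preCondition entry pairs a query "q" with an expected result "res" that must match before anything is applied:

    #include "mongo/db/catalog/apply_ops.h"
    #include "mongo/db/jsobj.h"

    mongo::Status applyTwoInserts(mongo::OperationContext* txn) {
        mongo::BSONObj cmd = BSON(
            "applyOps" << BSON_ARRAY(
                BSON("op" << "i" << "ns" << "test.coll" << "o" << BSON("_id" << 1))
                << BSON("op" << "i" << "ns" << "test.coll" << "o" << BSON("_id" << 2)))
            << "preCondition" << BSON_ARRAY(
                BSON("ns" << "test.coll" << "q" << BSON("_id" << 1)
                          << "res" << mongo::BSONObj())));  // empty "res" matches anything
        mongo::BSONObjBuilder result;
        return mongo::applyOps(txn, "test", cmd, &result);
    }

On return the result builder carries the "applied" count and per-op boolean "results", as populated in the loop above.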
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 8e981bd2491..b1adb472565 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -51,118 +51,111 @@
#include "mongo/util/scopeguard.h"
namespace mongo {
- Status emptyCapped(OperationContext* txn,
- const NamespaceString& collectionName) {
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
- collectionName);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while truncating collection "
- << collectionName.ns());
- }
-
- Database* db = autoDb.getDb();
- massert(13429, "no such database", db);
+Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
- Collection* collection = db->getCollection(collectionName);
- massert(28584, "no such collection", collection);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while truncating collection "
+ << collectionName.ns());
+ }
- WriteUnitOfWork wuow(txn);
+ Database* db = autoDb.getDb();
+ massert(13429, "no such database", db);
- Status status = collection->truncate(txn);
- if (!status.isOK()) {
- return status;
- }
+ Collection* collection = db->getCollection(collectionName);
+ massert(28584, "no such collection", collection);
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- wuow.commit();
+ WriteUnitOfWork wuow(txn);
- return Status::OK();
+ Status status = collection->truncate(txn);
+ if (!status.isOK()) {
+ return status;
}
- Status cloneCollectionAsCapped(OperationContext* txn,
- Database* db,
- const std::string& shortFrom,
- const std::string& shortTo,
- double size,
- bool temp) {
-
- std::string fromNs = db->name() + "." + shortFrom;
- std::string toNs = db->name() + "." + shortTo;
-
- Collection* fromCollection = db->getCollection(fromNs);
- if (!fromCollection)
- return Status(ErrorCodes::NamespaceNotFound,
- str::stream() << "source collection " << fromNs << " does not exist");
-
- if (db->getCollection(toNs))
- return Status(ErrorCodes::NamespaceExists, "to collection already exists");
-
- // create new collection
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- const auto fromOptions = fromCollection->getCatalogEntry()
- ->getCollectionOptions(txn)
- .toBSON();
- OldClientContext ctx(txn, toNs);
- BSONObjBuilder spec;
- spec.appendBool("capped", true);
- spec.append("size", size);
- if (temp)
- spec.appendBool("temp", true);
- spec.appendElementsUnique(fromOptions);
-
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
- if (!status.isOK())
- return status;
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
+ getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+
+ wuow.commit();
+
+ return Status::OK();
+}
+
+Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp) {
+ std::string fromNs = db->name() + "." + shortFrom;
+ std::string toNs = db->name() + "." + shortTo;
+
+ Collection* fromCollection = db->getCollection(fromNs);
+ if (!fromCollection)
+ return Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "source collection " << fromNs << " does not exist");
+
+ if (db->getCollection(toNs))
+ return Status(ErrorCodes::NamespaceExists, "to collection already exists");
+
+ // create new collection
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ const auto fromOptions =
+ fromCollection->getCatalogEntry()->getCollectionOptions(txn).toBSON();
+ OldClientContext ctx(txn, toNs);
+ BSONObjBuilder spec;
+ spec.appendBool("capped", true);
+ spec.append("size", size);
+ if (temp)
+ spec.appendBool("temp", true);
+ spec.appendElementsUnique(fromOptions);
+
+ WriteUnitOfWork wunit(txn);
+ Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
+ if (!status.isOK())
+ return status;
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "cloneCollectionAsCapped", fromNs);
- Collection* toCollection = db->getCollection(toNs);
- invariant(toCollection); // we created above
+ Collection* toCollection = db->getCollection(toNs);
+ invariant(toCollection); // we created above
- // how much data to ignore because it won't fit anyway
- // datasize and extentSize can't be compared exactly, so add some padding to 'size'
+ // how much data to ignore because it won't fit anyway
+ // datasize and extentSize can't be compared exactly, so add some padding to 'size'
- long long allocatedSpaceGuess =
- std::max(static_cast<long long>(size * 2),
- static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
+ long long allocatedSpaceGuess =
+ std::max(static_cast<long long>(size * 2),
+ static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
- long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
+ long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
- std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
- txn,
- fromNs,
- fromCollection,
- InternalPlanner::FORWARD));
+ std::unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(txn, fromNs, fromCollection, InternalPlanner::FORWARD));
- exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
+ exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
- Snapshotted<BSONObj> objToClone;
- RecordId loc;
- PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
+ Snapshotted<BSONObj> objToClone;
+ RecordId loc;
+ PlanExecutor::ExecState state = PlanExecutor::FAILURE; // suppress uninitialized warnings
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(txn);
- int retries = 0; // non-zero when retrying our last document.
- while (true) {
- if (!retries) {
- state = exec->getNextSnapshotted(&objToClone, &loc);
- }
+ int retries = 0; // non-zero when retrying our last document.
+ while (true) {
+ if (!retries) {
+ state = exec->getNextSnapshotted(&objToClone, &loc);
+ }
- switch(state) {
+ switch (state) {
case PlanExecutor::IS_EOF:
return Status::OK();
- case PlanExecutor::ADVANCED:
- {
+ case PlanExecutor::ADVANCED: {
if (excessSize > 0) {
// 4x is for padding, power of 2, etc...
excessSize -= (4 * objToClone.value().objsize());
@@ -178,116 +171,103 @@ namespace mongo {
// CollectionScan PlanStage does not have a FAILURE scenario.
// 3) All other PlanExecutor states are handled above
invariant(false);
- }
-
- try {
- // Make sure we are working with the latest version of the document.
- if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId()
- && !fromCollection->findDoc(txn, loc, &objToClone)) {
- // doc was deleted so don't clone it.
- retries = 0;
- continue;
- }
-
- WriteUnitOfWork wunit(txn);
- toCollection->insertDocument(txn,
- objToClone.value(),
- true,
- txn->writesAreReplicated());
- wunit.commit();
+ }
- // Go to the next document
+ try {
+ // Make sure we are working with the latest version of the document.
+ if (objToClone.snapshotId() != txn->recoveryUnit()->getSnapshotId() &&
+ !fromCollection->findDoc(txn, loc, &objToClone)) {
+ // doc was deleted so don't clone it.
retries = 0;
+ continue;
}
- catch (const WriteConflictException& wce) {
- CurOp::get(txn)->debug().writeConflicts++;
- retries++; // logAndBackoff expects this to be 1 on first call.
- wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
-
- // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
- // around call to abandonSnapshot.
- exec->saveState();
- txn->recoveryUnit()->abandonSnapshot();
- exec->restoreState(txn); // Handles any WCEs internally.
- }
- }
- invariant(false); // unreachable
+ WriteUnitOfWork wunit(txn);
+ toCollection->insertDocument(txn, objToClone.value(), true, txn->writesAreReplicated());
+ wunit.commit();
+
+ // Go to the next document
+ retries = 0;
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(txn)->debug().writeConflicts++;
+ retries++; // logAndBackoff expects this to be 1 on first call.
+ wce.logAndBackoff(retries, "cloneCollectionAsCapped", fromNs);
+
+ // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
+ // around call to abandonSnapshot.
+ exec->saveState();
+ txn->recoveryUnit()->abandonSnapshot();
+ exec->restoreState(txn); // Handles any WCEs internally.
+ }
}
- Status convertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size) {
+ invariant(false); // unreachable
+}
- StringData dbname = collectionName.db();
- StringData shortSource = collectionName.coll();
+Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size) {
+ StringData dbname = collectionName.db();
+ StringData shortSource = collectionName.coll();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while converting "
- << collectionName.ns() << " to a capped collection");
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while converting " << collectionName.ns()
+ << " to a capped collection");
+ }
- Database* const db = autoDb.getDb();
- if (!db) {
- return Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbname << " not found");
- }
+ Database* const db = autoDb.getDb();
+ if (!db) {
+ return Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbname << " not found");
+ }
- BackgroundOperation::assertNoBgOpInProgForDb(dbname);
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname);
- std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
- std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
+ std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
+ std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
- if (db->getCollection(longTmpName)) {
- WriteUnitOfWork wunit(txn);
- Status status = db->dropCollection(txn, longTmpName);
- if (!status.isOK())
- return status;
- }
+ if (db->getCollection(longTmpName)) {
+ WriteUnitOfWork wunit(txn);
+ Status status = db->dropCollection(txn, longTmpName);
+ if (!status.isOK())
+ return status;
+ }
- const bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
- Status status = cloneCollectionAsCapped(txn,
- db,
- shortSource.toString(),
- shortTmpName,
- size,
- true);
+ const bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
+ Status status =
+ cloneCollectionAsCapped(txn, db, shortSource.toString(), shortTmpName, size, true);
- if (!status.isOK()) {
- return status;
- }
+ if (!status.isOK()) {
+ return status;
+ }
- verify(db->getCollection(longTmpName));
+ verify(db->getCollection(longTmpName));
- {
- WriteUnitOfWork wunit(txn);
- status = db->dropCollection(txn, collectionName.ns());
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!status.isOK())
- return status;
+ {
+ WriteUnitOfWork wunit(txn);
+ status = db->dropCollection(txn, collectionName.ns());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!status.isOK())
+ return status;
- status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
- if (!status.isOK())
- return status;
+ status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
+ if (!status.isOK())
+ return status;
- getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
- txn,
- NamespaceString(collectionName),
- size);
+ getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
+ txn, NamespaceString(collectionName), size);
- wunit.commit();
- }
- return Status::OK();
+ wunit.commit();
}
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
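The skip budget computed before the collection scan is plain arithmetic: guess the space the new capped collection can actually hold (the larger of twice the requested size and twice its current storage size), then discount leading documents at 4x their object size until the remainder fits. A self-contained worked example with made-up sizes:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        const double size = 1024.0 * 1024.0;     // requested capped size: 1 MiB
        const int64_t storageSize = 256 * 1024;  // target's current storage size
        const int64_t allocatedSpaceGuess =
            std::max(static_cast<int64_t>(size * 2), storageSize * 2);  // 2 MiB

        const int64_t dataSize = 5 * 1024 * 1024;             // source data: 5 MiB
        int64_t excessSize = dataSize - allocatedSpaceGuess;  // 3 MiB will not fit

        int skipped = 0;
        const int64_t docSize = 8 * 1024;  // pretend every document is 8 KiB
        while (excessSize > 0) {
            excessSize -= 4 * docSize;     // 4x is for padding, power of 2, etc.
            ++skipped;
        }
        std::cout << "skipped " << skipped << " leading documents\n";  // prints 96
    }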
diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h
index 05104230fcf..f7be6dc427e 100644
--- a/src/mongo/db/catalog/capped_utils.h
+++ b/src/mongo/db/catalog/capped_utils.h
@@ -29,30 +29,27 @@
#include "mongo/base/status.h"
namespace mongo {
- class Database;
- class NamespaceString;
- class OperationContext;
+class Database;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops all documents contained in the capped collection, "collectionName".
- */
- Status emptyCapped(OperationContext* txn,
- const NamespaceString& collectionName);
+/**
+ * Drops all documents contained in the capped collection, "collectionName".
+ */
+Status emptyCapped(OperationContext* txn, const NamespaceString& collectionName);
- /**
- * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
- */
- Status cloneCollectionAsCapped(OperationContext* txn,
- Database* db,
- const std::string& shortFrom,
- const std::string& shortTo,
- double size,
- bool temp);
+/**
+ * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
+ */
+Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp);
- /**
- * Converts the collection "collectionName" to a capped collection with a size of "size".
- */
- Status convertToCapped(OperationContext* txn,
- const NamespaceString& collectionName,
- double size);
-} // namespace mongo
+/**
+ * Converts the collection "collectionName" to a capped collection with a size of "size".
+ */
+Status convertToCapped(OperationContext* txn, const NamespaceString& collectionName, double size);
+} // namespace mongo
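A hedged caller sketch for two of the entry points above; the namespaces and the 16 MiB size are illustrative, and an OperationContext is assumed to be in hand:

    #include "mongo/db/catalog/capped_utils.h"
    #include "mongo/db/namespace_string.h"

    mongo::Status capTestCollections(mongo::OperationContext* txn) {
        // Truncate an existing capped collection in place.
        mongo::Status s = mongo::emptyCapped(txn, mongo::NamespaceString("test.log"));
        if (!s.isOK())
            return s;
        // Rewrite a plain collection as a 16 MiB capped collection.
        return mongo::convertToCapped(
            txn, mongo::NamespaceString("test.events"), 16.0 * 1024 * 1024);
    }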
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 768ee713b0a..00a3a643a5b 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -40,148 +40,140 @@
#include "mongo/db/service_context.h"
namespace mongo {
- Status collMod(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* result) {
- StringData dbName = ns.db();
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection(ns) : NULL;
-
- // If db/collection does not exist, short circuit and return.
- if (!db || !coll) {
- return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
- }
+Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(ns) : NULL;
+
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
+ }
- OldClientContext ctx(txn, ns);
+ OldClientContext ctx(txn, ns);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while setting collection options on "
- << ns.toString());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while setting collection options on "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
- WriteUnitOfWork wunit(txn);
+ Status errorStatus = Status::OK();
- Status errorStatus = Status::OK();
+ BSONForEach(e, cmdObj) {
+ if (str::equals("collMod", e.fieldName())) {
+ // no-op
+ } else if (str::startsWith(e.fieldName(), "$")) {
+ // no-op ignore top-level fields prefixed with $. They are for the command processor
+ } else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
+ // no-op
+ } else if (str::equals("index", e.fieldName())) {
+ BSONObj indexObj = e.Obj();
+ BSONObj keyPattern = indexObj.getObjectField("keyPattern");
- BSONForEach(e, cmdObj) {
- if (str::equals("collMod", e.fieldName())) {
- // no-op
+ if (keyPattern.isEmpty()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no keyPattern specified");
+ continue;
}
- else if (str::startsWith(e.fieldName(), "$")) {
- // no-op ignore top-level fields prefixed with $. They are for the command processor
+
+ BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
+ if (newExpireSecs.eoo()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field");
+ continue;
}
- else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
- // no-op
+ if (!newExpireSecs.isNumber()) {
+ errorStatus =
+ Status(ErrorCodes::InvalidOptions, "expireAfterSeconds field must be a number");
+ continue;
}
- else if (str::equals("index", e.fieldName())) {
- BSONObj indexObj = e.Obj();
- BSONObj keyPattern = indexObj.getObjectField("keyPattern");
-
- if (keyPattern.isEmpty()){
- errorStatus = Status(ErrorCodes::InvalidOptions, "no keyPattern specified");
- continue;
- }
-
- BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
- if (newExpireSecs.eoo()) {
- errorStatus = Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field");
- continue;
- }
- if (! newExpireSecs.isNumber()) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "expireAfterSeconds field must be a number");
- continue;
- }
-
- const IndexDescriptor* idx = coll->getIndexCatalog()
- ->findIndexByKeyPattern(txn, keyPattern);
- if (idx == NULL) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "cannot find index " << keyPattern
- << " for ns " << ns.toString());
- continue;
- }
- BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
- if (oldExpireSecs.eoo()){
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "no expireAfterSeconds field to update");
- continue;
- }
- if (! oldExpireSecs.isNumber()) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- "existing expireAfterSeconds field is not a number");
- continue;
- }
-
- if (oldExpireSecs != newExpireSecs) {
- result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
- // Change the value of "expireAfterSeconds" on disk.
- coll->getCatalogEntry()->updateTTLSetting(txn,
- idx->indexName(),
- newExpireSecs.numberLong());
- // Notify the index catalog that the definition of this index changed.
- idx = coll->getIndexCatalog()->refreshEntry(txn, idx);
- result->appendAs(newExpireSecs , "expireAfterSeconds_new");
- }
+
+ const IndexDescriptor* idx =
+ coll->getIndexCatalog()->findIndexByKeyPattern(txn, keyPattern);
+ if (idx == NULL) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "cannot find index " << keyPattern
+ << " for ns " << ns.toString());
+ continue;
}
- else if (str::equals("validator", e.fieldName())) {
- auto status = coll->setValidator(txn, e.Obj());
- if (!status.isOK())
- errorStatus = std::move(status);
+ BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
+ if (oldExpireSecs.eoo()) {
+ errorStatus =
+ Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field to update");
+ continue;
}
- else {
- // As of SERVER-17312 we only support these two options. When SERVER-17320 is
- // resolved this will need to be enhanced to handle other options.
- typedef CollectionOptions CO;
- const StringData name = e.fieldNameStringData();
- const int flag = (name == "usePowerOf2Sizes") ? CO::Flag_UsePowerOf2Sizes :
- (name == "noPadding") ? CO::Flag_NoPadding :
- 0;
- if (!flag) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "unknown option to collMod: " << name);
- continue;
- }
-
- CollectionCatalogEntry* cce = coll->getCatalogEntry();
-
- const int oldFlags = cce->getCollectionOptions(txn).flags;
- const bool oldSetting = oldFlags & flag;
- const bool newSetting = e.trueValue();
-
- result->appendBool(name.toString() + "_old", oldSetting);
- result->appendBool(name.toString() + "_new", newSetting);
-
- const int newFlags = newSetting
- ? (oldFlags | flag) // set flag
- : (oldFlags & ~flag); // clear flag
-
- // NOTE we do this unconditionally to ensure that we note that the user has
- // explicitly set flags, even if they are just setting the default.
- cce->updateFlags(txn, newFlags);
-
- const CollectionOptions newOptions = cce->getCollectionOptions(txn);
- invariant(newOptions.flags == newFlags);
- invariant(newOptions.flagsSet);
+ if (!oldExpireSecs.isNumber()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ "existing expireAfterSeconds field is not a number");
+ continue;
}
- }
- if (!errorStatus.isOK()) {
- return errorStatus;
- }
+ if (oldExpireSecs != newExpireSecs) {
+ result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
+ // Change the value of "expireAfterSeconds" on disk.
+ coll->getCatalogEntry()->updateTTLSetting(
+ txn, idx->indexName(), newExpireSecs.numberLong());
+ // Notify the index catalog that the definition of this index changed.
+ idx = coll->getIndexCatalog()->refreshEntry(txn, idx);
+ result->appendAs(newExpireSecs, "expireAfterSeconds_new");
+ }
+ } else if (str::equals("validator", e.fieldName())) {
+ auto status = coll->setValidator(txn, e.Obj());
+ if (!status.isOK())
+ errorStatus = std::move(status);
+ } else {
+ // As of SERVER-17312 we only support these two options. When SERVER-17320 is
+ // resolved this will need to be enhanced to handle other options.
+ typedef CollectionOptions CO;
+ const StringData name = e.fieldNameStringData();
+ const int flag = (name == "usePowerOf2Sizes")
+ ? CO::Flag_UsePowerOf2Sizes
+ : (name == "noPadding") ? CO::Flag_NoPadding : 0;
+ if (!flag) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "unknown option to collMod: " << name);
+ continue;
+ }
+
+ CollectionCatalogEntry* cce = coll->getCatalogEntry();
+
+ const int oldFlags = cce->getCollectionOptions(txn).flags;
+ const bool oldSetting = oldFlags & flag;
+ const bool newSetting = e.trueValue();
+
+ result->appendBool(name.toString() + "_old", oldSetting);
+ result->appendBool(name.toString() + "_new", newSetting);
+
+ const int newFlags = newSetting ? (oldFlags | flag) // set flag
+ : (oldFlags & ~flag); // clear flag
- getGlobalServiceContext()->getOpObserver()->onCollMod(txn,
- (dbName.toString() + ".$cmd").c_str(),
- cmdObj);
+ // NOTE we do this unconditionally to ensure that we note that the user has
+ // explicitly set flags, even if they are just setting the default.
+ cce->updateFlags(txn, newFlags);
- wunit.commit();
- return Status::OK();
+ const CollectionOptions newOptions = cce->getCollectionOptions(txn);
+ invariant(newOptions.flags == newFlags);
+ invariant(newOptions.flagsSet);
+ }
+ }
+
+ if (!errorStatus.isOK()) {
+ return errorStatus;
}
-} // namespace mongo
+
+ getGlobalServiceContext()->getOpObserver()->onCollMod(
+ txn, (dbName.toString() + ".$cmd").c_str(), cmdObj);
+
+ wunit.commit();
+ return Status::OK();
+}
+} // namespace mongo
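The usePowerOf2Sizes / noPadding branch above reduces to a single OR or AND-NOT on the stored flags word, applied unconditionally so that flagsSet records an explicit user choice even when it matches the default. A standalone model of that bit manipulation; the two bit values stand in for CollectionOptions::Flags and are illustrative here:

    #include <iostream>

    enum Flags { Flag_UsePowerOf2Sizes = 1 << 0, Flag_NoPadding = 1 << 1 };

    int updateFlag(int oldFlags, int flag, bool newSetting) {
        return newSetting ? (oldFlags | flag)    // set flag
                          : (oldFlags & ~flag);  // clear flag
    }

    int main() {
        int flags = Flag_UsePowerOf2Sizes;
        flags = updateFlag(flags, Flag_NoPadding, true);          // set noPadding
        flags = updateFlag(flags, Flag_UsePowerOf2Sizes, false);  // clear usePowerOf2Sizes
        std::cout << "flags = " << flags << '\n';                 // prints "flags = 2"
    }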
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
index 1f511f45145..eb8644b74d1 100644
--- a/src/mongo/db/catalog/coll_mod.h
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -29,17 +29,16 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
-
- /**
- * Performs the collection modification described in "cmdObj" on the collection "ns".
- */
- Status collMod(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* result);
-} // namespace mongo
+class BSONObj;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
+/**
+ * Performs the collection modification described in "cmdObj" on the collection "ns".
+ */
+Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result);
+} // namespace mongo
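A hedged sketch of driving the TTL path in coll_mod.cpp from a caller: the "index" option names an index by its keyPattern and supplies the new expireAfterSeconds. The collection name and TTL value are illustrative, and the in-tree BSON helpers are assumed:

    #include "mongo/db/catalog/coll_mod.h"
    #include "mongo/db/jsobj.h"
    #include "mongo/db/namespace_string.h"

    mongo::Status shortenSessionTTL(mongo::OperationContext* txn) {
        mongo::BSONObj cmd =
            BSON("collMod" << "sessions"
                           << "index" << BSON("keyPattern" << BSON("createdAt" << 1)
                                              << "expireAfterSeconds" << 3600));
        mongo::BSONObjBuilder result;
        return mongo::collMod(txn, mongo::NamespaceString("test.sessions"), cmd, &result);
    }

On success the result builder carries expireAfterSeconds_old and expireAfterSeconds_new, as appended in the index branch above.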
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 4be5fe65529..64ef00330d6 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -56,49 +56,45 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/record_fetcher.h"
-#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
+#include "mongo/db/auth/user_document_parser.h" // XXX-ANDY
#include "mongo/util/log.h"
namespace mongo {
namespace {
- const auto bannedExpressionsInValidators = std::set<StringData>{
- "$geoNear",
- "$near",
- "$nearSphere",
- "$text",
- "$where",
- };
- Status checkValidatorForBannedExpressions(const BSONObj& validator) {
- for (auto field : validator) {
- const auto name = field.fieldNameStringData();
- if (name[0] == '$' && bannedExpressionsInValidators.count(name)) {
- return {ErrorCodes::InvalidOptions,
- str::stream() << name << " is not allowed in collection validators"};
- }
-
- if (field.type() == Object || field.type() == Array) {
- auto status = checkValidatorForBannedExpressions(field.Obj());
- if (!status.isOK())
- return status;
- }
+const auto bannedExpressionsInValidators = std::set<StringData>{
+ "$geoNear", "$near", "$nearSphere", "$text", "$where",
+};
+Status checkValidatorForBannedExpressions(const BSONObj& validator) {
+ for (auto field : validator) {
+ const auto name = field.fieldNameStringData();
+ if (name[0] == '$' && bannedExpressionsInValidators.count(name)) {
+ return {ErrorCodes::InvalidOptions,
+ str::stream() << name << " is not allowed in collection validators"};
}
- return Status::OK();
+ if (field.type() == Object || field.type() == Array) {
+ auto status = checkValidatorForBannedExpressions(field.Obj());
+ if (!status.isOK())
+ return status;
+ }
}
+
+ return Status::OK();
+}
}
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- using logger::LogComponent;
+using logger::LogComponent;
- std::string CompactOptions::toString() const {
- std::stringstream ss;
- ss << "paddingMode: ";
- switch ( paddingMode ) {
+std::string CompactOptions::toString() const {
+ std::stringstream ss;
+ ss << "paddingMode: ";
+ switch (paddingMode) {
case NONE:
ss << "NONE";
break;
@@ -106,752 +102,721 @@ namespace {
ss << "PRESERVE";
break;
case MANUAL:
- ss << "MANUAL (" << paddingBytes << " + ( doc * " << paddingFactor <<") )";
- }
+ ss << "MANUAL (" << paddingBytes << " + ( doc * " << paddingFactor << ") )";
+ }
- ss << " validateDocuments: " << validateDocuments;
+ ss << " validateDocuments: " << validateDocuments;
- return ss.str();
- }
+ return ss.str();
+}
- //
- // CappedInsertNotifier
- //
+//
+// CappedInsertNotifier
+//
- CappedInsertNotifier::CappedInsertNotifier()
- : _cappedInsertCount(0) {
- }
+CappedInsertNotifier::CappedInsertNotifier() : _cappedInsertCount(0) {}
- void CappedInsertNotifier::notifyOfInsert() {
- stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
- _cappedInsertCount++;
- _cappedNewDataNotifier.notify_all();
- }
+void CappedInsertNotifier::notifyOfInsert() {
+ stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
+ _cappedInsertCount++;
+ _cappedNewDataNotifier.notify_all();
+}
- uint64_t CappedInsertNotifier::getCount() const {
- stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
- return _cappedInsertCount;
- }
+uint64_t CappedInsertNotifier::getCount() const {
+ stdx::lock_guard<stdx::mutex> lk(_cappedNewDataMutex);
+ return _cappedInsertCount;
+}
- void CappedInsertNotifier::waitForInsert(uint64_t referenceCount, Microseconds timeout) const {
- stdx::unique_lock<stdx::mutex> lk(_cappedNewDataMutex);
+void CappedInsertNotifier::waitForInsert(uint64_t referenceCount, Microseconds timeout) const {
+ stdx::unique_lock<stdx::mutex> lk(_cappedNewDataMutex);
- while (referenceCount == _cappedInsertCount) {
- if (stdx::cv_status::timeout == _cappedNewDataNotifier.wait_for(lk, timeout)) {
- return;
- }
+ while (referenceCount == _cappedInsertCount) {
+ if (stdx::cv_status::timeout == _cappedNewDataNotifier.wait_for(lk, timeout)) {
+ return;
}
}
+}
- // ----
-
- Collection::Collection( OperationContext* txn,
- StringData fullNS,
- CollectionCatalogEntry* details,
- RecordStore* recordStore,
- DatabaseCatalogEntry* dbce )
- : _ns( fullNS ),
- _details( details ),
- _recordStore( recordStore ),
- _dbce( dbce ),
- _infoCache( this ),
- _indexCatalog( this ),
- _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
- _validator(uassertStatusOK(parseValidator(_validatorDoc))),
- _cursorManager(fullNS),
- _cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr) {
- _magic = 1357924;
- _indexCatalog.init(txn);
- if ( isCapped() )
- _recordStore->setCappedDeleteCallback( this );
- _infoCache.reset(txn);
- }
+// ----
+
+Collection::Collection(OperationContext* txn,
+ StringData fullNS,
+ CollectionCatalogEntry* details,
+ RecordStore* recordStore,
+ DatabaseCatalogEntry* dbce)
+ : _ns(fullNS),
+ _details(details),
+ _recordStore(recordStore),
+ _dbce(dbce),
+ _infoCache(this),
+ _indexCatalog(this),
+ _validatorDoc(_details->getCollectionOptions(txn).validator.getOwned()),
+ _validator(uassertStatusOK(parseValidator(_validatorDoc))),
+ _cursorManager(fullNS),
+ _cappedNotifier(_recordStore->isCapped() ? new CappedInsertNotifier() : nullptr) {
+ _magic = 1357924;
+ _indexCatalog.init(txn);
+ if (isCapped())
+ _recordStore->setCappedDeleteCallback(this);
+ _infoCache.reset(txn);
+}
- Collection::~Collection() {
- verify( ok() );
- _magic = 0;
- }
+Collection::~Collection() {
+ verify(ok());
+ _magic = 0;
+}
- bool Collection::requiresIdIndex() const {
+bool Collection::requiresIdIndex() const {
+ if (_ns.ns().find('$') != string::npos) {
+ // no indexes on indexes
+ return false;
+ }
- if ( _ns.ns().find( '$' ) != string::npos ) {
- // no indexes on indexes
+ if (_ns.isSystem()) {
+ StringData shortName = _ns.coll().substr(_ns.coll().find('.') + 1);
+ if (shortName == "indexes" || shortName == "namespaces" || shortName == "profile") {
return false;
}
-
- if ( _ns.isSystem() ) {
- StringData shortName = _ns.coll().substr( _ns.coll().find( '.' ) + 1 );
- if ( shortName == "indexes" ||
- shortName == "namespaces" ||
- shortName == "profile" ) {
- return false;
- }
- }
-
- if ( _ns.db() == "local" ) {
- if ( _ns.coll().startsWith( "oplog." ) )
- return false;
- }
-
- if ( !_ns.isSystem() ) {
- // non system collections definitely have an _id index
- return true;
- }
-
-
- return true;
- }
-
- std::unique_ptr<RecordCursor> Collection::getCursor(OperationContext* txn, bool forward) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- invariant( ok() );
-
- return _recordStore->getCursor(txn, forward);
}
- vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
-
- return _recordStore->getManyCursors(txn);
+ if (_ns.db() == "local") {
+ if (_ns.coll().startsWith("oplog."))
+ return false;
}
- Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
- return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
- _recordStore->dataFor( txn, loc ).releaseToBson());
+ if (!_ns.isSystem()) {
+ // non system collections definitely have an _id index
+ return true;
}
- bool Collection::findDoc(OperationContext* txn,
- const RecordId& loc,
- Snapshotted<BSONObj>* out) const {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- RecordData rd;
- if ( !_recordStore->findRecord( txn, loc, &rd ) )
- return false;
- *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
- return true;
- }
+ return true;
+}
- Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
- if (!_validator)
- return Status::OK();
+std::unique_ptr<RecordCursor> Collection::getCursor(OperationContext* txn, bool forward) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+ invariant(ok());
- if (documentValidationDisabled(txn))
- return Status::OK();
+ return _recordStore->getCursor(txn, forward);
+}
- if (_validator->matchesBSON(document))
- return Status::OK();
+vector<std::unique_ptr<RecordCursor>> Collection::getManyCursors(OperationContext* txn) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
- }
+ return _recordStore->getManyCursors(txn);
+}
- StatusWith<std::unique_ptr<MatchExpression>> Collection::parseValidator(
- const BSONObj& validator) const {
- if (validator.isEmpty())
- return {nullptr};
+Snapshotted<BSONObj> Collection::docFor(OperationContext* txn, const RecordId& loc) const {
+ return Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(),
+ _recordStore->dataFor(txn, loc).releaseToBson());
+}
- if (ns().isSystem()) {
- return {ErrorCodes::InvalidOptions,
- "Document validators not allowed on system collections."};
- }
+bool Collection::findDoc(OperationContext* txn,
+ const RecordId& loc,
+ Snapshotted<BSONObj>* out) const {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- if (ns().isOnInternalDb()) {
- return {ErrorCodes::InvalidOptions,
- str::stream() << "Document validators are not allowed on collections in"
- << " the " << ns().db() << " database"};
- }
+ RecordData rd;
+ if (!_recordStore->findRecord(txn, loc, &rd))
+ return false;
+ *out = Snapshotted<BSONObj>(txn->recoveryUnit()->getSnapshotId(), rd.releaseToBson());
+ return true;
+}
- {
- auto status = checkValidatorForBannedExpressions(validator);
- if (!status.isOK())
- return status;
- }
+Status Collection::checkValidation(OperationContext* txn, const BSONObj& document) const {
+ if (!_validator)
+ return Status::OK();
- auto statusWithRawPtr = MatchExpressionParser::parse(validator);
- if (!statusWithRawPtr.isOK())
- return statusWithRawPtr.getStatus();
+ if (documentValidationDisabled(txn))
+ return Status::OK();
- return {std::unique_ptr<MatchExpression>(statusWithRawPtr.getValue())};
- }
+ if (_validator->matchesBSON(document))
+ return Status::OK();
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota) {
- invariant(!_validator || documentValidationDisabled(txn));
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( !_indexCatalog.haveAnyIndexes() ); // eventually can implement, just not done
+ return {ErrorCodes::DocumentValidationFailure, "Document failed validation"};
+}
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- doc,
- _enforceQuota( enforceQuota ) );
- if ( !loc.isOK() )
- return loc;
+StatusWith<std::unique_ptr<MatchExpression>> Collection::parseValidator(
+ const BSONObj& validator) const {
+ if (validator.isEmpty())
+ return {nullptr};
- // we cannot call into the OpObserver here because the document being written is not present
- // fortunately, this is currently only used for adding entries to the oplog.
+ if (ns().isSystem()) {
+ return {ErrorCodes::InvalidOptions,
+ "Document validators not allowed on system collections."};
+ }
- return StatusWith<RecordId>( loc );
+ if (ns().isOnInternalDb()) {
+ return {ErrorCodes::InvalidOptions,
+ str::stream() << "Document validators are not allowed on collections in"
+ << " the " << ns().db() << " database"};
}
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const BSONObj& docToInsert,
- bool enforceQuota,
- bool fromMigrate) {
- {
- auto status = checkValidation(txn, docToInsert);
- if (!status.isOK())
- return status;
- }
+ {
+ auto status = checkValidatorForBannedExpressions(validator);
+ if (!status.isOK())
+ return status;
+ }
- const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
+ auto statusWithRawPtr = MatchExpressionParser::parse(validator);
+ if (!statusWithRawPtr.isOK())
+ return statusWithRawPtr.getStatus();
- if ( _indexCatalog.findIdIndex( txn ) ) {
- if ( docToInsert["_id"].eoo() ) {
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- str::stream() << "Collection::insertDocument got "
- "document without _id for ns:" << _ns.ns() );
- }
- }
+ return {std::unique_ptr<MatchExpression>(statusWithRawPtr.getValue())};
+}
- StatusWith<RecordId> res = _insertDocument( txn, docToInsert, enforceQuota );
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
- if (res.isOK()) {
- getGlobalServiceContext()->getOpObserver()->onInsert(txn,
- ns(),
- docToInsert,
- fromMigrate);
-
- // If there is a notifier object and another thread is waiting on it, then we notify
- // waiters of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so
- // there are waiters if this Collection's shared_ptr is not unique.
- if (_cappedNotifier && !_cappedNotifier.unique()) {
- _cappedNotifier->notifyOfInsert();
- }
- }
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota) {
+ invariant(!_validator || documentValidationDisabled(txn));
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(!_indexCatalog.haveAnyIndexes()); // eventually can implement, just not done
- return res;
- }
+ StatusWith<RecordId> loc = _recordStore->insertRecord(txn, doc, _enforceQuota(enforceQuota));
+ if (!loc.isOK())
+ return loc;
- StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
- const BSONObj& doc,
- MultiIndexBlock* indexBlock,
- bool enforceQuota) {
- {
- auto status = checkValidation(txn, doc);
- if (!status.isOK())
- return status;
- }
+ // we cannot call into the OpObserver here because the document being written is not present
+ // fortunately, this is currently only used for adding entries to the oplog.
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ return StatusWith<RecordId>(loc);
+}
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- doc.objdata(),
- doc.objsize(),
- _enforceQuota(enforceQuota) );
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const BSONObj& docToInsert,
+ bool enforceQuota,
+ bool fromMigrate) {
+ {
+ auto status = checkValidation(txn, docToInsert);
+ if (!status.isOK())
+ return status;
+ }
- if ( !loc.isOK() )
- return loc;
+ const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- Status status = indexBlock->insert( doc, loc.getValue() );
- if ( !status.isOK() )
- return StatusWith<RecordId>( status );
+ if (_indexCatalog.findIdIndex(txn)) {
+ if (docToInsert["_id"].eoo()) {
+ return StatusWith<RecordId>(ErrorCodes::InternalError,
+ str::stream()
+ << "Collection::insertDocument got "
+ "document without _id for ns:" << _ns.ns());
+ }
+ }
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), doc);
+ StatusWith<RecordId> res = _insertDocument(txn, docToInsert, enforceQuota);
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ if (res.isOK()) {
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), docToInsert, fromMigrate);
- // If there is a notifier object and another thread is waiting on it, then we notify waiters
- // of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so there are
- // waiters if this Collection's shared_ptr is not unique.
+ // If there is a notifier object and another thread is waiting on it, then we notify
+ // waiters of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so
+ // there are waiters if this Collection's shared_ptr is not unique.
if (_cappedNotifier && !_cappedNotifier.unique()) {
_cappedNotifier->notifyOfInsert();
}
-
- return loc;
}
- StatusWith<RecordId> Collection::_insertDocument( OperationContext* txn,
- const BSONObj& docToInsert,
- bool enforceQuota ) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ return res;
+}
- // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
- // under the RecordStore, this feels broken since that should be a
- // collection access method probably
+StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ MultiIndexBlock* indexBlock,
+ bool enforceQuota) {
+ {
+ auto status = checkValidation(txn, doc);
+ if (!status.isOK())
+ return status;
+ }
- StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
- docToInsert.objdata(),
- docToInsert.objsize(),
- _enforceQuota( enforceQuota ) );
- if ( !loc.isOK() )
- return loc;
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( RecordId::min() < loc.getValue() );
- invariant( loc.getValue() < RecordId::max() );
+ StatusWith<RecordId> loc =
+ _recordStore->insertRecord(txn, doc.objdata(), doc.objsize(), _enforceQuota(enforceQuota));
- _infoCache.notifyOfWriteOp();
+ if (!loc.isOK())
+ return loc;
- Status s = _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
- if (!s.isOK())
- return StatusWith<RecordId>(s);
+ Status status = indexBlock->insert(doc, loc.getValue());
+ if (!status.isOK())
+ return StatusWith<RecordId>(status);
- return loc;
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), doc);
+
+ // If there is a notifier object and another thread is waiting on it, then we notify waiters
+ // of this document insert. Waiters keep a shared_ptr to '_cappedNotifier', so there are
+ // waiters if this Collection's shared_ptr is not unique.
+ if (_cappedNotifier && !_cappedNotifier.unique()) {
+ _cappedNotifier->notifyOfInsert();
}
- Status Collection::aboutToDeleteCapped( OperationContext* txn,
- const RecordId& loc,
- RecordData data ) {
+ return loc;
+}
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+StatusWith<RecordId> Collection::_insertDocument(OperationContext* txn,
+ const BSONObj& docToInsert,
+ bool enforceQuota) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- BSONObj doc = data.releaseToBson();
- _indexCatalog.unindexRecord(txn, doc, loc, false);
+ // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
+ // under the RecordStore, this feels broken since that should be a
+ // collection access method probably
- return Status::OK();
- }
+ StatusWith<RecordId> loc = _recordStore->insertRecord(
+ txn, docToInsert.objdata(), docToInsert.objsize(), _enforceQuota(enforceQuota));
+ if (!loc.isOK())
+ return loc;
- void Collection::deleteDocument(OperationContext* txn,
- const RecordId& loc,
- bool cappedOK,
- bool noWarn,
- BSONObj* deletedId) {
- if ( isCapped() && !cappedOK ) {
- log() << "failing remove on a capped ns " << _ns << endl;
- uasserted( 10089, "cannot remove from a capped collection" );
- return;
- }
+ invariant(RecordId::min() < loc.getValue());
+ invariant(loc.getValue() < RecordId::max());
- Snapshotted<BSONObj> doc = docFor(txn, loc);
+ _infoCache.notifyOfWriteOp();
- BSONElement e = doc.value()["_id"];
- BSONObj id;
- if (e.type()) {
- id = e.wrap();
- if (deletedId) {
- *deletedId = e.wrap();
- }
- }
+ Status s = _indexCatalog.indexRecord(txn, docToInsert, loc.getValue());
+ if (!s.isOK())
+ return StatusWith<RecordId>(s);
- /* check if any cursors point to us. if so, advance them. */
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
+ return loc;
+}
- _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
+Status Collection::aboutToDeleteCapped(OperationContext* txn,
+ const RecordId& loc,
+ RecordData data) {
+ /* check if any cursors point to us. if so, advance them. */
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
- _recordStore->deleteRecord(txn, loc);
+ BSONObj doc = data.releaseToBson();
+ _indexCatalog.unindexRecord(txn, doc, loc, false);
- _infoCache.notifyOfWriteOp();
+ return Status::OK();
+}
- if (!id.isEmpty()) {
- getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns().ns(), id);
- }
+void Collection::deleteDocument(
+ OperationContext* txn, const RecordId& loc, bool cappedOK, bool noWarn, BSONObj* deletedId) {
+ if (isCapped() && !cappedOK) {
+ log() << "failing remove on a capped ns " << _ns << endl;
+ uasserted(10089, "cannot remove from a capped collection");
+ return;
}
- Counter64 moveCounter;
- ServerStatusMetricField<Counter64> moveCounterDisplay( "record.moves", &moveCounter );
-
- StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool enforceQuota,
- bool indexesAffected,
- OpDebug* debug,
- oplogUpdateEntryArgs& args) {
- {
- auto status = checkValidation(txn, newDoc);
- if (!status.isOK())
- return status;
- }
+ Snapshotted<BSONObj> doc = docFor(txn, loc);
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
-
- SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
-
- BSONElement oldId = oldDoc.value()["_id"];
- if ( !oldId.eoo() && ( oldId != newDoc["_id"] ) )
- return StatusWith<RecordId>( ErrorCodes::InternalError,
- "in Collection::updateDocument _id mismatch",
- 13596 );
-
- // At the end of this step, we will have a map of UpdateTickets, one per index, which
- // represent the index updates needed to be done, based on the changes between oldDoc and
- // newDoc.
- OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
- if ( indexesAffected ) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
- IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
- IndexAccessMethod* iam = ii.accessMethod( descriptor );
-
- InsertDeleteOptions options;
- options.logIfError = false;
- options.dupsAllowed =
- !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique())
- || repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
- UpdateTicket* updateTicket = new UpdateTicket();
- updateTickets.mutableMap()[descriptor] = updateTicket;
- Status ret = iam->validateUpdate(txn,
- oldDoc.value(),
- newDoc,
- oldLocation,
- options,
- updateTicket,
- entry->getFilterExpression());
- if ( !ret.isOK() ) {
- return StatusWith<RecordId>( ret );
- }
- }
+ BSONElement e = doc.value()["_id"];
+ BSONObj id;
+ if (e.type()) {
+ id = e.wrap();
+ if (deletedId) {
+ *deletedId = e.wrap();
}
+ }
- // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
- // object is removed from all indexes.
- StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn,
- oldLocation,
- newDoc.objdata(),
- newDoc.objsize(),
- _enforceQuota( enforceQuota ),
- this );
-
- if ( !newLocation.isOK() ) {
- return newLocation;
- }
+ /* check if any cursors point to us. if so, advance them. */
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_DELETION);
- // At this point, the old object may or may not still be indexed, depending on if it was
- // moved.
+ _indexCatalog.unindexRecord(txn, doc.value(), loc, noWarn);
- _infoCache.notifyOfWriteOp();
+ _recordStore->deleteRecord(txn, loc);
- // If the object did move, we need to add the new location to all indexes.
- if ( newLocation.getValue() != oldLocation ) {
+ _infoCache.notifyOfWriteOp();
- if ( debug ) {
- if (debug->nmoved == -1) // default of -1 rather than 0
- debug->nmoved = 1;
- else
- debug->nmoved += 1;
- }
+ if (!id.isEmpty()) {
+ getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns().ns(), id);
+ }
+}
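
Usage sketch for the delete path (illustrative; 'loc' is a RecordId previously obtained from a cursor, and the usual lock and WriteUnitOfWork are assumed):

    BSONObj deletedId;
    WriteUnitOfWork wunit(txn);
    collection->deleteDocument(txn, loc, /*cappedOK*/ false, /*noWarn*/ false, &deletedId);
    wunit.commit();
    // 'deletedId' now holds { _id: ... } if the document had an _id; on a capped
    // collection this path would instead uassert with code 10089.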
+
+Counter64 moveCounter;
+ServerStatusMetricField<Counter64> moveCounterDisplay("record.moves", &moveCounter);
+
+StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
+ const RecordId& oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ bool enforceQuota,
+ bool indexesAffected,
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args) {
+ {
+ auto status = checkValidation(txn, newDoc);
+ if (!status.isOK())
+ return status;
+ }
+
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
- Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
- if (!s.isOK())
- return StatusWith<RecordId>(s);
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
- args.ns = ns().ns();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
+ SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- return newLocation;
+ BSONElement oldId = oldDoc.value()["_id"];
+ if (!oldId.eoo() && (oldId != newDoc["_id"]))
+ return StatusWith<RecordId>(
+ ErrorCodes::InternalError, "in Collection::updateDocument _id mismatch", 13596);
+
+ // At the end of this step, we will have a map of UpdateTickets, one per index, which
+ // represent the index updates needed to be done, based on the changes between oldDoc and
+ // newDoc.
+ OwnedPointerMap<IndexDescriptor*, UpdateTicket> updateTickets;
+ if (indexesAffected) {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+ IndexCatalogEntry* entry = ii.catalogEntry(descriptor);
+ IndexAccessMethod* iam = ii.accessMethod(descriptor);
+
+ InsertDeleteOptions options;
+ options.logIfError = false;
+ options.dupsAllowed =
+ !(KeyPattern::isIdKeyPattern(descriptor->keyPattern()) || descriptor->unique()) ||
+ repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
+ UpdateTicket* updateTicket = new UpdateTicket();
+ updateTickets.mutableMap()[descriptor] = updateTicket;
+ Status ret = iam->validateUpdate(txn,
+ oldDoc.value(),
+ newDoc,
+ oldLocation,
+ options,
+ updateTicket,
+ entry->getFilterExpression());
+ if (!ret.isOK()) {
+ return StatusWith<RecordId>(ret);
+ }
}
+ }
- // Object did not move. We update each index with each respective UpdateTicket.
+ // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
+ // object is removed from all indexes.
+ StatusWith<RecordId> newLocation = _recordStore->updateRecord(
+ txn, oldLocation, newDoc.objdata(), newDoc.objsize(), _enforceQuota(enforceQuota), this);
- if ( debug )
- debug->keyUpdates = 0;
+ if (!newLocation.isOK()) {
+ return newLocation;
+ }
- if ( indexesAffected ) {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
- IndexAccessMethod* iam = ii.accessMethod(descriptor);
+ // At this point, the old object may or may not still be indexed, depending on if it was
+ // moved.
- int64_t updatedKeys;
- Status ret = iam->update(
- txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
- if ( !ret.isOK() )
- return StatusWith<RecordId>( ret );
- if ( debug )
- debug->keyUpdates += updatedKeys;
- }
+ _infoCache.notifyOfWriteOp();
+
+ // If the object did move, we need to add the new location to all indexes.
+ if (newLocation.getValue() != oldLocation) {
+ if (debug) {
+ if (debug->nmoved == -1) // default of -1 rather than 0
+ debug->nmoved = 1;
+ else
+ debug->nmoved += 1;
}
- invariant( sid == txn->recoveryUnit()->getSnapshotId() );
+ Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
+ if (!s.isOK())
+ return StatusWith<RecordId>(s);
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
args.ns = ns().ns();
getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
return newLocation;
}
- Status Collection::recordStoreGoingToMove( OperationContext* txn,
- const RecordId& oldLocation,
- const char* oldBuffer,
- size_t oldSize ) {
- moveCounter.increment();
- _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
- _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true);
- return Status::OK();
- }
+ // Object did not move. We update each index with each respective UpdateTicket.
- Status Collection::recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc ) {
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- return Status::OK();
+ if (debug)
+ debug->keyUpdates = 0;
+
+ if (indexesAffected) {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+ IndexAccessMethod* iam = ii.accessMethod(descriptor);
+
+ int64_t updatedKeys;
+ Status ret = iam->update(txn, *updateTickets.mutableMap()[descriptor], &updatedKeys);
+ if (!ret.isOK())
+ return StatusWith<RecordId>(ret);
+ if (debug)
+ debug->keyUpdates += updatedKeys;
+ }
}
+ invariant(sid == txn->recoveryUnit()->getSnapshotId());
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
- bool Collection::updateWithDamagesSupported() const {
- if (_validator)
- return false;
+ return newLocation;
+}
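
Caller sketch for updateDocument (illustrative only; the replacement document must keep the old _id, per the InternalError check above, and 'oldDoc' must come from the current snapshot):

    Snapshotted<BSONObj> oldDoc = collection->docFor(txn, loc);
    BSONObjBuilder bob;
    bob.append(oldDoc.value()["_id"]);  // preserve _id, as required above
    bob.append("x", 2);
    oplogUpdateEntryArgs args;
    WriteUnitOfWork wunit(txn);
    StatusWith<RecordId> newLoc = collection->updateDocument(
        txn, loc, oldDoc, bob.obj(), /*enforceQuota*/ true, /*indexesAffected*/ true,
        /*debug*/ nullptr, args);
    if (newLoc.isOK() && newLoc.getValue() != loc) {
        // the record moved: "record.moves" was bumped and all indexes were rewritten
    }
    wunit.commit();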
- return _recordStore->updateWithDamagesSupported();
- }
+Status Collection::recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize) {
+ moveCounter.increment();
+ _cursorManager.invalidateDocument(txn, oldLocation, INVALIDATION_DELETION);
+ _indexCatalog.unindexRecord(txn, BSONObj(oldBuffer), oldLocation, true);
+ return Status::OK();
+}
- Status Collection::updateDocumentWithDamages( OperationContext* txn,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages,
- oplogUpdateEntryArgs& args) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
- invariant(updateWithDamagesSupported());
-
- // Broadcast the mutation so that query results stay correct.
- _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
-
- Status status =
- _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
-
- if (status.isOK()) {
- args.ns = ns().ns();
- getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
- }
- return status;
- }
+Status Collection::recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc) {
+ // Broadcast the mutation so that query results stay correct.
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
+ return Status::OK();
+}
- bool Collection::_enforceQuota( bool userEnforeQuota ) const {
- if ( !userEnforeQuota )
- return false;
- if ( !mmapv1GlobalOptions.quota )
- return false;
+bool Collection::updateWithDamagesSupported() const {
+ if (_validator)
+ return false;
- if ( _ns.db() == "local" )
- return false;
+ return _recordStore->updateWithDamagesSupported();
+}
- if ( _ns.isSpecial() )
- return false;
+Status Collection::updateDocumentWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const Snapshotted<RecordData>& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ invariant(updateWithDamagesSupported());
- return true;
- }
+ // Broadcast the mutation so that query results stay correct.
+ _cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- bool Collection::isCapped() const {
- return _cappedNotifier.get();
- }
+ Status status =
+ _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
- std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() const {
- invariant(isCapped());
- return _cappedNotifier;
+ if (status.isOK()) {
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
}
+ return status;
+}
- uint64_t Collection::numRecords( OperationContext* txn ) const {
- return _recordStore->numRecords( txn );
- }
+bool Collection::_enforceQuota(bool userEnforceQuota) const {
+    if (!userEnforceQuota)
+ return false;
- uint64_t Collection::dataSize( OperationContext* txn ) const {
- return _recordStore->dataSize( txn );
- }
+ if (!mmapv1GlobalOptions.quota)
+ return false;
+
+ if (_ns.db() == "local")
+ return false;
- uint64_t Collection::getIndexSize(OperationContext* opCtx,
- BSONObjBuilder* details,
- int scale) {
+ if (_ns.isSpecial())
+ return false;
- IndexCatalog* idxCatalog = getIndexCatalog();
+ return true;
+}
- IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator(opCtx, true);
+bool Collection::isCapped() const {
+ return _cappedNotifier.get();
+}
- uint64_t totalSize = 0;
+std::shared_ptr<CappedInsertNotifier> Collection::getCappedInsertNotifier() const {
+ invariant(isCapped());
+ return _cappedNotifier;
+}
- while (ii.more()) {
- IndexDescriptor* d = ii.next();
- IndexAccessMethod* iam = idxCatalog->getIndex(d);
+uint64_t Collection::numRecords(OperationContext* txn) const {
+ return _recordStore->numRecords(txn);
+}
- long long ds = iam->getSpaceUsedBytes(opCtx);
+uint64_t Collection::dataSize(OperationContext* txn) const {
+ return _recordStore->dataSize(txn);
+}
- totalSize += ds;
- if (details) {
- details->appendNumber(d->indexName(), ds / scale);
- }
- }
+uint64_t Collection::getIndexSize(OperationContext* opCtx, BSONObjBuilder* details, int scale) {
+ IndexCatalog* idxCatalog = getIndexCatalog();
- return totalSize;
- }
+ IndexCatalog::IndexIterator ii = idxCatalog->getIndexIterator(opCtx, true);
- /**
- * order will be:
- * 1) store index specs
- * 2) drop indexes
- * 3) truncate record store
- * 4) re-write indexes
- */
- Status Collection::truncate(OperationContext* txn) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- massert( 17445, "index build in progress", _indexCatalog.numIndexesInProgress( txn ) == 0 );
-
- // 1) store index specs
- vector<BSONObj> indexSpecs;
- {
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
- while ( ii.more() ) {
- const IndexDescriptor* idx = ii.next();
- indexSpecs.push_back( idx->infoObj().getOwned() );
- }
- }
+ uint64_t totalSize = 0;
- // 2) drop indexes
- Status status = _indexCatalog.dropAllIndexes(txn, true);
- if ( !status.isOK() )
- return status;
- _cursorManager.invalidateAll(false, "collection truncated");
- _infoCache.reset( txn );
+ while (ii.more()) {
+ IndexDescriptor* d = ii.next();
+ IndexAccessMethod* iam = idxCatalog->getIndex(d);
- // 3) truncate record store
- status = _recordStore->truncate(txn);
- if ( !status.isOK() )
- return status;
+ long long ds = iam->getSpaceUsedBytes(opCtx);
- // 4) re-create indexes
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]);
- if ( !status.isOK() )
- return status;
+ totalSize += ds;
+ if (details) {
+ details->appendNumber(d->indexName(), ds / scale);
}
+ }
- return Status::OK();
+ return totalSize;
+}
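
A short reading of getIndexSize's contract (sketch): the per-index entries in 'details' are divided by 'scale', while the returned total is not:

    BSONObjBuilder details;
    uint64_t totalBytes = collection->getIndexSize(txn, &details, 1024);
    // details.obj() maps each index name to its size in KiB; 'totalBytes' is the
    // unscaled sum in bytes across all indexes.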
+
+/**
+ * The order of operations is:
+ * 1) store index specs
+ * 2) drop indexes
+ * 3) truncate record store
+ * 4) re-create indexes
+ */
+Status Collection::truncate(OperationContext* txn) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ massert(17445, "index build in progress", _indexCatalog.numIndexesInProgress(txn) == 0);
+
+ // 1) store index specs
+ vector<BSONObj> indexSpecs;
+ {
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* idx = ii.next();
+ indexSpecs.push_back(idx->infoObj().getOwned());
+ }
}
- void Collection::temp_cappedTruncateAfter(OperationContext* txn,
- RecordId end,
- bool inclusive) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant( isCapped() );
+ // 2) drop indexes
+ Status status = _indexCatalog.dropAllIndexes(txn, true);
+ if (!status.isOK())
+ return status;
+ _cursorManager.invalidateAll(false, "collection truncated");
+ _infoCache.reset(txn);
- _cursorManager.invalidateAll(false, "capped collection truncated");
- _recordStore->temp_cappedTruncateAfter( txn, end, inclusive );
+ // 3) truncate record store
+ status = _recordStore->truncate(txn);
+ if (!status.isOK())
+ return status;
+
+ // 4) re-create indexes
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ status = _indexCatalog.createIndexOnEmptyCollection(txn, indexSpecs[i]);
+ if (!status.isOK())
+ return status;
}
- Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+ return Status::OK();
+}
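
Usage sketch for truncate (assumes the MODE_X lock the dassert requires and no index build in progress, otherwise massert 17445 fires):

    WriteUnitOfWork wunit(txn);
    uassertStatusOK(collection->truncate(txn));
    wunit.commit();  // all documents gone; index definitions recreated empty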
- // Make owned early so that the parsed match expression refers to the owned object.
- if (!validatorDoc.isOwned()) validatorDoc = validatorDoc.getOwned();
+void Collection::temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
+ invariant(isCapped());
- auto statusWithMatcher = parseValidator(validatorDoc);
- if (!statusWithMatcher.isOK())
- return statusWithMatcher.getStatus();
+ _cursorManager.invalidateAll(false, "capped collection truncated");
+ _recordStore->temp_cappedTruncateAfter(txn, end, inclusive);
+}
- _details->updateValidator(txn, validatorDoc);
+Status Collection::setValidator(OperationContext* txn, BSONObj validatorDoc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- _validator = std::move(statusWithMatcher.getValue());
- _validatorDoc = std::move(validatorDoc);
- return Status::OK();
- }
+ // Make owned early so that the parsed match expression refers to the owned object.
+ if (!validatorDoc.isOwned())
+ validatorDoc = validatorDoc.getOwned();
- namespace {
- class MyValidateAdaptor : public ValidateAdaptor {
- public:
- virtual ~MyValidateAdaptor(){}
-
- virtual Status validate( const RecordData& record, size_t* dataSize ) {
- BSONObj obj = record.toBson();
- const Status status = validateBSON(obj.objdata(), obj.objsize());
- if ( status.isOK() )
- *dataSize = obj.objsize();
- return Status::OK();
- }
+ auto statusWithMatcher = parseValidator(validatorDoc);
+ if (!statusWithMatcher.isOK())
+ return statusWithMatcher.getStatus();
- };
+ _details->updateValidator(txn, validatorDoc);
+
+ _validator = std::move(statusWithMatcher.getValue());
+ _validatorDoc = std::move(validatorDoc);
+ return Status::OK();
+}
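
Sketch of installing a validator (the filter shown is a hypothetical match expression; the MODE_X lock asserted above is assumed, and an empty BSONObj would clear validation):

    WriteUnitOfWork wunit(txn);
    uassertStatusOK(collection->setValidator(txn, BSON("x" << BSON("$exists" << true))));
    wunit.commit();
    // inserts/updates of documents lacking "x" now fail checkValidation()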
+
+namespace {
+class MyValidateAdaptor : public ValidateAdaptor {
+public:
+ virtual ~MyValidateAdaptor() {}
+
+ virtual Status validate(const RecordData& record, size_t* dataSize) {
+ BSONObj obj = record.toBson();
+ const Status status = validateBSON(obj.objdata(), obj.objsize());
+ if (status.isOK())
+ *dataSize = obj.objsize();
+        return status;  // surface invalid BSON to the record store's validate pass
}
+};
+}
- Status Collection::validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateResults* results, BSONObjBuilder* output ){
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
+Status Collection::validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateResults* results,
+ BSONObjBuilder* output) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IS));
- MyValidateAdaptor adaptor;
- Status status = _recordStore->validate( txn, full, scanData, &adaptor, results, output );
- if ( !status.isOK() )
- return status;
+ MyValidateAdaptor adaptor;
+ Status status = _recordStore->validate(txn, full, scanData, &adaptor, results, output);
+ if (!status.isOK())
+ return status;
- { // indexes
- output->append("nIndexes", _indexCatalog.numIndexesReady( txn ) );
- int idxn = 0;
- try {
- // Only applicable when 'full' validation is requested.
- std::unique_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
- BSONObjBuilder indexes; // not using subObjStart to be exception safe
-
- IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
- while( i.more() ) {
- const IndexDescriptor* descriptor = i.next();
- log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace() << endl;
- IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
- invariant( iam );
-
- std::unique_ptr<BSONObjBuilder> bob(
- indexDetails.get() ? new BSONObjBuilder(
- indexDetails->subobjStart(descriptor->indexNamespace())) :
- NULL);
-
- int64_t keys;
- iam->validate(txn, full, &keys, bob.get());
- indexes.appendNumber(descriptor->indexNamespace(),
- static_cast<long long>(keys));
-
- if (bob) {
- BSONObj obj = bob->done();
- BSONElement valid = obj["valid"];
- if (valid.ok() && !valid.trueValue()) {
- results->valid = false;
- }
+ { // indexes
+ output->append("nIndexes", _indexCatalog.numIndexesReady(txn));
+ int idxn = 0;
+ try {
+ // Only applicable when 'full' validation is requested.
+ std::unique_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
+ BSONObjBuilder indexes; // not using subObjStart to be exception safe
+
+ IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
+ while (i.more()) {
+ const IndexDescriptor* descriptor = i.next();
+ log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace()
+ << endl;
+ IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
+ invariant(iam);
+
+ std::unique_ptr<BSONObjBuilder> bob(
+ indexDetails.get() ? new BSONObjBuilder(indexDetails->subobjStart(
+ descriptor->indexNamespace()))
+ : NULL);
+
+ int64_t keys;
+ iam->validate(txn, full, &keys, bob.get());
+ indexes.appendNumber(descriptor->indexNamespace(), static_cast<long long>(keys));
+
+ if (bob) {
+ BSONObj obj = bob->done();
+ BSONElement valid = obj["valid"];
+ if (valid.ok() && !valid.trueValue()) {
+ results->valid = false;
}
- idxn++;
- }
-
- output->append("keysPerIndex", indexes.done());
- if (indexDetails.get()) {
- output->append("indexDetails", indexDetails->done());
}
+ idxn++;
}
- catch ( DBException& exc ) {
- string err = str::stream() <<
- "exception during index validate idxn "<<
- BSONObjBuilder::numStr(idxn) <<
- ": " << exc.toString();
- results->errors.push_back( err );
- results->valid = false;
+
+ output->append("keysPerIndex", indexes.done());
+ if (indexDetails.get()) {
+ output->append("indexDetails", indexDetails->done());
}
+ } catch (DBException& exc) {
+ string err = str::stream() << "exception during index validate idxn "
+ << BSONObjBuilder::numStr(idxn) << ": " << exc.toString();
+ results->errors.push_back(err);
+ results->valid = false;
}
-
- return Status::OK();
}
- Status Collection::touch( OperationContext* txn,
- bool touchData, bool touchIndexes,
- BSONObjBuilder* output ) const {
- if ( touchData ) {
- BSONObjBuilder b;
- Status status = _recordStore->touch( txn, &b );
- if ( !status.isOK() )
- return status;
- output->append( "data", b.obj() );
- }
+ return Status::OK();
+}
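
Reading validate's output (sketch): corruption does not surface through the returned Status, only through 'results' and 'output', as the header comment below also notes:

    ValidateResults results;
    BSONObjBuilder output;
    uassertStatusOK(
        collection->validate(txn, /*full*/ true, /*scanData*/ true, &results, &output));
    // output.obj() carries "nIndexes", "keysPerIndex" and, for full validation,
    // "indexDetails"; corruption shows up in results.valid / results.errors.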
- if ( touchIndexes ) {
- Timer t;
- IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, false );
- while ( ii.more() ) {
- const IndexDescriptor* desc = ii.next();
- const IndexAccessMethod* iam = _indexCatalog.getIndex( desc );
- Status status = iam->touch( txn );
- if ( !status.isOK() )
- return status;
- }
+Status Collection::touch(OperationContext* txn,
+ bool touchData,
+ bool touchIndexes,
+ BSONObjBuilder* output) const {
+ if (touchData) {
+ BSONObjBuilder b;
+ Status status = _recordStore->touch(txn, &b);
+ if (!status.isOK())
+ return status;
+ output->append("data", b.obj());
+ }
- output->append( "indexes", BSON( "num" << _indexCatalog.numIndexesTotal( txn ) <<
- "millis" << t.millis() ) );
+ if (touchIndexes) {
+ Timer t;
+ IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator(txn, false);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ const IndexAccessMethod* iam = _indexCatalog.getIndex(desc);
+ Status status = iam->touch(txn);
+ if (!status.isOK())
+ return status;
}
- return Status::OK();
+ output->append("indexes",
+ BSON("num" << _indexCatalog.numIndexesTotal(txn) << "millis" << t.millis()));
}
+ return Status::OK();
+}
}
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 5f5609bd437..972979378cf 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -53,343 +53,360 @@
namespace mongo {
- class CollectionCatalogEntry;
- class DatabaseCatalogEntry;
- class ExtentManager;
- class IndexCatalog;
- class MatchExpression;
- class MultiIndexBlock;
- class OpDebug;
- class OperationContext;
- class RecordCursor;
- class RecordFetcher;
- class UpdateDriver;
- class UpdateRequest;
-
- struct CompactOptions {
-
- CompactOptions() {
- paddingMode = NONE;
- validateDocuments = true;
- paddingFactor = 1;
- paddingBytes = 0;
- }
-
- // padding
- enum PaddingMode {
- PRESERVE, NONE, MANUAL
- } paddingMode;
-
- // only used if _paddingMode == MANUAL
- double paddingFactor; // what to multiply document size by
- int paddingBytes; // what to add to document size after multiplication
- unsigned computeRecordSize( unsigned recordSize ) const {
- recordSize = static_cast<unsigned>( paddingFactor * recordSize );
- recordSize += paddingBytes;
- return recordSize;
- }
-
- // other
- bool validateDocuments;
-
- std::string toString() const;
- };
-
- struct CompactStats {
- CompactStats() {
- corruptDocuments = 0;
- }
-
- long long corruptDocuments;
- };
+class CollectionCatalogEntry;
+class DatabaseCatalogEntry;
+class ExtentManager;
+class IndexCatalog;
+class MatchExpression;
+class MultiIndexBlock;
+class OpDebug;
+class OperationContext;
+class RecordCursor;
+class RecordFetcher;
+class UpdateDriver;
+class UpdateRequest;
+
+struct CompactOptions {
+ CompactOptions() {
+ paddingMode = NONE;
+ validateDocuments = true;
+ paddingFactor = 1;
+ paddingBytes = 0;
+ }
+
+ // padding
+ enum PaddingMode { PRESERVE, NONE, MANUAL } paddingMode;
+
+ // only used if _paddingMode == MANUAL
+ double paddingFactor; // what to multiply document size by
+ int paddingBytes; // what to add to document size after multiplication
+ unsigned computeRecordSize(unsigned recordSize) const {
+ recordSize = static_cast<unsigned>(paddingFactor * recordSize);
+ recordSize += paddingBytes;
+ return recordSize;
+ }
+
+ // other
+ bool validateDocuments;
+
+ std::string toString() const;
+};
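
Worked example of computeRecordSize under MANUAL padding (plain arithmetic, no server state involved):

    CompactOptions opts;
    opts.paddingMode = CompactOptions::MANUAL;
    opts.paddingFactor = 1.5;
    opts.paddingBytes = 16;
    unsigned padded = opts.computeRecordSize(100);  // 100 * 1.5 = 150, + 16 => 166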
+
+struct CompactStats {
+ CompactStats() {
+ corruptDocuments = 0;
+ }
+
+ long long corruptDocuments;
+};
+
+/**
+ * Queries with the awaitData option use this notifier object to wait for more data to be
+ * inserted into the capped collection.
+ */
+class CappedInsertNotifier {
+public:
+ CappedInsertNotifier();
+
+ /**
+ * Wakes up threads waiting on this object for the arrival of new data.
+ */
+ void notifyOfInsert();
+
+ /**
+ * Get a counter value which is incremented on every insert into a capped collection.
+ * The return value should be used as the reference value to pass into waitForInsert().
+ */
+ uint64_t getCount() const;
+
+ /**
+ * Waits for 'timeout' microseconds, or until notifyOfInsert() is called to indicate that new
+ * data is available in the capped collection.
+ */
+ void waitForInsert(uint64_t referenceCount, Microseconds timeout) const;
+
+private:
+ // Signalled when a successful insert is made into a capped collection.
+ mutable stdx::condition_variable _cappedNewDataNotifier;
+
+ // Mutex used with '_cappedNewDataNotifier'. Protects access to '_cappedInsertCount'.
+ mutable stdx::mutex _cappedNewDataMutex;
+
+ // A counter, incremented on insertion of new data into the capped collection.
+ //
+ // The condition which '_cappedNewDataNotifier' is being notified of is an increment of this
+ // counter. Access to this counter is synchronized with '_cappedNewDataMutex'.
+ uint64_t _cappedInsertCount;
+};
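
A typical awaitData-style polling loop built on this class (sketch; 'collection' is assumed to be capped, since getCappedInsertNotifier() invariants otherwise):

    std::shared_ptr<CappedInsertNotifier> notifier = collection->getCappedInsertNotifier();
    uint64_t lastCount = notifier->getCount();
    // returns when an insert bumps the count past 'lastCount' or the timeout elapses
    notifier->waitForInsert(lastCount, Microseconds(1000000));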
+
+/**
+ * this is NOT safe through a yield right now
+ * it is not yet clear whether it ever will be
+ */
+class Collection : CappedDocumentDeleteCallback, UpdateNotifier {
+public:
+ Collection(OperationContext* txn,
+ StringData fullNS,
+ CollectionCatalogEntry* details, // does not own
+ RecordStore* recordStore, // does not own
+ DatabaseCatalogEntry* dbce); // does not own
+
+ ~Collection();
+
+ bool ok() const {
+ return _magic == 1357924;
+ }
+
+ CollectionCatalogEntry* getCatalogEntry() {
+ return _details;
+ }
+ const CollectionCatalogEntry* getCatalogEntry() const {
+ return _details;
+ }
+
+ CollectionInfoCache* infoCache() {
+ return &_infoCache;
+ }
+ const CollectionInfoCache* infoCache() const {
+ return &_infoCache;
+ }
+
+ const NamespaceString& ns() const {
+ return _ns;
+ }
+
+ const IndexCatalog* getIndexCatalog() const {
+ return &_indexCatalog;
+ }
+ IndexCatalog* getIndexCatalog() {
+ return &_indexCatalog;
+ }
+
+ const RecordStore* getRecordStore() const {
+ return _recordStore;
+ }
+ RecordStore* getRecordStore() {
+ return _recordStore;
+ }
+
+ CursorManager* getCursorManager() const {
+ return &_cursorManager;
+ }
+
+ bool requiresIdIndex() const;
+
+ Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
+
+ /**
+ * @param out - set to the document's contents if 'loc' exists; otherwise left untouched.
+ * @return true iff loc exists
+ */
+ bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
+
+ std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward = true) const;
+
+ /**
+ * Returns many cursors that partition the Collection into many disjoint sets. Iterating
+ * all returned cursors is equivalent to iterating the full collection.
+ */
+ std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
+
+ void deleteDocument(OperationContext* txn,
+ const RecordId& loc,
+ bool cappedOK = false,
+ bool noWarn = false,
+ BSONObj* deletedId = 0);
+
+ /**
+ * this does NOT modify the doc before inserting
+ * i.e. it will not add an _id field to documents that are missing one
+ *
+ * If enforceQuota is false, quotas will be ignored.
+ */
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ bool enforceQuota,
+ bool fromMigrate = false);
/**
- * Queries with the awaitData option use this notifier object to wait for more data to be
- * inserted into the capped collection.
+ * Callers must ensure no document validation is performed for this collection when calling
+ * this method.
*/
- class CappedInsertNotifier {
- public:
- CappedInsertNotifier();
-
- /**
- * Wakes up threads waiting on this object for the arrival of new data.
- */
- void notifyOfInsert();
-
- /**
- * Get a counter value which is incremented on every insert into a capped collection.
- * The return value should be used as the reference value to pass into waitForInsert().
- */
- uint64_t getCount() const;
-
- /**
- * Waits for 'timeout' microseconds, or until notifyOfInsert() is called to indicate that new
- * data is available in the capped collection.
- */
- void waitForInsert(uint64_t referenceCount, Microseconds timeout) const;
-
- private:
- // Signalled when a successful insert is made into a capped collection.
- mutable stdx::condition_variable _cappedNewDataNotifier;
-
- // Mutex used with '_cappedNewDataNotifier'. Protects access to '_cappedInsertCount'.
- mutable stdx::mutex _cappedNewDataMutex;
-
- // A counter, incremented on insertion of new data into the capped collection.
- //
- // The condition which '_cappedNewDataNotifier' is being notified of is an increment of this
- // counter. Access to this counter is synchronized with '_cappedNewDataMutex'.
- uint64_t _cappedInsertCount;
- };
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const DocWriter* doc,
+ bool enforceQuota);
+
+ StatusWith<RecordId> insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ MultiIndexBlock* indexBlock,
+ bool enforceQuota);
+
+ /**
+ * updates the document @ oldLocation with newDoc
+ * if the document fits in the old space, it is put there
+ * if not, it is moved
+ * @return the post update location of the doc (may or may not be the same as oldLocation)
+ */
+ StatusWith<RecordId> updateDocument(OperationContext* txn,
+ const RecordId& oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ bool enforceQuota,
+ bool indexesAffected,
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args);
+
+ bool updateWithDamagesSupported() const;
/**
- * this is NOT safe through a yield right now
- * not sure if it will be, or what yet
+ * Not allowed to modify indexes.
+ * Illegal to call if updateWithDamagesSupported() returns false.
*/
- class Collection : CappedDocumentDeleteCallback, UpdateNotifier {
- public:
- Collection( OperationContext* txn,
- StringData fullNS,
- CollectionCatalogEntry* details, // does not own
- RecordStore* recordStore, // does not own
- DatabaseCatalogEntry* dbce ); // does not own
-
- ~Collection();
-
- bool ok() const { return _magic == 1357924; }
-
- CollectionCatalogEntry* getCatalogEntry() { return _details; }
- const CollectionCatalogEntry* getCatalogEntry() const { return _details; }
-
- CollectionInfoCache* infoCache() { return &_infoCache; }
- const CollectionInfoCache* infoCache() const { return &_infoCache; }
-
- const NamespaceString& ns() const { return _ns; }
-
- const IndexCatalog* getIndexCatalog() const { return &_indexCatalog; }
- IndexCatalog* getIndexCatalog() { return &_indexCatalog; }
-
- const RecordStore* getRecordStore() const { return _recordStore; }
- RecordStore* getRecordStore() { return _recordStore; }
-
- CursorManager* getCursorManager() const { return &_cursorManager; }
-
- bool requiresIdIndex() const;
-
- Snapshotted<BSONObj> docFor(OperationContext* txn, const RecordId& loc) const;
-
- /**
- * @param out - contents set to the right docs if exists, or nothing.
- * @return true iff loc exists
- */
- bool findDoc(OperationContext* txn, const RecordId& loc, Snapshotted<BSONObj>* out) const;
-
- std::unique_ptr<RecordCursor> getCursor(OperationContext* txn, bool forward = true) const;
-
- /**
- * Returns many cursors that partition the Collection into many disjoint sets. Iterating
- * all returned cursors is equivalent to iterating the full collection.
- */
- std::vector<std::unique_ptr<RecordCursor>> getManyCursors(OperationContext* txn) const;
-
- void deleteDocument( OperationContext* txn,
- const RecordId& loc,
- bool cappedOK = false,
- bool noWarn = false,
- BSONObj* deletedId = 0 );
-
- /**
- * this does NOT modify the doc before inserting
- * i.e. will not add an _id field for documents that are missing it
- *
- * If enforceQuota is false, quotas will be ignored.
- */
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const BSONObj& doc,
- bool enforceQuota,
- bool fromMigrate = false);
-
- /**
- * Callers must ensure no document validation is performed for this collection when calling
- * this method.
- */
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const DocWriter* doc,
- bool enforceQuota );
-
- StatusWith<RecordId> insertDocument( OperationContext* txn,
- const BSONObj& doc,
- MultiIndexBlock* indexBlock,
- bool enforceQuota );
-
- /**
- * updates the document @ oldLocation with newDoc
- * if the document fits in the old space, it is put there
- * if not, it is moved
- * @return the post update location of the doc (may or may not be the same as oldLocation)
- */
- StatusWith<RecordId> updateDocument(OperationContext* txn,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool enforceQuota,
- bool indexesAffected,
- OpDebug* debug,
- oplogUpdateEntryArgs& args);
-
- bool updateWithDamagesSupported() const;
-
- /**
- * Not allowed to modify indexes.
- * Illegal to call if updateWithDamagesSupported() returns false.
- */
- Status updateDocumentWithDamages(OperationContext* txn,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages,
- oplogUpdateEntryArgs& args);
-
- // -----------
-
- StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
-
- /**
- * removes all documents as fast as possible
- * indexes before and after will be the same
- * as will other characteristics
- */
- Status truncate(OperationContext* txn);
-
- /**
- * @param full - does more checks
- * @param scanData - scans each document
- * @return OK if the validation ran successfully
- * OK will be returned even if corruption is found
- * details will be in 'results'
- */
- Status validate( OperationContext* txn,
- bool full, bool scanData,
- ValidateResults* results, BSONObjBuilder* output );
-
- /**
- * forces data into cache
- */
- Status touch( OperationContext* txn,
- bool touchData, bool touchIndexes,
- BSONObjBuilder* output ) const;
-
- /**
- * Truncate documents newer than the document at 'end' from the capped
- * collection. The collection cannot be completely emptied using this
- * function. An assertion will be thrown if that is attempted.
- * @param inclusive - Truncate 'end' as well iff true
- * XXX: this will go away soon, just needed to move for now
- */
- void temp_cappedTruncateAfter( OperationContext* txn, RecordId end, bool inclusive );
-
- /**
- * Sets the validator for this collection.
- *
- * An empty validator removes all validation.
- * Requires an exclusive lock on the collection.
- */
- Status setValidator(OperationContext* txn, BSONObj validator);
-
- // -----------
-
- //
- // Stats
- //
-
- bool isCapped() const;
-
- /**
- * Get a pointer to a capped insert notifier object. The caller can wait on this object
- * until it is notified of a new insert into the capped collection.
- *
- * It is invalid to call this method unless the collection is capped.
- */
- std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
-
- uint64_t numRecords( OperationContext* txn ) const;
-
- uint64_t dataSize( OperationContext* txn ) const;
-
- int averageObjectSize( OperationContext* txn ) const {
- uint64_t n = numRecords( txn );
- if ( n == 0 )
- return 5;
- return static_cast<int>( dataSize( txn ) / n );
- }
-
- uint64_t getIndexSize(OperationContext* opCtx,
- BSONObjBuilder* details = NULL,
- int scale = 1);
-
- // --- end suspect things
-
- private:
-
- /**
- * Returns a non-ok Status if document does not pass this collection's validator.
- */
- Status checkValidation(OperationContext* txn, const BSONObj& document) const;
-
- /**
- * Returns a non-ok Status if validator is not legal for this collection.
- */
- StatusWith<std::unique_ptr<MatchExpression>> parseValidator(const BSONObj& validator) const;
-
- Status recordStoreGoingToMove( OperationContext* txn,
- const RecordId& oldLocation,
- const char* oldBuffer,
- size_t oldSize );
-
- Status recordStoreGoingToUpdateInPlace( OperationContext* txn,
- const RecordId& loc );
-
- Status aboutToDeleteCapped( OperationContext* txn, const RecordId& loc, RecordData data );
-
- /**
- * same semantics as insertDocument, but doesn't do:
- * - some user error checks
- * - adjust padding
- */
- StatusWith<RecordId> _insertDocument( OperationContext* txn,
- const BSONObj& doc,
- bool enforceQuota );
-
- bool _enforceQuota( bool userEnforeQuota ) const;
-
- int _magic;
-
- NamespaceString _ns;
- CollectionCatalogEntry* _details;
- RecordStore* _recordStore;
- DatabaseCatalogEntry* _dbce;
- CollectionInfoCache _infoCache;
- IndexCatalog _indexCatalog;
-
- // Empty means no filter.
- BSONObj _validatorDoc;
- // Points into _validatorDoc. Null means no filter.
- std::unique_ptr<MatchExpression> _validator;
-
- // this is mutable because read only users of the Collection class
- // use it to keep state. This seems valid as const correctness of Collection
- // should be about the data.
- mutable CursorManager _cursorManager;
-
- // Notifier object for awaitData. Threads polling a capped collection for new data can wait
- // on this object until notified of the arrival of new data.
- //
- // This is non-null if and only if the collection is a capped collection.
- std::shared_ptr<CappedInsertNotifier> _cappedNotifier;
-
- friend class Database;
- friend class IndexCatalog;
- friend class NamespaceDetails;
- };
+ Status updateDocumentWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const Snapshotted<RecordData>& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args);
+ // -----------
+
+ StatusWith<CompactStats> compact(OperationContext* txn, const CompactOptions* options);
+
+ /**
+ * Removes all documents as fast as possible.
+ * Indexes before and after will be the same,
+ * as will other characteristics.
+ */
+ Status truncate(OperationContext* txn);
+
+ /**
+ * @param full - does more checks
+ * @param scanData - scans each document
+ * @return OK if the validation ran successfully
+ * OK will be returned even if corruption is found
+ * details will be in 'results'
+ */
+ Status validate(OperationContext* txn,
+ bool full,
+ bool scanData,
+ ValidateResults* results,
+ BSONObjBuilder* output);
+
+ /**
+ * forces data into cache
+ */
+ Status touch(OperationContext* txn,
+ bool touchData,
+ bool touchIndexes,
+ BSONObjBuilder* output) const;
+
+ /**
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ * XXX: this will go away soon, just needed to move for now
+ */
+ void temp_cappedTruncateAfter(OperationContext* txn, RecordId end, bool inclusive);
+
+ /**
+ * Sets the validator for this collection.
+ *
+ * An empty validator removes all validation.
+ * Requires an exclusive lock on the collection.
+ */
+ Status setValidator(OperationContext* txn, BSONObj validator);
+
+ // -----------
+
+ //
+ // Stats
+ //
+
+ bool isCapped() const;
+
+ /**
+ * Get a pointer to a capped insert notifier object. The caller can wait on this object
+ * until it is notified of a new insert into the capped collection.
+ *
+ * It is invalid to call this method unless the collection is capped.
+ */
+ std::shared_ptr<CappedInsertNotifier> getCappedInsertNotifier() const;
+
+ uint64_t numRecords(OperationContext* txn) const;
+
+ uint64_t dataSize(OperationContext* txn) const;
+
+ int averageObjectSize(OperationContext* txn) const {
+ uint64_t n = numRecords(txn);
+ if (n == 0)
+ return 5;
+ return static_cast<int>(dataSize(txn) / n);
+ }
+
+ uint64_t getIndexSize(OperationContext* opCtx, BSONObjBuilder* details = NULL, int scale = 1);
+
+ // --- end suspect things
+
+private:
+ /**
+ * Returns a non-ok Status if document does not pass this collection's validator.
+ */
+ Status checkValidation(OperationContext* txn, const BSONObj& document) const;
+
+ /**
+ * Returns a non-ok Status if validator is not legal for this collection.
+ */
+ StatusWith<std::unique_ptr<MatchExpression>> parseValidator(const BSONObj& validator) const;
+
+ Status recordStoreGoingToMove(OperationContext* txn,
+ const RecordId& oldLocation,
+ const char* oldBuffer,
+ size_t oldSize);
+
+ Status recordStoreGoingToUpdateInPlace(OperationContext* txn, const RecordId& loc);
+
+ Status aboutToDeleteCapped(OperationContext* txn, const RecordId& loc, RecordData data);
+
+ /**
+ * same semantics as insertDocument, but doesn't do:
+ * - some user error checks
+ * - adjust padding
+ */
+ StatusWith<RecordId> _insertDocument(OperationContext* txn,
+ const BSONObj& doc,
+ bool enforceQuota);
+
+    bool _enforceQuota(bool userEnforceQuota) const;
+
+ int _magic;
+
+ NamespaceString _ns;
+ CollectionCatalogEntry* _details;
+ RecordStore* _recordStore;
+ DatabaseCatalogEntry* _dbce;
+ CollectionInfoCache _infoCache;
+ IndexCatalog _indexCatalog;
+
+ // Empty means no filter.
+ BSONObj _validatorDoc;
+ // Points into _validatorDoc. Null means no filter.
+ std::unique_ptr<MatchExpression> _validator;
+
+ // this is mutable because read-only users of the Collection class
+ // use it to keep state. This seems valid as const correctness of Collection
+ // should be about the data.
+ mutable CursorManager _cursorManager;
+
+ // Notifier object for awaitData. Threads polling a capped collection for new data can wait
+ // on this object until notified of the arrival of new data.
+ //
+ // This is non-null if and only if the collection is a capped collection.
+ std::shared_ptr<CappedInsertNotifier> _cappedNotifier;
+
+ friend class Database;
+ friend class IndexCatalog;
+ friend class NamespaceDetails;
+};
}
diff --git a/src/mongo/db/catalog/collection_catalog_entry.h b/src/mongo/db/catalog/collection_catalog_entry.h
index 876d393d733..c5f4278c3f1 100644
--- a/src/mongo/db/catalog/collection_catalog_entry.h
+++ b/src/mongo/db/catalog/collection_catalog_entry.h
@@ -37,83 +37,74 @@
namespace mongo {
- class IndexDescriptor;
- class OperationContext;
+class IndexDescriptor;
+class OperationContext;
- class CollectionCatalogEntry {
- public:
- CollectionCatalogEntry( StringData ns )
- : _ns( ns ){
- }
- virtual ~CollectionCatalogEntry(){}
+class CollectionCatalogEntry {
+public:
+ CollectionCatalogEntry(StringData ns) : _ns(ns) {}
+ virtual ~CollectionCatalogEntry() {}
- const NamespaceString& ns() const { return _ns; }
+ const NamespaceString& ns() const {
+ return _ns;
+ }
- // ------- indexes ----------
+ // ------- indexes ----------
- virtual CollectionOptions getCollectionOptions( OperationContext* txn ) const = 0;
+ virtual CollectionOptions getCollectionOptions(OperationContext* txn) const = 0;
- virtual int getTotalIndexCount( OperationContext* txn ) const = 0;
+ virtual int getTotalIndexCount(OperationContext* txn) const = 0;
- virtual int getCompletedIndexCount( OperationContext* txn ) const = 0;
+ virtual int getCompletedIndexCount(OperationContext* txn) const = 0;
- virtual int getMaxAllowedIndexes() const = 0;
+ virtual int getMaxAllowedIndexes() const = 0;
- virtual void getAllIndexes( OperationContext* txn,
- std::vector<std::string>* names ) const = 0;
+ virtual void getAllIndexes(OperationContext* txn, std::vector<std::string>* names) const = 0;
- virtual BSONObj getIndexSpec( OperationContext* txn,
- StringData idxName ) const = 0;
+ virtual BSONObj getIndexSpec(OperationContext* txn, StringData idxName) const = 0;
- virtual bool isIndexMultikey( OperationContext* txn,
- StringData indexName) const = 0;
+ virtual bool isIndexMultikey(OperationContext* txn, StringData indexName) const = 0;
- virtual bool setIndexIsMultikey(OperationContext* txn,
- StringData indexName,
- bool multikey = true) = 0;
+ virtual bool setIndexIsMultikey(OperationContext* txn,
+ StringData indexName,
+ bool multikey = true) = 0;
- virtual RecordId getIndexHead( OperationContext* txn,
- StringData indexName ) const = 0;
+ virtual RecordId getIndexHead(OperationContext* txn, StringData indexName) const = 0;
- virtual void setIndexHead( OperationContext* txn,
- StringData indexName,
- const RecordId& newHead ) = 0;
+ virtual void setIndexHead(OperationContext* txn,
+ StringData indexName,
+ const RecordId& newHead) = 0;
- virtual bool isIndexReady( OperationContext* txn,
- StringData indexName ) const = 0;
+ virtual bool isIndexReady(OperationContext* txn, StringData indexName) const = 0;
- virtual Status removeIndex( OperationContext* txn,
- StringData indexName ) = 0;
+ virtual Status removeIndex(OperationContext* txn, StringData indexName) = 0;
- virtual Status prepareForIndexBuild( OperationContext* txn,
- const IndexDescriptor* spec ) = 0;
+ virtual Status prepareForIndexBuild(OperationContext* txn, const IndexDescriptor* spec) = 0;
- virtual void indexBuildSuccess( OperationContext* txn,
- StringData indexName ) = 0;
+ virtual void indexBuildSuccess(OperationContext* txn, StringData indexName) = 0;
- /* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
- * The specified index must already contain an expireAfterSeconds field, and the value in
- * that field and newExpireSecs must both be numeric.
- */
- virtual void updateTTLSetting( OperationContext* txn,
- StringData idxName,
- long long newExpireSeconds ) = 0;
+ /* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
+ * The specified index must already contain an expireAfterSeconds field, and the value in
+ * that field and newExpireSecs must both be numeric.
+ */
+ virtual void updateTTLSetting(OperationContext* txn,
+ StringData idxName,
+ long long newExpireSeconds) = 0;
- /**
- * Sets the flags field of CollectionOptions to newValue.
- * Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
- */
- virtual void updateFlags(OperationContext* txn, int newValue) = 0;
+ /**
+ * Sets the flags field of CollectionOptions to newValue.
+ * Subsequent calls to getCollectionOptions should have flags==newValue and flagsSet==true.
+ */
+ virtual void updateFlags(OperationContext* txn, int newValue) = 0;
- /**
- * Updates the validator for this collection.
- *
- * An empty validator removes all validation.
- */
- virtual void updateValidator(OperationContext* txn, const BSONObj& validator) = 0;
-
- private:
- NamespaceString _ns;
- };
+ /**
+ * Updates the validator for this collection.
+ *
+ * An empty validator removes all validation.
+ */
+ virtual void updateValidator(OperationContext* txn, const BSONObj& validator) = 0;
+
+private:
+ NamespaceString _ns;
+};
}
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index a6f4f98041b..e3f0623006f 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -48,154 +48,150 @@
namespace mongo {
- using std::endl;
- using std::vector;
-
- namespace {
- BSONObj _compactAdjustIndexSpec( const BSONObj& oldSpec ) {
- BSONObjBuilder b;
- BSONObj::iterator i( oldSpec );
- while( i.more() ) {
- BSONElement e = i.next();
- if ( str::equals( e.fieldName(), "v" ) ) {
- // Drop any preexisting index version spec. The default index version will
- // be used instead for the new index.
- continue;
- }
- if ( str::equals( e.fieldName(), "background" ) ) {
- // Create the new index in the foreground.
- continue;
- }
- // Pass the element through to the new index spec.
- b.append(e);
- }
- return b.obj();
+using std::endl;
+using std::vector;
+
+namespace {
+BSONObj _compactAdjustIndexSpec(const BSONObj& oldSpec) {
+ BSONObjBuilder b;
+ BSONObj::iterator i(oldSpec);
+ while (i.more()) {
+ BSONElement e = i.next();
+ if (str::equals(e.fieldName(), "v")) {
+ // Drop any preexisting index version spec. The default index version will
+ // be used instead for the new index.
+ continue;
+ }
+ if (str::equals(e.fieldName(), "background")) {
+ // Create the new index in the foreground.
+ continue;
}
+ // Pass the element through to the new index spec.
+ b.append(e);
+ }
+ return b.obj();
+}
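
Illustrative input/output for the helper above (hypothetical index spec):

    // in:  { v: 1, key: { a: 1 }, name: "a_1", ns: "test.c", background: true }
    // out: { key: { a: 1 }, name: "a_1", ns: "test.c" }  ("v" and "background" dropped)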
- class MyCompactAdaptor : public RecordStoreCompactAdaptor {
- public:
- MyCompactAdaptor(Collection* collection,
- MultiIndexBlock* indexBlock)
+class MyCompactAdaptor : public RecordStoreCompactAdaptor {
+public:
+ MyCompactAdaptor(Collection* collection, MultiIndexBlock* indexBlock)
- : _collection( collection ),
- _multiIndexBlock(indexBlock) {
- }
+ : _collection(collection), _multiIndexBlock(indexBlock) {}
- virtual bool isDataValid( const RecordData& recData ) {
- return recData.toBson().valid();
- }
-
- virtual size_t dataSize( const RecordData& recData ) {
- return recData.toBson().objsize();
- }
+ virtual bool isDataValid(const RecordData& recData) {
+ return recData.toBson().valid();
+ }
- virtual void inserted( const RecordData& recData, const RecordId& newLocation ) {
- _multiIndexBlock->insert( recData.toBson(), newLocation );
- }
+ virtual size_t dataSize(const RecordData& recData) {
+ return recData.toBson().objsize();
+ }
- private:
- Collection* _collection;
+ virtual void inserted(const RecordData& recData, const RecordId& newLocation) {
+ _multiIndexBlock->insert(recData.toBson(), newLocation);
+ }
- MultiIndexBlock* _multiIndexBlock;
- };
+private:
+ Collection* _collection;
- }
+ MultiIndexBlock* _multiIndexBlock;
+};
+}
- StatusWith<CompactStats> Collection::compact( OperationContext* txn,
- const CompactOptions* compactOptions ) {
- dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
+StatusWith<CompactStats> Collection::compact(OperationContext* txn,
+ const CompactOptions* compactOptions) {
+ dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_X));
- DisableDocumentValidation validationDisabler(txn);
+ DisableDocumentValidation validationDisabler(txn);
- if ( !_recordStore->compactSupported() )
- return StatusWith<CompactStats>( ErrorCodes::CommandNotSupported,
- str::stream() <<
- "cannot compact collection with record store: " <<
- _recordStore->name() );
+ if (!_recordStore->compactSupported())
+ return StatusWith<CompactStats>(ErrorCodes::CommandNotSupported,
+ str::stream()
+ << "cannot compact collection with record store: "
+ << _recordStore->name());
- if (_recordStore->compactsInPlace()) {
- // Since we are compacting in-place, we don't need to touch the indexes.
- // TODO SERVER-16856 compact indexes
- CompactStats stats;
- Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
- if (!status.isOK())
- return StatusWith<CompactStats>(status);
+ if (_recordStore->compactsInPlace()) {
+ // Since we are compacting in-place, we don't need to touch the indexes.
+ // TODO SERVER-16856 compact indexes
+ CompactStats stats;
+ Status status = _recordStore->compact(txn, NULL, compactOptions, &stats);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- return StatusWith<CompactStats>(stats);
- }
+ return StatusWith<CompactStats>(stats);
+ }
- if ( _indexCatalog.numIndexesInProgress( txn ) )
- return StatusWith<CompactStats>( ErrorCodes::BadValue,
- "cannot compact when indexes in progress" );
-
-
- // same data, but might perform a little different after compact?
- _infoCache.reset( txn );
-
- vector<BSONObj> indexSpecs;
- {
- IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( txn, false ) );
- while ( ii.more() ) {
- IndexDescriptor* descriptor = ii.next();
-
- const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- return StatusWith<CompactStats>(
- ErrorCodes::CannotCreateIndex,
- str::stream() << "Cannot compact collection due to invalid index "
- << spec << ": " << keyStatus.reason() << " For more info see"
- << " http://dochub.mongodb.org/core/index-validation");
- }
- indexSpecs.push_back(spec);
+ if (_indexCatalog.numIndexesInProgress(txn))
+ return StatusWith<CompactStats>(ErrorCodes::BadValue,
+ "cannot compact when indexes in progress");
+
+
+    // same data, but it might perform a little differently after compact
+ _infoCache.reset(txn);
+
+ vector<BSONObj> indexSpecs;
+ {
+ IndexCatalog::IndexIterator ii(_indexCatalog.getIndexIterator(txn, false));
+ while (ii.more()) {
+ IndexDescriptor* descriptor = ii.next();
+
+ const BSONObj spec = _compactAdjustIndexSpec(descriptor->infoObj());
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ return StatusWith<CompactStats>(
+ ErrorCodes::CannotCreateIndex,
+ str::stream() << "Cannot compact collection due to invalid index " << spec
+ << ": " << keyStatus.reason() << " For more info see"
+ << " http://dochub.mongodb.org/core/index-validation");
}
+ indexSpecs.push_back(spec);
}
+ }
- // Give a chance to be interrupted *before* we drop all indexes.
- txn->checkForInterrupt();
-
- {
- // note that the drop indexes call also invalidates all clientcursors for the namespace,
- // which is important and wanted here
- WriteUnitOfWork wunit(txn);
- log() << "compact dropping indexes" << endl;
- Status status = _indexCatalog.dropAllIndexes(txn, true);
- if ( !status.isOK() ) {
- return StatusWith<CompactStats>( status );
- }
- wunit.commit();
- }
+ // Give a chance to be interrupted *before* we drop all indexes.
+ txn->checkForInterrupt();
- CompactStats stats;
+ {
+        // Note that the drop indexes call also invalidates all ClientCursors for the
+        // namespace, which is important and intended here.
+ WriteUnitOfWork wunit(txn);
+ log() << "compact dropping indexes" << endl;
+ Status status = _indexCatalog.dropAllIndexes(txn, true);
+ if (!status.isOK()) {
+ return StatusWith<CompactStats>(status);
+ }
+ wunit.commit();
+ }
- MultiIndexBlock indexer(txn, this);
- indexer.allowInterruption();
- indexer.ignoreUniqueConstraint(); // in compact we should be doing no checking
+ CompactStats stats;
- Status status = indexer.init( indexSpecs );
- if ( !status.isOK() )
- return StatusWith<CompactStats>( status );
+ MultiIndexBlock indexer(txn, this);
+ indexer.allowInterruption();
+    indexer.ignoreUniqueConstraint();  // in compact we should be doing no constraint checking
- MyCompactAdaptor adaptor(this, &indexer);
+ Status status = indexer.init(indexSpecs);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- status = _recordStore->compact( txn, &adaptor, compactOptions, &stats);
- if (!status.isOK())
- return StatusWith<CompactStats>(status);
+ MyCompactAdaptor adaptor(this, &indexer);
- log() << "starting index commits";
- status = indexer.doneInserting();
- if ( !status.isOK() )
- return StatusWith<CompactStats>( status );
+ status = _recordStore->compact(txn, &adaptor, compactOptions, &stats);
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- {
- WriteUnitOfWork wunit(txn);
- indexer.commit();
- wunit.commit();
- }
+ log() << "starting index commits";
+ status = indexer.doneInserting();
+ if (!status.isOK())
+ return StatusWith<CompactStats>(status);
- return StatusWith<CompactStats>( stats );
+ {
+ WriteUnitOfWork wunit(txn);
+ indexer.commit();
+ wunit.commit();
}
+ return StatusWith<CompactStats>(stats);
+}
+
} // namespace mongo
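
The compact() path above follows a fixed order: snapshot and validate the index specs, drop all indexes in their own WriteUnitOfWork, let the record store compact itself while handing each surviving record to MyCompactAdaptor (which feeds the MultiIndexBlock), and only then commit the rebuilt indexes. A minimal standalone sketch of that ordering, using hypothetical stand-in types rather than the real MongoDB classes:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for this sketch; none of these are MongoDB types.
    using IndexSpec = std::string;
    using Record = std::string;

    // Models RecordStore::compact() driving an adaptor: every surviving record
    // is handed back so its index keys can be re-inserted.
    void compactRecords(const std::vector<Record>& records,
                        const std::function<void(const Record&)>& onInserted) {
        for (const Record& rec : records)
            onInserted(rec);
    }

    int main() {
        std::vector<Record> records = {"doc1", "doc2"};
        std::vector<IndexSpec> specs = {"_id_", "a_1"};  // 1. snapshot index specs
        std::cout << "dropping indexes\n";               // 2. drop all indexes
        compactRecords(records, [](const Record& rec) {  // 3. adaptor re-indexes
            std::cout << "re-index " << rec << "\n";     //    each moved record
        });
        std::cout << "committing " << specs.size() << " rebuilt indexes\n";  // 4.
        return 0;
    }
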
diff --git a/src/mongo/db/catalog/collection_info_cache.cpp b/src/mongo/db/catalog/collection_info_cache.cpp
index a111a9a6684..994dc9945c5 100644
--- a/src/mongo/db/catalog/collection_info_cache.cpp
+++ b/src/mongo/db/catalog/collection_info_cache.cpp
@@ -46,128 +46,124 @@
namespace mongo {
- CollectionInfoCache::CollectionInfoCache( Collection* collection )
- : _collection( collection ),
- _keysComputed( false ),
- _planCache(new PlanCache(collection->ns().ns())),
- _querySettings(new QuerySettings()) { }
-
- void CollectionInfoCache::reset( OperationContext* txn ) {
- LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
- clearQueryCache();
- _keysComputed = false;
- computeIndexKeys( txn );
- updatePlanCacheIndexEntries( txn );
- // query settings is not affected by info cache reset.
- // index filters should persist throughout life of collection
- }
+CollectionInfoCache::CollectionInfoCache(Collection* collection)
+ : _collection(collection),
+ _keysComputed(false),
+ _planCache(new PlanCache(collection->ns().ns())),
+ _querySettings(new QuerySettings()) {}
+
+void CollectionInfoCache::reset(OperationContext* txn) {
+ LOG(1) << _collection->ns().ns() << ": clearing plan cache - collection info cache reset";
+ clearQueryCache();
+ _keysComputed = false;
+ computeIndexKeys(txn);
+ updatePlanCacheIndexEntries(txn);
+    // Query settings are not affected by an info cache reset;
+    // index filters should persist throughout the life of the collection.
+}
- const UpdateIndexData& CollectionInfoCache::indexKeys( OperationContext* txn ) const {
- // This requires "some" lock, and MODE_IS is an expression for that, for now.
- dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
- invariant(_keysComputed);
- return _indexedPaths;
- }
+const UpdateIndexData& CollectionInfoCache::indexKeys(OperationContext* txn) const {
+    // This requires "some" lock; MODE_IS is used to express that, for now.
+ dassert(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_IS));
+ invariant(_keysComputed);
+ return _indexedPaths;
+}
- void CollectionInfoCache::computeIndexKeys( OperationContext* txn ) {
- // This function modified objects attached to the Collection so we need a write lock
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- _indexedPaths.clear();
-
- IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
- while (i.more()) {
- IndexDescriptor* descriptor = i.next();
-
- if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
- BSONObj key = descriptor->keyPattern();
- BSONObjIterator j(key);
- while (j.more()) {
- BSONElement e = j.next();
- _indexedPaths.addPath(e.fieldName());
- }
+void CollectionInfoCache::computeIndexKeys(OperationContext* txn) {
+    // This function modifies objects attached to the Collection, so we need a write lock.
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
+ _indexedPaths.clear();
+
+ IndexCatalog::IndexIterator i = _collection->getIndexCatalog()->getIndexIterator(txn, true);
+ while (i.more()) {
+ IndexDescriptor* descriptor = i.next();
+
+ if (descriptor->getAccessMethodName() != IndexNames::TEXT) {
+ BSONObj key = descriptor->keyPattern();
+ BSONObjIterator j(key);
+ while (j.more()) {
+ BSONElement e = j.next();
+ _indexedPaths.addPath(e.fieldName());
}
- else {
- fts::FTSSpec ftsSpec(descriptor->infoObj());
-
- if (ftsSpec.wildcard()) {
- _indexedPaths.allPathsIndexed();
+ } else {
+ fts::FTSSpec ftsSpec(descriptor->infoObj());
+
+ if (ftsSpec.wildcard()) {
+ _indexedPaths.allPathsIndexed();
+ } else {
+ for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
+ _indexedPaths.addPath(ftsSpec.extraBefore(i));
}
- else {
- for (size_t i = 0; i < ftsSpec.numExtraBefore(); ++i) {
- _indexedPaths.addPath(ftsSpec.extraBefore(i));
- }
- for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
- it != ftsSpec.weights().end();
- ++it) {
- _indexedPaths.addPath(it->first);
- }
- for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
- _indexedPaths.addPath(ftsSpec.extraAfter(i));
- }
- // Any update to a path containing "language" as a component could change the
- // language of a subdocument. Add the override field as a path component.
- _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
+ for (fts::Weights::const_iterator it = ftsSpec.weights().begin();
+ it != ftsSpec.weights().end();
+ ++it) {
+ _indexedPaths.addPath(it->first);
}
- }
-
- // handle partial indexes
- const IndexCatalogEntry* entry = i.catalogEntry(descriptor);
- const MatchExpression* filter = entry->getFilterExpression();
- if (filter) {
- unordered_set<std::string> paths;
- QueryPlannerIXSelect::getFields(filter, "", &paths);
- for (auto it = paths.begin(); it != paths.end(); ++it) {
- _indexedPaths.addPath(*it);
+ for (size_t i = 0; i < ftsSpec.numExtraAfter(); ++i) {
+ _indexedPaths.addPath(ftsSpec.extraAfter(i));
}
+ // Any update to a path containing "language" as a component could change the
+ // language of a subdocument. Add the override field as a path component.
+ _indexedPaths.addPathComponent(ftsSpec.languageOverrideField());
}
}
- _keysComputed = true;
-
- }
-
- void CollectionInfoCache::notifyOfWriteOp() {
- if (NULL != _planCache.get()) {
- _planCache->notifyOfWriteOp();
+ // handle partial indexes
+ const IndexCatalogEntry* entry = i.catalogEntry(descriptor);
+ const MatchExpression* filter = entry->getFilterExpression();
+ if (filter) {
+ unordered_set<std::string> paths;
+ QueryPlannerIXSelect::getFields(filter, "", &paths);
+ for (auto it = paths.begin(); it != paths.end(); ++it) {
+ _indexedPaths.addPath(*it);
+ }
}
}
- void CollectionInfoCache::clearQueryCache() {
- if (NULL != _planCache.get()) {
- _planCache->clear();
- }
- }
+ _keysComputed = true;
+}
- PlanCache* CollectionInfoCache::getPlanCache() const {
- return _planCache.get();
+void CollectionInfoCache::notifyOfWriteOp() {
+ if (NULL != _planCache.get()) {
+ _planCache->notifyOfWriteOp();
}
+}
- QuerySettings* CollectionInfoCache::getQuerySettings() const {
- return _querySettings.get();
+void CollectionInfoCache::clearQueryCache() {
+ if (NULL != _planCache.get()) {
+ _planCache->clear();
}
+}
- void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
- std::vector<IndexEntry> indexEntries;
-
- // TODO We shouldn't need to include unfinished indexes, but we must here because the index
- // catalog may be in an inconsistent state. SERVER-18346.
- const bool includeUnfinishedIndexes = true;
- IndexCatalog::IndexIterator ii =
- _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
- while (ii.more()) {
- const IndexDescriptor* desc = ii.next();
- const IndexCatalogEntry* ice = ii.catalogEntry(desc);
- indexEntries.emplace_back(desc->keyPattern(),
- desc->getAccessMethodName(),
- desc->isMultikey(txn),
- desc->isSparse(),
- desc->unique(),
- desc->indexName(),
- ice->getFilterExpression(),
- desc->infoObj());
- }
+PlanCache* CollectionInfoCache::getPlanCache() const {
+ return _planCache.get();
+}
- _planCache->notifyOfIndexEntries(indexEntries);
+QuerySettings* CollectionInfoCache::getQuerySettings() const {
+ return _querySettings.get();
+}
+
+void CollectionInfoCache::updatePlanCacheIndexEntries(OperationContext* txn) {
+ std::vector<IndexEntry> indexEntries;
+
+ // TODO We shouldn't need to include unfinished indexes, but we must here because the index
+ // catalog may be in an inconsistent state. SERVER-18346.
+ const bool includeUnfinishedIndexes = true;
+ IndexCatalog::IndexIterator ii =
+ _collection->getIndexCatalog()->getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ const IndexDescriptor* desc = ii.next();
+ const IndexCatalogEntry* ice = ii.catalogEntry(desc);
+ indexEntries.emplace_back(desc->keyPattern(),
+ desc->getAccessMethodName(),
+ desc->isMultikey(txn),
+ desc->isSparse(),
+ desc->unique(),
+ desc->indexName(),
+ ice->getFilterExpression(),
+ desc->infoObj());
}
+ _planCache->notifyOfIndexEntries(indexEntries);
+}
}
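
computeIndexKeys() above records the dotted path of every key component across all indexes (plus text-index weighted paths and partial-filter fields) so writers can cheaply test whether an update touches an indexed path. A self-contained sketch of the core path-collection step, with plain containers standing in for the BSON key patterns the real code iterates:

    #include <cassert>
    #include <set>
    #include <string>
    #include <vector>

    // A key pattern such as {a: 1, "b.c": -1}, reduced to its field names;
    // a hypothetical stand-in for a BSONObj.
    using KeyPattern = std::vector<std::string>;

    // Collect the dotted path of every key component across all indexes, the
    // way computeIndexKeys() populates _indexedPaths for non-text indexes.
    std::set<std::string> indexedPaths(const std::vector<KeyPattern>& indexes) {
        std::set<std::string> paths;
        for (const KeyPattern& key : indexes)
            for (const std::string& field : key)
                paths.insert(field);
        return paths;
    }

    int main() {
        std::set<std::string> paths = indexedPaths({{"a", "b.c"}, {"a", "d"}});
        assert(paths.count("b.c") == 1);  // an update to "b.c" touches an index
        assert(paths.size() == 3);        // "a", "b.c", "d"
        return 0;
    }
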
diff --git a/src/mongo/db/catalog/collection_info_cache.h b/src/mongo/db/catalog/collection_info_cache.h
index 294f371eef0..7e418b4e123 100644
--- a/src/mongo/db/catalog/collection_info_cache.h
+++ b/src/mongo/db/catalog/collection_info_cache.h
@@ -37,76 +37,76 @@
namespace mongo {
- class Collection;
+class Collection;
- /**
- * this is for storing things that you want to cache about a single collection
- * life cycle is managed for you from inside Collection
+/**
+ * This is for storing things that you want to cache about a single collection.
+ * Its life cycle is managed for you from inside Collection.
+ */
+class CollectionInfoCache {
+public:
+ CollectionInfoCache(Collection* collection);
+
+ /*
+ * Resets entire cache state. Must be called under exclusive DB lock.
*/
- class CollectionInfoCache {
- public:
-
- CollectionInfoCache( Collection* collection );
-
- /*
- * Resets entire cache state. Must be called under exclusive DB lock.
- */
- void reset( OperationContext* txn );
+ void reset(OperationContext* txn);
- //
- // New Query Execution
- //
+ //
+ // New Query Execution
+ //
- /**
- * Get the PlanCache for this collection.
- */
- PlanCache* getPlanCache() const;
-
- /**
- * Get the QuerySettings for this collection.
- */
- QuerySettings* getQuerySettings() const;
+ /**
+ * Get the PlanCache for this collection.
+ */
+ PlanCache* getPlanCache() const;
- // -------------------
+ /**
+ * Get the QuerySettings for this collection.
+ */
+ QuerySettings* getQuerySettings() const;
- /* get set of index keys for this namespace. handy to quickly check if a given
- field is indexed (Note it might be a secondary component of a compound index.)
- */
- const UpdateIndexData& indexKeys( OperationContext* txn ) const;
+ // -------------------
- // ---------------------
+    /* Get the set of index keys for this namespace. Handy to quickly check whether a given
+       field is indexed (note it might be a secondary component of a compound index).
+ */
+ const UpdateIndexData& indexKeys(OperationContext* txn) const;
- /**
- * Called when an index is added to this collection.
- */
- void addedIndex( OperationContext* txn ) { reset( txn ); }
+ // ---------------------
- void clearQueryCache();
+ /**
+ * Called when an index is added to this collection.
+ */
+ void addedIndex(OperationContext* txn) {
+ reset(txn);
+ }
- /* you must notify the cache if you are doing writes, as query plan utility will change */
- void notifyOfWriteOp();
+ void clearQueryCache();
- private:
+    /* You must notify the cache if you are doing writes, as the utility of cached query plans will change. */
+ void notifyOfWriteOp();
- Collection* _collection; // not owned
+private:
+ Collection* _collection; // not owned
- // --- index keys cache
- bool _keysComputed;
- UpdateIndexData _indexedPaths;
+ // --- index keys cache
+ bool _keysComputed;
+ UpdateIndexData _indexedPaths;
- // A cache for query plans.
- std::unique_ptr<PlanCache> _planCache;
+ // A cache for query plans.
+ std::unique_ptr<PlanCache> _planCache;
- // Query settings.
- // Includes index filters.
- std::unique_ptr<QuerySettings> _querySettings;
+ // Query settings.
+ // Includes index filters.
+ std::unique_ptr<QuerySettings> _querySettings;
- /**
- * Must be called under exclusive DB lock.
- */
- void computeIndexKeys( OperationContext* txn );
+ /**
+ * Must be called under exclusive DB lock.
+ */
+ void computeIndexKeys(OperationContext* txn);
- void updatePlanCacheIndexEntries( OperationContext* txn );
- };
+ void updatePlanCacheIndexEntries(OperationContext* txn);
+};
} // namespace mongo
diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp
index 90dd997fce9..3aaaaac6893 100644
--- a/src/mongo/db/catalog/collection_options.cpp
+++ b/src/mongo/db/catalog/collection_options.cpp
@@ -35,180 +35,170 @@
namespace mongo {
- // static
- bool CollectionOptions::validMaxCappedDocs( long long* max ) {
- if ( *max <= 0 ||
- *max == std::numeric_limits<long long>::max() ) {
- *max = 0x7fffffff;
- return true;
- }
-
- if ( *max < ( 0x1LL << 31 ) ) {
- return true;
- }
-
- return false;
+// static
+bool CollectionOptions::validMaxCappedDocs(long long* max) {
+ if (*max <= 0 || *max == std::numeric_limits<long long>::max()) {
+ *max = 0x7fffffff;
+ return true;
}
- void CollectionOptions::reset() {
- capped = false;
- cappedSize = 0;
- cappedMaxDocs = 0;
- initialNumExtents = 0;
- initialExtentSizes.clear();
- autoIndexId = DEFAULT;
- // For compatibility with previous versions if the user sets no flags,
- // we set Flag_UsePowerOf2Sizes in case the user downgrades.
- flags = Flag_UsePowerOf2Sizes;
- flagsSet = false;
- temp = false;
- storageEngine = BSONObj();
- validator = BSONObj();
+ if (*max < (0x1LL << 31)) {
+ return true;
}
- bool CollectionOptions::isValid() const {
- return validate().isOK();
- }
+ return false;
+}
- Status CollectionOptions::validate() const {
- return CollectionOptions().parse(toBSON());
- }
+void CollectionOptions::reset() {
+ capped = false;
+ cappedSize = 0;
+ cappedMaxDocs = 0;
+ initialNumExtents = 0;
+ initialExtentSizes.clear();
+ autoIndexId = DEFAULT;
+ // For compatibility with previous versions if the user sets no flags,
+ // we set Flag_UsePowerOf2Sizes in case the user downgrades.
+ flags = Flag_UsePowerOf2Sizes;
+ flagsSet = false;
+ temp = false;
+ storageEngine = BSONObj();
+ validator = BSONObj();
+}
- Status CollectionOptions::parse(const BSONObj& options) {
- reset();
+bool CollectionOptions::isValid() const {
+ return validate().isOK();
+}
- // During parsing, ignore some validation errors in order to accept options objects that
- // were valid in previous versions of the server. SERVER-13737.
- BSONObjIterator i( options );
- while ( i.more() ) {
- BSONElement e = i.next();
- StringData fieldName = e.fieldName();
+Status CollectionOptions::validate() const {
+ return CollectionOptions().parse(toBSON());
+}
- if ( fieldName == "capped" ) {
- capped = e.trueValue();
+Status CollectionOptions::parse(const BSONObj& options) {
+ reset();
+
+ // During parsing, ignore some validation errors in order to accept options objects that
+ // were valid in previous versions of the server. SERVER-13737.
+ BSONObjIterator i(options);
+ while (i.more()) {
+ BSONElement e = i.next();
+ StringData fieldName = e.fieldName();
+
+ if (fieldName == "capped") {
+ capped = e.trueValue();
+ } else if (fieldName == "size") {
+ if (!e.isNumber()) {
+ // Ignoring for backwards compatibility.
+ continue;
}
- else if ( fieldName == "size" ) {
- if ( !e.isNumber() ) {
- // Ignoring for backwards compatibility.
- continue;
- }
- cappedSize = e.numberLong();
- if ( cappedSize < 0 )
- return Status( ErrorCodes::BadValue, "size has to be >= 0" );
- cappedSize += 0xff;
- cappedSize &= 0xffffffffffffff00LL;
+ cappedSize = e.numberLong();
+ if (cappedSize < 0)
+ return Status(ErrorCodes::BadValue, "size has to be >= 0");
+ cappedSize += 0xff;
+ cappedSize &= 0xffffffffffffff00LL;
+ } else if (fieldName == "max") {
+ if (!options["capped"].trueValue() || !e.isNumber()) {
+ // Ignoring for backwards compatibility.
+ continue;
}
- else if ( fieldName == "max" ) {
- if ( !options["capped"].trueValue() || !e.isNumber() ) {
- // Ignoring for backwards compatibility.
- continue;
- }
- cappedMaxDocs = e.numberLong();
- if ( !validMaxCappedDocs( &cappedMaxDocs ) )
- return Status( ErrorCodes::BadValue,
- "max in a capped collection has to be < 2^31 or not set" );
- }
- else if ( fieldName == "$nExtents" ) {
- if ( e.type() == Array ) {
- BSONObjIterator j( e.Obj() );
- while ( j.more() ) {
- BSONElement inner = j.next();
- initialExtentSizes.push_back( inner.numberInt() );
- }
- }
- else {
- initialNumExtents = e.numberLong();
+ cappedMaxDocs = e.numberLong();
+ if (!validMaxCappedDocs(&cappedMaxDocs))
+ return Status(ErrorCodes::BadValue,
+ "max in a capped collection has to be < 2^31 or not set");
+ } else if (fieldName == "$nExtents") {
+ if (e.type() == Array) {
+ BSONObjIterator j(e.Obj());
+ while (j.more()) {
+ BSONElement inner = j.next();
+ initialExtentSizes.push_back(inner.numberInt());
}
+ } else {
+ initialNumExtents = e.numberLong();
}
- else if ( fieldName == "autoIndexId" ) {
- if ( e.trueValue() )
- autoIndexId = YES;
- else
- autoIndexId = NO;
+ } else if (fieldName == "autoIndexId") {
+ if (e.trueValue())
+ autoIndexId = YES;
+ else
+ autoIndexId = NO;
+ } else if (fieldName == "flags") {
+ flags = e.numberInt();
+ flagsSet = true;
+ } else if (fieldName == "temp") {
+ temp = e.trueValue();
+ } else if (fieldName == "storageEngine") {
+ // Storage engine-specific collection options.
+ // "storageEngine" field must be of type "document".
+ // Every field inside "storageEngine" has to be a document.
+ // Format:
+ // {
+ // ...
+ // storageEngine: {
+ // storageEngine1: {
+ // ...
+ // },
+ // storageEngine2: {
+ // ...
+ // }
+ // },
+ // ...
+ // }
+ if (e.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue, "'storageEngine' has to be a document.");
}
- else if ( fieldName == "flags" ) {
- flags = e.numberInt();
- flagsSet = true;
- }
- else if ( fieldName == "temp" ) {
- temp = e.trueValue();
- }
- else if (fieldName == "storageEngine") {
- // Storage engine-specific collection options.
- // "storageEngine" field must be of type "document".
- // Every field inside "storageEngine" has to be a document.
- // Format:
- // {
- // ...
- // storageEngine: {
- // storageEngine1: {
- // ...
- // },
- // storageEngine2: {
- // ...
- // }
- // },
- // ...
- // }
- if (e.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, "'storageEngine' has to be a document.");
- }
- BSONForEach(storageEngineElement, e.Obj()) {
- StringData storageEngineName = storageEngineElement.fieldNameStringData();
- if (storageEngineElement.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, str::stream() << "'storageEngine." <<
- storageEngineName << "' has to be an embedded document.");
- }
+ BSONForEach(storageEngineElement, e.Obj()) {
+ StringData storageEngineName = storageEngineElement.fieldNameStringData();
+ if (storageEngineElement.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue,
+ str::stream() << "'storageEngine." << storageEngineName
+ << "' has to be an embedded document.");
}
-
- storageEngine = e.Obj().getOwned();
}
- else if (fieldName == "validator") {
- if (e.type() != mongo::Object) {
- return Status(ErrorCodes::BadValue, "'validator' has to be a document.");
- }
- validator = e.Obj().getOwned();
+ storageEngine = e.Obj().getOwned();
+ } else if (fieldName == "validator") {
+ if (e.type() != mongo::Object) {
+ return Status(ErrorCodes::BadValue, "'validator' has to be a document.");
}
- }
- return Status::OK();
+ validator = e.Obj().getOwned();
+ }
}
- BSONObj CollectionOptions::toBSON() const {
- BSONObjBuilder b;
- if ( capped ) {
- b.appendBool( "capped", true );
- b.appendNumber( "size", cappedSize );
+ return Status::OK();
+}
- if ( cappedMaxDocs )
- b.appendNumber( "max", cappedMaxDocs );
- }
+BSONObj CollectionOptions::toBSON() const {
+ BSONObjBuilder b;
+ if (capped) {
+ b.appendBool("capped", true);
+ b.appendNumber("size", cappedSize);
- if ( initialNumExtents )
- b.appendNumber( "$nExtents", initialNumExtents );
- if ( !initialExtentSizes.empty() )
- b.append( "$nExtents", initialExtentSizes );
+ if (cappedMaxDocs)
+ b.appendNumber("max", cappedMaxDocs);
+ }
- if ( autoIndexId != DEFAULT )
- b.appendBool( "autoIndexId", autoIndexId == YES );
+ if (initialNumExtents)
+ b.appendNumber("$nExtents", initialNumExtents);
+ if (!initialExtentSizes.empty())
+ b.append("$nExtents", initialExtentSizes);
- if ( flagsSet )
- b.append( "flags", flags );
+ if (autoIndexId != DEFAULT)
+ b.appendBool("autoIndexId", autoIndexId == YES);
- if ( temp )
- b.appendBool( "temp", true );
+ if (flagsSet)
+ b.append("flags", flags);
- if (!storageEngine.isEmpty()) {
- b.append("storageEngine", storageEngine);
- }
+ if (temp)
+ b.appendBool("temp", true);
- if (!validator.isEmpty()) {
- b.append("validator", validator);
- }
+ if (!storageEngine.isEmpty()) {
+ b.append("storageEngine", storageEngine);
+ }
- return b.obj();
+ if (!validator.isEmpty()) {
+ b.append("validator", validator);
}
+ return b.obj();
+}
}
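
Note how the "size" branch of parse() above rounds the requested capped size up to the next 256-byte boundary by adding 0xff and masking off the low byte; a request of size: 1000, for example, becomes 1024. A self-contained check of that arithmetic:

    #include <cassert>

    // Mirrors the "size" arithmetic in CollectionOptions::parse(): round the
    // requested capped size up to the next multiple of 256 bytes.
    long long roundCappedSize(long long cappedSize) {
        cappedSize += 0xff;
        cappedSize &= 0xffffffffffffff00LL;
        return cappedSize;
    }

    int main() {
        assert(roundCappedSize(1000) == 1024);  // rounded up to the next boundary
        assert(roundCappedSize(1024) == 1024);  // exact multiples are unchanged
        assert(roundCappedSize(1) == 256);      // smallest usable size is 256
        return 0;
    }
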
diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h
index 356d4f2c3e7..7520917b1d2 100644
--- a/src/mongo/db/catalog/collection_options.h
+++ b/src/mongo/db/catalog/collection_options.h
@@ -35,69 +35,70 @@
namespace mongo {
- struct CollectionOptions {
- CollectionOptions() {
- reset();
- }
-
- void reset();
-
- /**
- * Returns true if collection options validates successfully.
- */
- bool isValid() const;
-
- /**
- * Confirms that collection options can be converted to BSON and back without errors.
- */
- Status validate() const;
-
- /**
- * Parses the "options" subfield of the collection info object.
- */
- Status parse( const BSONObj& obj );
-
- BSONObj toBSON() const;
-
- /**
- * @param max in and out, will be adjusted
- * @return if the value is valid at all
- */
- static bool validMaxCappedDocs( long long* max );
-
- // ----
-
- bool capped;
- long long cappedSize;
- long long cappedMaxDocs;
-
- // following 2 are mutually exclusive, can only have one set
- long long initialNumExtents;
- std::vector<long long> initialExtentSizes;
-
- // behavior of _id index creation when collection created
- void setNoIdIndex() { autoIndexId = NO; }
- enum {
- DEFAULT, // currently yes for most collections, NO for some system ones
- YES, // create _id index
- NO // do not create _id index
- } autoIndexId;
-
- // user flags
- enum UserFlags {
- Flag_UsePowerOf2Sizes = 1 << 0,
- Flag_NoPadding = 1 << 1,
- };
- int flags; // a bitvector of UserFlags
- bool flagsSet;
-
- bool temp;
-
- // Storage engine collection options. Always owned or empty.
- BSONObj storageEngine;
-
- // Always owned or empty.
- BSONObj validator;
+struct CollectionOptions {
+ CollectionOptions() {
+ reset();
+ }
+
+ void reset();
+
+ /**
+     * Returns true if the collection options validate successfully.
+ */
+ bool isValid() const;
+
+ /**
+ * Confirms that collection options can be converted to BSON and back without errors.
+ */
+ Status validate() const;
+
+ /**
+ * Parses the "options" subfield of the collection info object.
+ */
+ Status parse(const BSONObj& obj);
+
+ BSONObj toBSON() const;
+
+ /**
+ * @param max in and out, will be adjusted
+ * @return if the value is valid at all
+ */
+ static bool validMaxCappedDocs(long long* max);
+
+ // ----
+
+ bool capped;
+ long long cappedSize;
+ long long cappedMaxDocs;
+
+    // The following two are mutually exclusive; only one can be set.
+ long long initialNumExtents;
+ std::vector<long long> initialExtentSizes;
+
+ // behavior of _id index creation when collection created
+ void setNoIdIndex() {
+ autoIndexId = NO;
+ }
+ enum {
+ DEFAULT, // currently yes for most collections, NO for some system ones
+ YES, // create _id index
+ NO // do not create _id index
+ } autoIndexId;
+
+ // user flags
+ enum UserFlags {
+ Flag_UsePowerOf2Sizes = 1 << 0,
+ Flag_NoPadding = 1 << 1,
};
+ int flags; // a bitvector of UserFlags
+ bool flagsSet;
+ bool temp;
+
+ // Storage engine collection options. Always owned or empty.
+ BSONObj storageEngine;
+
+ // Always owned or empty.
+ BSONObj validator;
+};
}
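
validMaxCappedDocs() normalizes the capped "max" option in place: non-positive values and LLONG_MAX are treated as unset and clamped to 0x7fffffff, and any other value must stay below 2^31. The same logic, copied standalone with its edge cases exercised:

    #include <cassert>
    #include <limits>

    // A standalone copy of CollectionOptions::validMaxCappedDocs() above.
    bool validMaxCappedDocs(long long* max) {
        if (*max <= 0 || *max == std::numeric_limits<long long>::max()) {
            *max = 0x7fffffff;  // "unset" is clamped to 2^31 - 1
            return true;
        }
        if (*max < (0x1LL << 31)) {
            return true;
        }
        return false;
    }

    int main() {
        long long m = 0;
        assert(validMaxCappedDocs(&m) && m == 0x7fffffff);  // unset -> clamped
        m = 1000;
        assert(validMaxCappedDocs(&m) && m == 1000);  // in range, left alone
        m = 0x1LL << 31;
        assert(!validMaxCappedDocs(&m));  // 2^31 and above are rejected
        return 0;
    }
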
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 165f377a8d7..b56b883e7a2 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -35,156 +35,156 @@
namespace mongo {
- void checkRoundTrip( const CollectionOptions& options1 ) {
- CollectionOptions options2;
- options2.parse( options1.toBSON() );
- ASSERT_EQUALS( options1.toBSON(), options2.toBSON() );
- }
-
- TEST( CollectionOptions, SimpleRoundTrip ) {
- CollectionOptions options;
- checkRoundTrip( options );
-
- options.capped = true;
- options.cappedSize = 10240;
- options.cappedMaxDocs = 1111;
- checkRoundTrip( options );
-
- options.setNoIdIndex();
- options.flags = 5;
- checkRoundTrip( options );
- }
-
- TEST(CollectionOptions, IsValid) {
- CollectionOptions options;
- ASSERT_TRUE(options.isValid());
-
- options.storageEngine = fromjson("{storageEngine1: 1}");
- ASSERT_FALSE(options.isValid());
- }
-
- TEST(CollectionOptions, Validate) {
- CollectionOptions options;
- ASSERT_OK(options.validate());
-
- options.storageEngine = fromjson("{storageEngine1: 1}");
- ASSERT_NOT_OK(options.validate());
- }
-
- TEST(CollectionOptions, Validator) {
- CollectionOptions options;
-
- ASSERT_NOT_OK(options.parse(fromjson("{validator: 'notAnObject'}")));
-
- ASSERT_OK(options.parse(fromjson("{validator: {a: 1}}")));
- ASSERT_EQ(options.validator, fromjson("{a: 1}"));
-
- options.validator = fromjson("{b: 1}");
- ASSERT_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
-
- options.reset();
- ASSERT_EQ(options.validator, BSONObj());
- ASSERT(!options.toBSON()["validator"]);
- }
-
- TEST( CollectionOptions, ErrorBadSize ) {
- ASSERT_NOT_OK( CollectionOptions().parse( fromjson( "{capped: true, size: -1}" ) ) );
- ASSERT_NOT_OK( CollectionOptions().parse( fromjson( "{capped: false, size: -1}" ) ) );
- }
-
- TEST( CollectionOptions, ErrorBadMax ) {
- ASSERT_NOT_OK( CollectionOptions().parse( BSON( "capped" << true << "max"
- << ( 1LL << 31 ) ) ) );
- }
-
- TEST( CollectionOptions, IgnoreSizeWrongType ) {
- CollectionOptions options;
- ASSERT_OK( options.parse( fromjson( "{size: undefined, capped: undefined}" ) ) );
- ASSERT_EQUALS( options.capped, false );
- ASSERT_EQUALS( options.cappedSize, 0 );
- }
-
- TEST( CollectionOptions, IgnoreMaxWrongType ) {
- CollectionOptions options;
- ASSERT_OK( options.parse( fromjson( "{capped: true, size: 1024, max: ''}" ) ) );
- ASSERT_EQUALS( options.capped, true );
- ASSERT_EQUALS( options.cappedSize, 1024 );
- ASSERT_EQUALS( options.cappedMaxDocs, 0 );
- }
-
- TEST( CollectionOptions, IgnoreUnregisteredFields ) {
- ASSERT_OK( CollectionOptions().parse( BSON( "create" << "c" ) ) );
- ASSERT_OK( CollectionOptions().parse( BSON( "foo" << "bar" ) ) );
- }
-
- TEST(CollectionOptions, InvalidStorageEngineField) {
- // "storageEngine" field has to be an object if present.
- ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: 1}")));
-
- // Every field under "storageEngine" has to be an object.
- ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: {storageEngine1: 1}}")));
-
- // Empty "storageEngine" not allowed
- ASSERT_OK(CollectionOptions().parse(fromjson("{storageEngine: {}}")));
- }
-
- TEST(CollectionOptions, ParseEngineField) {
- CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson("{unknownField: 1, "
- "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
- checkRoundTrip(opts);
-
- // Unrecognized field should not be present in BSON representation.
- BSONObj obj = opts.toBSON();
- ASSERT_FALSE(obj.hasField("unknownField"));
-
- // Check "storageEngine" field.
- ASSERT_TRUE(obj.hasField("storageEngine"));
- ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
- BSONObj storageEngine = obj.getObjectField("storageEngine");
-
- // Check individual storage storageEngine fields.
- ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
- BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
- ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
- ASSERT_EQUALS(2, storageEngine1.getIntField("y"));
-
- ASSERT_TRUE(storageEngine.getField("storageEngine2").isABSONObj());
- BSONObj storageEngine2 = storageEngine.getObjectField("storageEngine2");
- ASSERT_EQUALS(1, storageEngine2.getIntField("a"));
- ASSERT_EQUALS(2, storageEngine2.getIntField("b"));
-
- }
-
- TEST(CollectionOptions, ResetStorageEngineField) {
- CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson(
- "{storageEngine: {storageEngine1: {x: 1}}}")));
- checkRoundTrip(opts);
-
- opts.reset();
-
- ASSERT_TRUE(opts.storageEngine.isEmpty());
- }
-
- TEST(CollectionOptions, ModifyStorageEngineField) {
- CollectionOptions opts;
-
- // Directly modify storageEngine field in collection options.
- opts.storageEngine = fromjson("{storageEngine1: {x: 1}}");
-
- // Unrecognized field should not be present in BSON representation.
- BSONObj obj = opts.toBSON();
- ASSERT_FALSE(obj.hasField("unknownField"));
-
- // Check "storageEngine" field.
- ASSERT_TRUE(obj.hasField("storageEngine"));
- ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
- BSONObj storageEngine = obj.getObjectField("storageEngine");
-
- // Check individual storage storageEngine fields.
- ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
- BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
- ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
- }
+void checkRoundTrip(const CollectionOptions& options1) {
+ CollectionOptions options2;
+ options2.parse(options1.toBSON());
+ ASSERT_EQUALS(options1.toBSON(), options2.toBSON());
+}
+
+TEST(CollectionOptions, SimpleRoundTrip) {
+ CollectionOptions options;
+ checkRoundTrip(options);
+
+ options.capped = true;
+ options.cappedSize = 10240;
+ options.cappedMaxDocs = 1111;
+ checkRoundTrip(options);
+
+ options.setNoIdIndex();
+ options.flags = 5;
+ checkRoundTrip(options);
+}
+
+TEST(CollectionOptions, IsValid) {
+ CollectionOptions options;
+ ASSERT_TRUE(options.isValid());
+
+ options.storageEngine = fromjson("{storageEngine1: 1}");
+ ASSERT_FALSE(options.isValid());
+}
+
+TEST(CollectionOptions, Validate) {
+ CollectionOptions options;
+ ASSERT_OK(options.validate());
+
+ options.storageEngine = fromjson("{storageEngine1: 1}");
+ ASSERT_NOT_OK(options.validate());
+}
+
+TEST(CollectionOptions, Validator) {
+ CollectionOptions options;
+
+ ASSERT_NOT_OK(options.parse(fromjson("{validator: 'notAnObject'}")));
+
+ ASSERT_OK(options.parse(fromjson("{validator: {a: 1}}")));
+ ASSERT_EQ(options.validator, fromjson("{a: 1}"));
+
+ options.validator = fromjson("{b: 1}");
+ ASSERT_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
+
+ options.reset();
+ ASSERT_EQ(options.validator, BSONObj());
+ ASSERT(!options.toBSON()["validator"]);
+}
+
+TEST(CollectionOptions, ErrorBadSize) {
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: true, size: -1}")));
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{capped: false, size: -1}")));
+}
+
+TEST(CollectionOptions, ErrorBadMax) {
+ ASSERT_NOT_OK(CollectionOptions().parse(BSON("capped" << true << "max" << (1LL << 31))));
+}
+
+TEST(CollectionOptions, IgnoreSizeWrongType) {
+ CollectionOptions options;
+ ASSERT_OK(options.parse(fromjson("{size: undefined, capped: undefined}")));
+ ASSERT_EQUALS(options.capped, false);
+ ASSERT_EQUALS(options.cappedSize, 0);
+}
+
+TEST(CollectionOptions, IgnoreMaxWrongType) {
+ CollectionOptions options;
+ ASSERT_OK(options.parse(fromjson("{capped: true, size: 1024, max: ''}")));
+ ASSERT_EQUALS(options.capped, true);
+ ASSERT_EQUALS(options.cappedSize, 1024);
+ ASSERT_EQUALS(options.cappedMaxDocs, 0);
+}
+
+TEST(CollectionOptions, IgnoreUnregisteredFields) {
+ ASSERT_OK(CollectionOptions().parse(BSON("create"
+ << "c")));
+ ASSERT_OK(CollectionOptions().parse(BSON("foo"
+ << "bar")));
+}
+
+TEST(CollectionOptions, InvalidStorageEngineField) {
+ // "storageEngine" field has to be an object if present.
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: 1}")));
+
+ // Every field under "storageEngine" has to be an object.
+ ASSERT_NOT_OK(CollectionOptions().parse(fromjson("{storageEngine: {storageEngine1: 1}}")));
+
+    // An empty "storageEngine" document is allowed.
+ ASSERT_OK(CollectionOptions().parse(fromjson("{storageEngine: {}}")));
+}
+
+TEST(CollectionOptions, ParseEngineField) {
+ CollectionOptions opts;
+ ASSERT_OK(opts.parse(fromjson(
+ "{unknownField: 1, "
+ "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
+ checkRoundTrip(opts);
+
+ // Unrecognized field should not be present in BSON representation.
+ BSONObj obj = opts.toBSON();
+ ASSERT_FALSE(obj.hasField("unknownField"));
+
+ // Check "storageEngine" field.
+ ASSERT_TRUE(obj.hasField("storageEngine"));
+ ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
+ BSONObj storageEngine = obj.getObjectField("storageEngine");
+
+    // Check individual storageEngine fields.
+ ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
+ BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
+ ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
+ ASSERT_EQUALS(2, storageEngine1.getIntField("y"));
+
+ ASSERT_TRUE(storageEngine.getField("storageEngine2").isABSONObj());
+ BSONObj storageEngine2 = storageEngine.getObjectField("storageEngine2");
+ ASSERT_EQUALS(1, storageEngine2.getIntField("a"));
+ ASSERT_EQUALS(2, storageEngine2.getIntField("b"));
+}
+
+TEST(CollectionOptions, ResetStorageEngineField) {
+ CollectionOptions opts;
+ ASSERT_OK(opts.parse(fromjson("{storageEngine: {storageEngine1: {x: 1}}}")));
+ checkRoundTrip(opts);
+
+ opts.reset();
+
+ ASSERT_TRUE(opts.storageEngine.isEmpty());
+}
+
+TEST(CollectionOptions, ModifyStorageEngineField) {
+ CollectionOptions opts;
+
+ // Directly modify storageEngine field in collection options.
+ opts.storageEngine = fromjson("{storageEngine1: {x: 1}}");
+
+ // Unrecognized field should not be present in BSON representation.
+ BSONObj obj = opts.toBSON();
+ ASSERT_FALSE(obj.hasField("unknownField"));
+
+ // Check "storageEngine" field.
+ ASSERT_TRUE(obj.hasField("storageEngine"));
+ ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
+ BSONObj storageEngine = obj.getObjectField("storageEngine");
+
+    // Check individual storageEngine fields.
+ ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
+ BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
+ ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
+}
}
diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp
index aaff88c3121..afa5204fcbf 100644
--- a/src/mongo/db/catalog/create_collection.cpp
+++ b/src/mongo/db/catalog/create_collection.cpp
@@ -40,56 +40,53 @@
#include "mongo/db/repl/replication_coordinator_global.h"
namespace mongo {
- Status createCollection(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj) {
- BSONObjIterator it(cmdObj);
+Status createCollection(OperationContext* txn, const std::string& dbName, const BSONObj& cmdObj) {
+ BSONObjIterator it(cmdObj);
- // Extract ns from first cmdObj element.
- BSONElement firstElt = it.next();
- uassert(15888,
- "must pass name of collection to create",
- firstElt.valuestrsafe()[0] != '\0');
+ // Extract ns from first cmdObj element.
+ BSONElement firstElt = it.next();
+ uassert(15888, "must pass name of collection to create", firstElt.valuestrsafe()[0] != '\0');
- Status status = userAllowedCreateNS(dbName, firstElt.valuestr());
- if (!status.isOK()) {
- return status;
- }
+ Status status = userAllowedCreateNS(dbName, firstElt.valuestr());
+ if (!status.isOK()) {
+ return status;
+ }
- NamespaceString nss(dbName, firstElt.valuestrsafe());
+ NamespaceString nss(dbName, firstElt.valuestrsafe());
- // Build options object from remaining cmdObj elements.
- BSONObjBuilder optionsBuilder;
- while (it.more()) {
- optionsBuilder.append(it.next());
- }
+ // Build options object from remaining cmdObj elements.
+ BSONObjBuilder optionsBuilder;
+ while (it.more()) {
+ optionsBuilder.append(it.next());
+ }
- BSONObj options = optionsBuilder.obj();
- uassert(14832,
- "specify size:<n> when capped is true",
- !options["capped"].trueValue() || options["size"].isNumber() ||
- options.hasField("$nExtents"));
+ BSONObj options = optionsBuilder.obj();
+ uassert(14832,
+ "specify size:<n> when capped is true",
+ !options["capped"].trueValue() || options["size"].isNumber() ||
+ options.hasField("$nExtents"));
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
- OldClientContext ctx(txn, nss.ns());
- if (txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return Status(ErrorCodes::NotMaster, str::stream() <<
- "Not primary while creating collection " << nss.ns());
- }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ Lock::DBLock dbXLock(txn->lockState(), dbName, MODE_X);
+ OldClientContext ctx(txn, nss.ns());
+ if (txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while creating collection " << nss.ns());
+ }
- WriteUnitOfWork wunit(txn);
+ WriteUnitOfWork wunit(txn);
- // Create collection.
- status = userCreateNS(txn, ctx.db(), nss.ns(), options);
- if (!status.isOK()) {
- return status;
- }
+ // Create collection.
+ status = userCreateNS(txn, ctx.db(), nss.ns(), options);
+ if (!status.isOK()) {
+ return status;
+ }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
- return Status::OK();
+ wunit.commit();
}
-} // namespace mongo
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "create", nss.ns());
+ return Status::OK();
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h
index e96a7d799f9..84a042dae12 100644
--- a/src/mongo/db/catalog/create_collection.h
+++ b/src/mongo/db/catalog/create_collection.h
@@ -31,13 +31,11 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class OperationContext;
+class BSONObj;
+class OperationContext;
- /**
- * Creates a collection as described in "cmdObj" on the database "dbName".
- */
- Status createCollection(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj);
-} // namespace mongo
+/**
+ * Creates a collection as described in "cmdObj" on the database "dbName".
+ */
+Status createCollection(OperationContext* txn, const std::string& dbName, const BSONObj& cmdObj);
+} // namespace mongo
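
createCollection() above takes the database X lock, re-checks that the node can accept writes, and runs userCreateNS() inside a WriteUnitOfWork wrapped by the MONGO_WRITE_CONFLICT_RETRY_LOOP macros, so a storage-engine write conflict rolls the unit back and retries it from the top. A minimal model of that retry idiom (hypothetical names, not the real macros):

    #include <iostream>

    struct WriteConflictException {};  // hypothetical stand-in exception type

    // Models the MONGO_WRITE_CONFLICT_RETRY_LOOP idiom: run the whole write
    // unit again from the top whenever a conflict rolls it back.
    template <typename WriteUnit>
    void writeConflictRetry(WriteUnit attempt) {
        for (;;) {
            try {
                attempt();  // begin unit of work, write, commit
                return;
            } catch (const WriteConflictException&) {
                // conflict: the unit was rolled back, so simply try again
            }
        }
    }

    int main() {
        int tries = 0;
        writeConflictRetry([&] {
            if (++tries < 3)
                throw WriteConflictException();  // simulate two conflicts
        });
        std::cout << "committed after " << tries << " tries\n";  // prints 3
        return 0;
    }
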
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index b5bf0f49f73..97426228ca9 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -48,514 +48,479 @@
namespace mongo {
- using std::string;
- using std::vector;
-
- namespace {
- unsigned idFromCursorId( CursorId id ) {
- uint64_t x = static_cast<uint64_t>(id);
- x = x >> 32;
- return static_cast<unsigned>( x );
- }
-
- CursorId cursorIdFromParts( unsigned collection,
- unsigned cursor ) {
- CursorId x = static_cast<CursorId>( collection ) << 32;
- x |= cursor;
- return x;
- }
-
- class IdWorkTest : public StartupTest {
- public:
- void _run( unsigned a, unsigned b) {
- CursorId x = cursorIdFromParts( a, b );
- invariant( a == idFromCursorId( x ) );
- CursorId y = cursorIdFromParts( a, b + 1 );
- invariant( x != y );
- }
+using std::string;
+using std::vector;
+
+namespace {
+unsigned idFromCursorId(CursorId id) {
+ uint64_t x = static_cast<uint64_t>(id);
+ x = x >> 32;
+ return static_cast<unsigned>(x);
+}
- void run() {
- _run( 123, 456 );
- _run( 0xdeadbeef, 0xcafecafe );
- _run( 0, 0 );
- _run( 99999999, 999 );
- _run( 0xFFFFFFFF, 1 );
- _run( 0xFFFFFFFF, 0 );
- _run( 0xFFFFFFFF, 0xFFFFFFFF );
- }
- } idWorkTest;
- }
+CursorId cursorIdFromParts(unsigned collection, unsigned cursor) {
+ CursorId x = static_cast<CursorId>(collection) << 32;
+ x |= cursor;
+ return x;
+}
- class GlobalCursorIdCache {
- public:
+class IdWorkTest : public StartupTest {
+public:
+ void _run(unsigned a, unsigned b) {
+ CursorId x = cursorIdFromParts(a, b);
+ invariant(a == idFromCursorId(x));
+ CursorId y = cursorIdFromParts(a, b + 1);
+ invariant(x != y);
+ }
+
+ void run() {
+ _run(123, 456);
+ _run(0xdeadbeef, 0xcafecafe);
+ _run(0, 0);
+ _run(99999999, 999);
+ _run(0xFFFFFFFF, 1);
+ _run(0xFFFFFFFF, 0);
+ _run(0xFFFFFFFF, 0xFFFFFFFF);
+ }
+} idWorkTest;
+}
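
These helpers fix the CursorId layout: the owning CursorManager's 32-bit runtime id occupies the high half of the 64-bit id and the per-cursor counter the low half, which is how GlobalCursorIdCache later routes a bare cursor id back to the manager that owns it. A self-contained round-trip check:

    #include <cassert>
    #include <cstdint>

    typedef int64_t CursorId;

    // Same bit layout as the helpers above: manager id high, cursor id low.
    CursorId cursorIdFromParts(unsigned collection, unsigned cursor) {
        CursorId x = static_cast<CursorId>(collection) << 32;
        x |= cursor;
        return x;
    }

    unsigned idFromCursorId(CursorId id) {
        return static_cast<unsigned>(static_cast<uint64_t>(id) >> 32);
    }

    int main() {
        CursorId id = cursorIdFromParts(123, 456);
        assert(idFromCursorId(id) == 123u);         // high half: the manager
        assert(static_cast<unsigned>(id) == 456u);  // low half: the cursor
        return 0;
    }
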
- GlobalCursorIdCache();
- ~GlobalCursorIdCache();
+class GlobalCursorIdCache {
+public:
+ GlobalCursorIdCache();
+ ~GlobalCursorIdCache();
- /**
- * this gets called when a CursorManager gets created
- * @return the id the CursorManager should use when generating
- * cursor ids
- */
- unsigned created( const std::string& ns );
+ /**
+     * This gets called when a CursorManager is created.
+ * @return the id the CursorManager should use when generating
+ * cursor ids
+ */
+ unsigned created(const std::string& ns);
- /**
- * called by CursorManager when its going away
- */
- void destroyed( unsigned id, const std::string& ns );
+ /**
+ * called by CursorManager when its going away
+ */
+ void destroyed(unsigned id, const std::string& ns);
- /**
- * works globally
- */
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+ /**
+     * Erases the given cursor no matter which manager owns it; works globally.
+ */
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
- void appendStats( BSONObjBuilder& builder );
+ void appendStats(BSONObjBuilder& builder);
- std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
+ std::size_t timeoutCursors(OperationContext* txn, int millisSinceLastCall);
- int64_t nextSeed();
- private:
- SimpleMutex _mutex;
+ int64_t nextSeed();
- typedef unordered_map<unsigned,string> Map;
- Map _idToNS;
- unsigned _nextId;
+private:
+ SimpleMutex _mutex;
- std::unique_ptr<SecureRandom> _secureRandom;
- };
+ typedef unordered_map<unsigned, string> Map;
+ Map _idToNS;
+ unsigned _nextId;
- // Note that "globalCursorIdCache" must be declared before "globalCursorManager", as the latter
- // calls into the former during destruction.
- std::unique_ptr<GlobalCursorIdCache> globalCursorIdCache;
- std::unique_ptr<CursorManager> globalCursorManager;
+ std::unique_ptr<SecureRandom> _secureRandom;
+};
- MONGO_INITIALIZER(GlobalCursorIdCache)(InitializerContext* context) {
- globalCursorIdCache.reset(new GlobalCursorIdCache());
- return Status::OK();
- }
+// Note that "globalCursorIdCache" must be declared before "globalCursorManager", as the latter
+// calls into the former during destruction.
+std::unique_ptr<GlobalCursorIdCache> globalCursorIdCache;
+std::unique_ptr<CursorManager> globalCursorManager;
- MONGO_INITIALIZER_WITH_PREREQUISITES(GlobalCursorManager, ("GlobalCursorIdCache"))
- (InitializerContext* context) {
- globalCursorManager.reset(new CursorManager(""));
- return Status::OK();
- }
+MONGO_INITIALIZER(GlobalCursorIdCache)(InitializerContext* context) {
+ globalCursorIdCache.reset(new GlobalCursorIdCache());
+ return Status::OK();
+}
- GlobalCursorIdCache::GlobalCursorIdCache()
- : _nextId( 0 ),
- _secureRandom() {
- }
+MONGO_INITIALIZER_WITH_PREREQUISITES(GlobalCursorManager, ("GlobalCursorIdCache"))
+(InitializerContext* context) {
+ globalCursorManager.reset(new CursorManager(""));
+ return Status::OK();
+}
- GlobalCursorIdCache::~GlobalCursorIdCache() {
- }
+GlobalCursorIdCache::GlobalCursorIdCache() : _nextId(0), _secureRandom() {}
- int64_t GlobalCursorIdCache::nextSeed() {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- if ( !_secureRandom )
- _secureRandom.reset(SecureRandom::create());
- return _secureRandom->nextInt64();
- }
+GlobalCursorIdCache::~GlobalCursorIdCache() {}
- unsigned GlobalCursorIdCache::created( const std::string& ns ) {
- static const unsigned MAX_IDS = 1000 * 1000 * 1000;
+int64_t GlobalCursorIdCache::nextSeed() {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ if (!_secureRandom)
+ _secureRandom.reset(SecureRandom::create());
+ return _secureRandom->nextInt64();
+}
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+unsigned GlobalCursorIdCache::created(const std::string& ns) {
+ static const unsigned MAX_IDS = 1000 * 1000 * 1000;
- fassert( 17359, _idToNS.size() < MAX_IDS );
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( unsigned i = 0; i <= MAX_IDS; i++ ) {
- unsigned id = ++_nextId;
- if ( id == 0 )
- continue;
- if ( _idToNS.count( id ) > 0 )
- continue;
- _idToNS[id] = ns;
- return id;
- }
+ fassert(17359, _idToNS.size() < MAX_IDS);
- invariant( false );
+ for (unsigned i = 0; i <= MAX_IDS; i++) {
+ unsigned id = ++_nextId;
+ if (id == 0)
+ continue;
+ if (_idToNS.count(id) > 0)
+ continue;
+ _idToNS[id] = ns;
+ return id;
}
- void GlobalCursorIdCache::destroyed( unsigned id, const std::string& ns ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- invariant( ns == _idToNS[id] );
- _idToNS.erase( id );
- }
+ invariant(false);
+}
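
created() above scans forward from _nextId, skipping 0 and any id still registered, so each live CursorManager gets a unique nonzero 32-bit id (the fassert caps the map at a billion entries). A small model of that allocation loop, with hypothetical names:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Hypothetical model of GlobalCursorIdCache::created(): scan forward from
    // a counter, never handing out 0 or an id that is still in use.
    unsigned allocateId(std::unordered_map<unsigned, std::string>& idToNS,
                        unsigned& nextId,
                        const std::string& ns) {
        for (;;) {
            unsigned id = ++nextId;
            if (id == 0 || idToNS.count(id) > 0)
                continue;  // skip 0 and live ids
            idToNS[id] = ns;
            return id;
        }
    }

    int main() {
        std::unordered_map<unsigned, std::string> idToNS;
        unsigned nextId = 0;
        assert(allocateId(idToNS, nextId, "test.a") == 1);
        nextId = 0;  // force a collision with the live id 1
        assert(allocateId(idToNS, nextId, "test.b") == 2);
        return 0;
    }
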
- bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
- // Figure out what the namespace of this cursor is.
- std::string ns;
- if (globalCursorManager->ownsCursorId(id)) {
- ClientCursorPin pin(globalCursorManager.get(), id);
- if (!pin.c()) {
- // No such cursor. TODO: Consider writing to audit log here (even though we don't
- // have a namespace).
- return false;
- }
- ns = pin.c()->ns();
- }
- else {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- unsigned nsid = idFromCursorId(id);
- Map::const_iterator it = _idToNS.find(nsid);
- if (it == _idToNS.end()) {
- // No namespace corresponding to this cursor id prefix. TODO: Consider writing to
- // audit log here (even though we don't have a namespace).
- return false;
- }
- ns = it->second;
- }
- const NamespaceString nss(ns);
- invariant(nss.isValid());
-
- // Check if we are authorized to erase this cursor.
- if (checkAuth) {
- AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
- Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
- if (!authorizationStatus.isOK()) {
- audit::logKillCursorsAuthzCheck(txn->getClient(),
- nss,
- id,
- ErrorCodes::Unauthorized);
- return false;
- }
- }
+void GlobalCursorIdCache::destroyed(unsigned id, const std::string& ns) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ invariant(ns == _idToNS[id]);
+ _idToNS.erase(id);
+}
- // If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
- if (globalCursorManager->ownsCursorId(id)) {
- return globalCursorManager->eraseCursor(txn, id, checkAuth);
+bool GlobalCursorIdCache::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+ // Figure out what the namespace of this cursor is.
+ std::string ns;
+ if (globalCursorManager->ownsCursorId(id)) {
+ ClientCursorPin pin(globalCursorManager.get(), id);
+ if (!pin.c()) {
+ // No such cursor. TODO: Consider writing to audit log here (even though we don't
+ // have a namespace).
+ return false;
}
-
- // If not, then the cursor must be owned by a collection. Erase the cursor under the
- // collection lock (to prevent the collection from going away during the erase).
- AutoGetCollectionForRead ctx(txn, nss);
- Collection* collection = ctx.getCollection();
- if (!collection) {
- if (checkAuth)
- audit::logKillCursorsAuthzCheck(txn->getClient(),
- nss,
- id,
- ErrorCodes::CursorNotFound);
+ ns = pin.c()->ns();
+ } else {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ unsigned nsid = idFromCursorId(id);
+ Map::const_iterator it = _idToNS.find(nsid);
+ if (it == _idToNS.end()) {
+ // No namespace corresponding to this cursor id prefix. TODO: Consider writing to
+ // audit log here (even though we don't have a namespace).
return false;
}
- return collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+ ns = it->second;
}
+ const NamespaceString nss(ns);
+ invariant(nss.isValid());
- std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
- size_t totalTimedOut = 0;
-
- // Time out the cursors from the global cursor manager.
- totalTimedOut += globalCursorManager->timeoutCursors( millisSinceLastCall );
-
- // Compute the set of collection names that we have to time out cursors for.
- vector<string> todo;
- {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- for ( Map::const_iterator i = _idToNS.begin(); i != _idToNS.end(); ++i ) {
- if (globalCursorManager->ownsCursorId(cursorIdFromParts(i->first, 0))) {
- // Skip the global cursor manager, since we handle it above (and it's not
- // associated with a collection).
- continue;
- }
- todo.push_back( i->second );
- }
+ // Check if we are authorized to erase this cursor.
+ if (checkAuth) {
+ AuthorizationSession* as = AuthorizationSession::get(txn->getClient());
+ Status authorizationStatus = as->checkAuthForKillCursors(nss, id);
+ if (!authorizationStatus.isOK()) {
+ audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::Unauthorized);
+ return false;
}
+ }
+
+ // If this cursor is owned by the global cursor manager, ask it to erase the cursor for us.
+ if (globalCursorManager->ownsCursorId(id)) {
+ return globalCursorManager->eraseCursor(txn, id, checkAuth);
+ }
- // For each collection, time out its cursors under the collection lock (to prevent the
- // collection from going away during the erase).
- for ( unsigned i = 0; i < todo.size(); i++ ) {
- const std::string& ns = todo[i];
+ // If not, then the cursor must be owned by a collection. Erase the cursor under the
+ // collection lock (to prevent the collection from going away during the erase).
+ AutoGetCollectionForRead ctx(txn, nss);
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), nss, id, ErrorCodes::CursorNotFound);
+ return false;
+ }
+ return collection->getCursorManager()->eraseCursor(txn, id, checkAuth);
+}
- AutoGetCollectionForRead ctx(txn, ns);
- if (!ctx.getDb()) {
- continue;
- }
+std::size_t GlobalCursorIdCache::timeoutCursors(OperationContext* txn, int millisSinceLastCall) {
+ size_t totalTimedOut = 0;
+
+ // Time out the cursors from the global cursor manager.
+ totalTimedOut += globalCursorManager->timeoutCursors(millisSinceLastCall);
- Collection* collection = ctx.getCollection();
- if ( collection == NULL ) {
+ // Compute the set of collection names that we have to time out cursors for.
+ vector<string> todo;
+ {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ for (Map::const_iterator i = _idToNS.begin(); i != _idToNS.end(); ++i) {
+ if (globalCursorManager->ownsCursorId(cursorIdFromParts(i->first, 0))) {
+ // Skip the global cursor manager, since we handle it above (and it's not
+ // associated with a collection).
continue;
}
+ todo.push_back(i->second);
+ }
+ }
+
+ // For each collection, time out its cursors under the collection lock (to prevent the
+ // collection from going away during the erase).
+ for (unsigned i = 0; i < todo.size(); i++) {
+ const std::string& ns = todo[i];
+
+ AutoGetCollectionForRead ctx(txn, ns);
+ if (!ctx.getDb()) {
+ continue;
+ }
- totalTimedOut += collection->getCursorManager()->timeoutCursors( millisSinceLastCall );
+ Collection* collection = ctx.getCollection();
+ if (collection == NULL) {
+ continue;
}
- return totalTimedOut;
+ totalTimedOut += collection->getCursorManager()->timeoutCursors(millisSinceLastCall);
}
- // ---
+ return totalTimedOut;
+}
- CursorManager* CursorManager::getGlobalCursorManager() {
- return globalCursorManager.get();
- }
+// ---
- std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn,
- int millisSinceLastCall) {
- return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
- }
+CursorManager* CursorManager::getGlobalCursorManager() {
+ return globalCursorManager.get();
+}
- int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n,
- const char* _ids) {
- ConstDataCursor ids(_ids);
- int numDeleted = 0;
- for ( int i = 0; i < n; i++ ) {
- if ( eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
- numDeleted++;
- if ( inShutdown() )
- break;
- }
- return numDeleted;
- }
- bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, true);
- }
- bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
- return globalCursorIdCache->eraseCursor(txn, id, false );
+std::size_t CursorManager::timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall) {
+ return globalCursorIdCache->timeoutCursors(txn, millisSinceLastCall);
+}
+
+int CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* _ids) {
+ ConstDataCursor ids(_ids);
+ int numDeleted = 0;
+ for (int i = 0; i < n; i++) {
+ if (eraseCursorGlobalIfAuthorized(txn, ids.readAndAdvance<LittleEndian<int64_t>>()))
+ numDeleted++;
+ if (inShutdown())
+ break;
}
+ return numDeleted;
+}
+bool CursorManager::eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id) {
+ return globalCursorIdCache->eraseCursor(txn, id, true);
+}
+bool CursorManager::eraseCursorGlobal(OperationContext* txn, CursorId id) {
+ return globalCursorIdCache->eraseCursor(txn, id, false);
+}
- // --------------------------
+// --------------------------
- CursorManager::CursorManager( StringData ns )
- : _nss( ns ) {
- _collectionCacheRuntimeId = globalCursorIdCache->created( _nss.ns() );
- _random.reset( new PseudoRandom( globalCursorIdCache->nextSeed() ) );
- }
+CursorManager::CursorManager(StringData ns) : _nss(ns) {
+ _collectionCacheRuntimeId = globalCursorIdCache->created(_nss.ns());
+ _random.reset(new PseudoRandom(globalCursorIdCache->nextSeed()));
+}
- CursorManager::~CursorManager() {
- invalidateAll(true, "collection going away");
- globalCursorIdCache->destroyed( _collectionCacheRuntimeId, _nss.ns() );
- }
+CursorManager::~CursorManager() {
+ invalidateAll(true, "collection going away");
+ globalCursorIdCache->destroyed(_collectionCacheRuntimeId, _nss.ns());
+}
- void CursorManager::invalidateAll(bool collectionGoingAway,
- const std::string& reason) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+void CursorManager::invalidateAll(bool collectionGoingAway, const std::string& reason) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( ExecSet::iterator it = _nonCachedExecutors.begin();
- it != _nonCachedExecutors.end();
- ++it ) {
+ for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
+ ++it) {
+ // we kill the executor, but it deletes itself
+ PlanExecutor* exec = *it;
+ exec->kill(reason);
+ invariant(exec->collection() == NULL);
+ }
+ _nonCachedExecutors.clear();
- // we kill the executor, but it deletes itself
- PlanExecutor* exec = *it;
- exec->kill(reason);
- invariant( exec->collection() == NULL );
- }
- _nonCachedExecutors.clear();
+ if (collectionGoingAway) {
+ // we're going to wipe out the world
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
- if ( collectionGoingAway ) {
- // we're going to wipe out the world
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
+ cc->kill();
- cc->kill();
+ invariant(cc->getExecutor() == NULL || cc->getExecutor()->collection() == NULL);
- invariant( cc->getExecutor() == NULL || cc->getExecutor()->collection() == NULL );
-
- // If the CC is pinned, somebody is actively using it and we do not delete it.
- // Instead we notify the holder that we killed it. The holder will then delete the
- // CC.
- //
- // If the CC is not pinned, there is nobody actively holding it. We can safely
- // delete it.
- if (!cc->isPinned()) {
- delete cc;
- }
+ // If the CC is pinned, somebody is actively using it and we do not delete it.
+ // Instead we notify the holder that we killed it. The holder will then delete the
+ // CC.
+ //
+ // If the CC is not pinned, there is nobody actively holding it. We can safely
+ // delete it.
+ if (!cc->isPinned()) {
+ delete cc;
}
}
- else {
- CursorMap newMap;
-
- // collection will still be around, just all PlanExecutors are invalid
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
-
- // Note that a valid ClientCursor state is "no cursor no executor." This is because
- // the set of active cursor IDs in ClientCursor is used as representation of query
- // state. See sharding_block.h. TODO(greg,hk): Move this out.
- if (NULL == cc->getExecutor() ) {
- newMap.insert( *i );
- continue;
- }
-
- if (cc->isPinned() || cc->isAggCursor()) {
- // Pinned cursors need to stay alive, so we leave them around. Aggregation
- // cursors also can stay alive (since they don't have their lifetime bound to
- // the underlying collection). However, if they have an associated executor, we
- // need to kill it, because it's now invalid.
- if ( cc->getExecutor() )
- cc->getExecutor()->kill(reason);
- newMap.insert( *i );
- }
- else {
- cc->kill();
- delete cc;
- }
+ } else {
+ CursorMap newMap;
+
+ // collection will still be around, just all PlanExecutors are invalid
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ // Note that a valid ClientCursor state is "no cursor no executor." This is because
+ // the set of active cursor IDs in ClientCursor is used as representation of query
+ // state. See sharding_block.h. TODO(greg,hk): Move this out.
+ if (NULL == cc->getExecutor()) {
+ newMap.insert(*i);
+ continue;
}
- _cursors = newMap;
+ if (cc->isPinned() || cc->isAggCursor()) {
+ // Pinned cursors need to stay alive, so we leave them around. Aggregation
+ // cursors also can stay alive (since they don't have their lifetime bound to
+ // the underlying collection). However, if they have an associated executor, we
+ // need to kill it, because it's now invalid.
+ if (cc->getExecutor())
+ cc->getExecutor()->kill(reason);
+ newMap.insert(*i);
+ } else {
+ cc->kill();
+ delete cc;
+ }
}
+
+ _cursors = newMap;
}
+}
- void CursorManager::invalidateDocument( OperationContext* txn,
- const RecordId& dl,
- InvalidationType type ) {
- if ( supportsDocLocking() ) {
- // If a storage engine supports doc locking, then we do not need to invalidate.
- // The transactional boundaries of the operation protect us.
- return;
- }
+void CursorManager::invalidateDocument(OperationContext* txn,
+ const RecordId& dl,
+ InvalidationType type) {
+ if (supportsDocLocking()) {
+ // If a storage engine supports doc locking, then we do not need to invalidate.
+ // The transactional boundaries of the operation protect us.
+ return;
+ }
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( ExecSet::iterator it = _nonCachedExecutors.begin();
- it != _nonCachedExecutors.end();
- ++it ) {
+ for (ExecSet::iterator it = _nonCachedExecutors.begin(); it != _nonCachedExecutors.end();
+ ++it) {
+ PlanExecutor* exec = *it;
+ exec->invalidate(txn, dl, type);
+ }
- PlanExecutor* exec = *it;
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ PlanExecutor* exec = i->second->getExecutor();
+ if (exec) {
exec->invalidate(txn, dl, type);
}
-
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- PlanExecutor* exec = i->second->getExecutor();
- if ( exec ) {
- exec->invalidate(txn, dl, type);
- }
- }
}
+}
- std::size_t CursorManager::timeoutCursors( int millisSinceLastCall ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
-
- vector<ClientCursor*> toDelete;
-
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
- if ( cc->shouldTimeout( millisSinceLastCall ) )
- toDelete.push_back( cc );
- }
+std::size_t CursorManager::timeoutCursors(int millisSinceLastCall) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( vector<ClientCursor*>::const_iterator i = toDelete.begin();
- i != toDelete.end(); ++i ) {
- ClientCursor* cc = *i;
- _deregisterCursor_inlock( cc );
- cc->kill();
- delete cc;
- }
+ vector<ClientCursor*> toDelete;
- return toDelete.size();
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ if (cc->shouldTimeout(millisSinceLastCall))
+ toDelete.push_back(cc);
}
- void CursorManager::registerExecutor( PlanExecutor* exec ) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- const std::pair<ExecSet::iterator, bool> result = _nonCachedExecutors.insert(exec);
- invariant(result.second); // make sure this was inserted
+ for (vector<ClientCursor*>::const_iterator i = toDelete.begin(); i != toDelete.end(); ++i) {
+ ClientCursor* cc = *i;
+ _deregisterCursor_inlock(cc);
+ cc->kill();
+ delete cc;
}
- void CursorManager::deregisterExecutor( PlanExecutor* exec ) {
- stdx::lock_guard<SimpleMutex> lk(_mutex);
- _nonCachedExecutors.erase(exec);
- }
+ return toDelete.size();
+}
- ClientCursor* CursorManager::find( CursorId id, bool pin ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- CursorMap::const_iterator it = _cursors.find( id );
- if ( it == _cursors.end() )
- return NULL;
-
- ClientCursor* cursor = it->second;
- if ( pin ) {
- uassert( 12051,
- "clientcursor already in use? driver problem?",
- !cursor->isPinned() );
- cursor->setPinned();
- }
+void CursorManager::registerExecutor(PlanExecutor* exec) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ const std::pair<ExecSet::iterator, bool> result = _nonCachedExecutors.insert(exec);
+ invariant(result.second); // make sure this was inserted
+}
- return cursor;
- }
+void CursorManager::deregisterExecutor(PlanExecutor* exec) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ _nonCachedExecutors.erase(exec);
+}
- void CursorManager::unpin( ClientCursor* cursor ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+ClientCursor* CursorManager::find(CursorId id, bool pin) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ CursorMap::const_iterator it = _cursors.find(id);
+ if (it == _cursors.end())
+ return NULL;
- invariant( cursor->isPinned() );
- cursor->unsetPinned();
+ ClientCursor* cursor = it->second;
+ if (pin) {
+ uassert(12051, "clientcursor already in use? driver problem?", !cursor->isPinned());
+ cursor->setPinned();
}
- bool CursorManager::ownsCursorId( CursorId cursorId ) const {
- return _collectionCacheRuntimeId == idFromCursorId( cursorId );
- }
+ return cursor;
+}
- void CursorManager::getCursorIds( std::set<CursorId>* openCursors ) const {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+void CursorManager::unpin(ClientCursor* cursor) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- for ( CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i ) {
- ClientCursor* cc = i->second;
- openCursors->insert( cc->cursorid() );
- }
- }
+ invariant(cursor->isPinned());
+ cursor->unsetPinned();
+}
- size_t CursorManager::numCursors() const {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- return _cursors.size();
- }
+bool CursorManager::ownsCursorId(CursorId cursorId) const {
+ return _collectionCacheRuntimeId == idFromCursorId(cursorId);
+}
- CursorId CursorManager::_allocateCursorId_inlock() {
- for ( int i = 0; i < 10000; i++ ) {
- unsigned mypart = static_cast<unsigned>( _random->nextInt32() );
- CursorId id = cursorIdFromParts( _collectionCacheRuntimeId, mypart );
- if ( _cursors.count( id ) == 0 )
- return id;
- }
- fassertFailed( 17360 );
- }
+void CursorManager::getCursorIds(std::set<CursorId>* openCursors) const {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- CursorId CursorManager::registerCursor( ClientCursor* cc ) {
- invariant( cc );
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- CursorId id = _allocateCursorId_inlock();
- _cursors[id] = cc;
- return id;
+ for (CursorMap::const_iterator i = _cursors.begin(); i != _cursors.end(); ++i) {
+ ClientCursor* cc = i->second;
+ openCursors->insert(cc->cursorid());
}
+}
+
+size_t CursorManager::numCursors() const {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ return _cursors.size();
+}
- void CursorManager::deregisterCursor( ClientCursor* cc ) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
- _deregisterCursor_inlock( cc );
+CursorId CursorManager::_allocateCursorId_inlock() {
+ for (int i = 0; i < 10000; i++) {
+ unsigned mypart = static_cast<unsigned>(_random->nextInt32());
+ CursorId id = cursorIdFromParts(_collectionCacheRuntimeId, mypart);
+ if (_cursors.count(id) == 0)
+ return id;
}
+ fassertFailed(17360);
+}
- bool CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
- stdx::lock_guard<SimpleMutex> lk( _mutex );
+CursorId CursorManager::registerCursor(ClientCursor* cc) {
+ invariant(cc);
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ CursorId id = _allocateCursorId_inlock();
+ _cursors[id] = cc;
+ return id;
+}
- CursorMap::iterator it = _cursors.find( id );
- if ( it == _cursors.end() ) {
- if ( checkAuth )
- audit::logKillCursorsAuthzCheck( txn->getClient(),
- _nss,
- id,
- ErrorCodes::CursorNotFound );
- return false;
- }
+void CursorManager::deregisterCursor(ClientCursor* cc) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
+ _deregisterCursor_inlock(cc);
+}
- ClientCursor* cursor = it->second;
+bool CursorManager::eraseCursor(OperationContext* txn, CursorId id, bool checkAuth) {
+ stdx::lock_guard<SimpleMutex> lk(_mutex);
- if ( checkAuth )
- audit::logKillCursorsAuthzCheck( txn->getClient(),
- _nss,
- id,
- ErrorCodes::OK );
+ CursorMap::iterator it = _cursors.find(id);
+ if (it == _cursors.end()) {
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::CursorNotFound);
+ return false;
+ }
- massert( 16089,
- str::stream() << "Cannot kill active cursor " << id,
- !cursor->isPinned() );
+ ClientCursor* cursor = it->second;
- cursor->kill();
- _deregisterCursor_inlock( cursor );
- delete cursor;
- return true;
- }
+ if (checkAuth)
+ audit::logKillCursorsAuthzCheck(txn->getClient(), _nss, id, ErrorCodes::OK);
- void CursorManager::_deregisterCursor_inlock( ClientCursor* cc ) {
- invariant( cc );
- CursorId id = cc->cursorid();
- _cursors.erase( id );
- }
+ massert(16089, str::stream() << "Cannot kill active cursor " << id, !cursor->isPinned());
+
+ cursor->kill();
+ _deregisterCursor_inlock(cursor);
+ delete cursor;
+ return true;
+}
+void CursorManager::_deregisterCursor_inlock(ClientCursor* cc) {
+ invariant(cc);
+ CursorId id = cc->cursorid();
+ _cursors.erase(id);
+}
}
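
For context on the id arithmetic used by _allocateCursorId_inlock() and ownsCursorId() above: each CursorId packs the manager's _collectionCacheRuntimeId together with a random 32-bit value via cursorIdFromParts(), and idFromCursorId() recovers the runtime id. The helpers below are a minimal sketch of that scheme, assuming the runtime id occupies the high 32 bits of the 64-bit id; the real definitions live elsewhere in this file and may differ in detail.

// Sketch only: assumes CursorId is a 64-bit integer whose high 32 bits carry the
// owning manager's runtime id and whose low 32 bits are the random part.
typedef long long CursorId;

CursorId cursorIdFromParts(unsigned collectionIdentifier, unsigned cursor) {
    // High word: runtime id of the owning CursorManager. Low word: random part.
    CursorId x = static_cast<CursorId>(collectionIdentifier) << 32;
    x |= cursor;
    return x;
}

unsigned idFromCursorId(CursorId id) {
    // Recover the runtime id that cursorIdFromParts() placed in the high word.
    return static_cast<unsigned>(id >> 32);
}
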
diff --git a/src/mongo/db/catalog/cursor_manager.h b/src/mongo/db/catalog/cursor_manager.h
index 65fd204c5e3..81b9dd0107c 100644
--- a/src/mongo/db/catalog/cursor_manager.h
+++ b/src/mongo/db/catalog/cursor_manager.h
@@ -40,118 +40,114 @@
namespace mongo {
- class OperationContext;
- class PseudoRandom;
- class PlanExecutor;
-
- class CursorManager {
- public:
- CursorManager( StringData ns );
-
- /**
- * will kill() all PlanExecutor instances it has
- */
- ~CursorManager();
-
- // -----------------
-
- /**
- * @param collectionGoingAway Pass as true if the Collection instance is going away.
- * This could be because the db is being closed, or the
- * collection/db is being dropped.
- * @param reason The motivation for invalidating all cursors. Will be used
- * for error reporting and logging when an operation finds that
- * the cursor it was operating on has been killed.
- */
- void invalidateAll(bool collectionGoingAway, const std::string& reason);
-
- /**
- * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
- * must called *before* the provided RecordId is about to be deleted or mutated.
- */
- void invalidateDocument( OperationContext* txn,
- const RecordId& dl,
- InvalidationType type );
-
- /*
- * timesout cursors that have been idle for too long
- * note: must have a readlock on the collection
- * @return number timed out
- */
- std::size_t timeoutCursors( int millisSinceLastCall );
-
- // -----------------
-
- /**
- * Register an executor so that it can be notified of deletion/invalidation during yields.
- * Must be called before an executor yields. If an executor is cached (inside a
- * ClientCursor) it MUST NOT be registered; the two are mutually exclusive.
- */
- void registerExecutor(PlanExecutor* exec);
-
- /**
- * Remove an executor from the registry.
- */
- void deregisterExecutor(PlanExecutor* exec);
-
- // -----------------
-
- CursorId registerCursor( ClientCursor* cc );
- void deregisterCursor( ClientCursor* cc );
-
- bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth );
-
- /**
- * Returns true if the space of cursor ids that cursor manager is responsible for includes
- * the given cursor id. Otherwise, returns false.
- *
- * The return value of this method does not indicate any information about whether or not a
- * cursor actually exists with the given cursor id. Use the find() method for that purpose.
- */
- bool ownsCursorId( CursorId cursorId ) const;
-
- void getCursorIds( std::set<CursorId>* openCursors ) const;
- std::size_t numCursors() const;
-
- /**
- * @param pin - if true, will try to pin cursor
- * if pinned already, will assert
- * otherwise will pin
- */
- ClientCursor* find( CursorId id, bool pin );
-
- void unpin( ClientCursor* cursor );
-
- // ----------------------
-
- static CursorManager* getGlobalCursorManager();
-
- static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n,
- const char* ids);
- static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
-
- static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
-
- /**
- * @return number timed out
- */
- static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
-
- private:
- CursorId _allocateCursorId_inlock();
- void _deregisterCursor_inlock( ClientCursor* cc );
-
- NamespaceString _nss;
- unsigned _collectionCacheRuntimeId;
- std::unique_ptr<PseudoRandom> _random;
-
- mutable SimpleMutex _mutex;
-
- typedef unordered_set<PlanExecutor*> ExecSet;
- ExecSet _nonCachedExecutors;
-
- typedef std::map<CursorId,ClientCursor*> CursorMap;
- CursorMap _cursors;
- };
+class OperationContext;
+class PseudoRandom;
+class PlanExecutor;
+
+class CursorManager {
+public:
+ CursorManager(StringData ns);
+
+ /**
+     * Will kill() all PlanExecutor instances it owns.
+ */
+ ~CursorManager();
+
+ // -----------------
+
+ /**
+ * @param collectionGoingAway Pass as true if the Collection instance is going away.
+ * This could be because the db is being closed, or the
+ * collection/db is being dropped.
+ * @param reason The motivation for invalidating all cursors. Will be used
+ * for error reporting and logging when an operation finds that
+ * the cursor it was operating on has been killed.
+ */
+ void invalidateAll(bool collectionGoingAway, const std::string& reason);
+
+ /**
+ * Broadcast a document invalidation to all relevant PlanExecutor(s). invalidateDocument
+     * must be called *before* the provided RecordId is deleted or mutated.
+ */
+ void invalidateDocument(OperationContext* txn, const RecordId& dl, InvalidationType type);
+
+ /*
+     * Times out cursors that have been idle for too long.
+     * Note: the caller must hold a read lock on the collection.
+     * @return the number of cursors timed out
+ */
+ std::size_t timeoutCursors(int millisSinceLastCall);
+
+ // -----------------
+
+ /**
+ * Register an executor so that it can be notified of deletion/invalidation during yields.
+ * Must be called before an executor yields. If an executor is cached (inside a
+ * ClientCursor) it MUST NOT be registered; the two are mutually exclusive.
+ */
+ void registerExecutor(PlanExecutor* exec);
+
+ /**
+ * Remove an executor from the registry.
+ */
+ void deregisterExecutor(PlanExecutor* exec);
+
+ // -----------------
+
+ CursorId registerCursor(ClientCursor* cc);
+ void deregisterCursor(ClientCursor* cc);
+
+ bool eraseCursor(OperationContext* txn, CursorId id, bool checkAuth);
+
+ /**
+     * Returns true if the space of cursor ids that this cursor manager is responsible
+     * for includes the given cursor id; otherwise, returns false.
+     *
+     * The return value says nothing about whether a cursor with the given id actually
+     * exists. Use the find() method for that purpose.
+ */
+ bool ownsCursorId(CursorId cursorId) const;
+
+ void getCursorIds(std::set<CursorId>* openCursors) const;
+ std::size_t numCursors() const;
+
+ /**
+     * @param pin - if true, attempts to pin the cursor: asserts if the cursor
+     *              is already pinned, otherwise pins it.
+ */
+ ClientCursor* find(CursorId id, bool pin);
+
+ void unpin(ClientCursor* cursor);
+
+ // ----------------------
+
+ static CursorManager* getGlobalCursorManager();
+
+ static int eraseCursorGlobalIfAuthorized(OperationContext* txn, int n, const char* ids);
+ static bool eraseCursorGlobalIfAuthorized(OperationContext* txn, CursorId id);
+
+ static bool eraseCursorGlobal(OperationContext* txn, CursorId id);
+
+ /**
+     * @return the number of cursors timed out
+ */
+ static std::size_t timeoutCursorsGlobal(OperationContext* txn, int millisSinceLastCall);
+
+private:
+ CursorId _allocateCursorId_inlock();
+ void _deregisterCursor_inlock(ClientCursor* cc);
+
+ NamespaceString _nss;
+ unsigned _collectionCacheRuntimeId;
+ std::unique_ptr<PseudoRandom> _random;
+
+ mutable SimpleMutex _mutex;
+ typedef unordered_set<PlanExecutor*> ExecSet;
+ ExecSet _nonCachedExecutors;
+
+ typedef std::map<CursorId, ClientCursor*> CursorMap;
+ CursorMap _cursors;
+};
}
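
To make the pinning contract above concrete, here is a hypothetical caller sketch: find(id, true) pins the cursor (asserting if it is already pinned), and the pin must be released with unpin() before anyone may eraseCursor() it, since eraseCursor() massert()s on pinned cursors. Everything here other than the CursorManager methods (the function name, the elided getMore-style work) is illustrative.

// Sketch only: assumes 'manager' is the CursorManager for the collection (held
// under the appropriate collection lock) and 'id' came from registerCursor().
void useAndEraseCursor(OperationContext* txn, CursorManager* manager, CursorId id) {
    ClientCursor* cc = manager->find(id, /*pin*/ true);
    if (!cc)
        return;  // cursor already timed out or was erased

    // ... drive cc->getExecutor() to produce a batch of results here ...

    // Release the pin first: eraseCursor() asserts that the cursor is not pinned.
    manager->unpin(cc);
    manager->eraseCursor(txn, id, /*checkAuth*/ false);
}
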
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 7753235fa8f..d5b7b372c40 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -65,570 +65,544 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::list;
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- void massertNamespaceNotIndex( StringData ns, StringData caller ) {
- massert( 17320,
- str::stream() << "cannot do " << caller
- << " on namespace with a $ in it: " << ns,
- NamespaceString::normal( ns ) );
- }
+using std::unique_ptr;
+using std::endl;
+using std::list;
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+void massertNamespaceNotIndex(StringData ns, StringData caller) {
+ massert(17320,
+ str::stream() << "cannot do " << caller << " on namespace with a $ in it: " << ns,
+ NamespaceString::normal(ns));
+}
+
+class Database::AddCollectionChange : public RecoveryUnit::Change {
+public:
+ AddCollectionChange(Database* db, StringData ns) : _db(db), _ns(ns.toString()) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ CollectionMap::const_iterator it = _db->_collections.find(_ns);
+ if (it == _db->_collections.end())
+ return;
- class Database::AddCollectionChange : public RecoveryUnit::Change {
- public:
- AddCollectionChange(Database* db, StringData ns)
- : _db(db)
- , _ns(ns.toString())
- {}
-
- virtual void commit() {}
- virtual void rollback() {
- CollectionMap::const_iterator it = _db->_collections.find(_ns);
- if ( it == _db->_collections.end() )
- return;
-
- delete it->second;
- _db->_collections.erase( it );
- }
+ delete it->second;
+ _db->_collections.erase(it);
+ }
- Database* const _db;
- const std::string _ns;
- };
+ Database* const _db;
+ const std::string _ns;
+};
- class Database::RemoveCollectionChange : public RecoveryUnit::Change {
- public:
- // Takes ownership of coll (but not db).
- RemoveCollectionChange(Database* db, Collection* coll)
- : _db(db)
- , _coll(coll)
- {}
+class Database::RemoveCollectionChange : public RecoveryUnit::Change {
+public:
+ // Takes ownership of coll (but not db).
+ RemoveCollectionChange(Database* db, Collection* coll) : _db(db), _coll(coll) {}
- virtual void commit() {
- delete _coll;
- }
+ virtual void commit() {
+ delete _coll;
+ }
- virtual void rollback() {
- Collection*& inMap = _db->_collections[_coll->ns().ns()];
- invariant(!inMap);
- inMap = _coll;
- }
+ virtual void rollback() {
+ Collection*& inMap = _db->_collections[_coll->ns().ns()];
+ invariant(!inMap);
+ inMap = _coll;
+ }
- Database* const _db;
- Collection* const _coll;
- };
+ Database* const _db;
+ Collection* const _coll;
+};
- Database::~Database() {
- for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i)
- delete i->second;
- }
+Database::~Database() {
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i)
+ delete i->second;
+}
- void Database::close(OperationContext* txn ) {
- // XXX? - Do we need to close database under global lock or just DB-lock is sufficient ?
- invariant(txn->lockState()->isW());
- // oplog caches some things, dirty its caches
- repl::oplogCheckCloseDatabase(txn, this);
+void Database::close(OperationContext* txn) {
+    // XXX? - Do we need to close the database under the global lock, or is a DB lock sufficient?
+ invariant(txn->lockState()->isW());
+ // oplog caches some things, dirty its caches
+ repl::oplogCheckCloseDatabase(txn, this);
- if ( BackgroundOperation::inProgForDb( _name ) ) {
- log() << "warning: bg op in prog during close db? " << _name << endl;
- }
+ if (BackgroundOperation::inProgForDb(_name)) {
+ log() << "warning: bg op in prog during close db? " << _name << endl;
}
+}
- Status Database::validateDBName( StringData dbname ) {
+Status Database::validateDBName(StringData dbname) {
+ if (dbname.size() <= 0)
+ return Status(ErrorCodes::BadValue, "db name is empty");
- if ( dbname.size() <= 0 )
- return Status( ErrorCodes::BadValue, "db name is empty" );
+ if (dbname.size() >= 64)
+ return Status(ErrorCodes::BadValue, "db name is too long");
- if ( dbname.size() >= 64 )
- return Status( ErrorCodes::BadValue, "db name is too long" );
+ if (dbname.find('.') != string::npos)
+ return Status(ErrorCodes::BadValue, "db name cannot contain a .");
- if ( dbname.find( '.' ) != string::npos )
- return Status( ErrorCodes::BadValue, "db name cannot contain a ." );
-
- if ( dbname.find( ' ' ) != string::npos )
- return Status( ErrorCodes::BadValue, "db name cannot contain a space" );
+ if (dbname.find(' ') != string::npos)
+ return Status(ErrorCodes::BadValue, "db name cannot contain a space");
#ifdef _WIN32
- static const char* windowsReservedNames[] = {
- "con", "prn", "aux", "nul",
- "com1", "com2", "com3", "com4", "com5", "com6", "com7", "com8", "com9",
- "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9"
- };
-
- string lower( dbname.toString() );
- std::transform( lower.begin(), lower.end(), lower.begin(), ::tolower );
- for ( size_t i = 0; i < (sizeof(windowsReservedNames) / sizeof(char*)); ++i ) {
- if ( lower == windowsReservedNames[i] ) {
- stringstream errorString;
- errorString << "db name \"" << dbname.toString() << "\" is a reserved name";
- return Status( ErrorCodes::BadValue, errorString.str() );
- }
+ static const char* windowsReservedNames[] = {
+ "con", "prn", "aux", "nul", "com1", "com2", "com3", "com4", "com5", "com6", "com7",
+ "com8", "com9", "lpt1", "lpt2", "lpt3", "lpt4", "lpt5", "lpt6", "lpt7", "lpt8", "lpt9"};
+
+ string lower(dbname.toString());
+ std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
+ for (size_t i = 0; i < (sizeof(windowsReservedNames) / sizeof(char*)); ++i) {
+ if (lower == windowsReservedNames[i]) {
+ stringstream errorString;
+ errorString << "db name \"" << dbname.toString() << "\" is a reserved name";
+ return Status(ErrorCodes::BadValue, errorString.str());
}
+ }
#endif
- return Status::OK();
- }
+ return Status::OK();
+}
- Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn,
- StringData fullns) {
- Collection* collection = getCollection( fullns );
- if (collection) {
- return collection;
- }
+Collection* Database::_getOrCreateCollectionInstance(OperationContext* txn, StringData fullns) {
+ Collection* collection = getCollection(fullns);
+ if (collection) {
+ return collection;
+ }
- unique_ptr<CollectionCatalogEntry> cce( _dbEntry->getCollectionCatalogEntry( fullns ) );
- invariant( cce.get() );
+ unique_ptr<CollectionCatalogEntry> cce(_dbEntry->getCollectionCatalogEntry(fullns));
+ invariant(cce.get());
+
+ unique_ptr<RecordStore> rs(_dbEntry->getRecordStore(fullns));
+ invariant(rs.get()); // if cce exists, so should this
+
+ // Not registering AddCollectionChange since this is for collections that already exist.
+ Collection* c = new Collection(txn, fullns, cce.release(), rs.release(), _dbEntry);
+ return c;
+}
+
+Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
+ : _name(name.toString()),
+ _dbEntry(dbEntry),
+ _profileName(_name + ".system.profile"),
+ _indexesName(_name + ".system.indexes") {
+ Status status = validateDBName(_name);
+ if (!status.isOK()) {
+ warning() << "tried to open invalid db: " << _name << endl;
+ uasserted(10028, status.toString());
+ }
- unique_ptr<RecordStore> rs( _dbEntry->getRecordStore( fullns ) );
- invariant( rs.get() ); // if cce exists, so should this
+ _profile = serverGlobalParams.defaultProfile;
- // Not registering AddCollectionChange since this is for collections that already exist.
- Collection* c = new Collection( txn, fullns, cce.release(), rs.release(), _dbEntry );
- return c;
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
+ const string ns = *it;
+ _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
}
+}
- Database::Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry)
- : _name(name.toString()),
- _dbEntry( dbEntry ),
- _profileName(_name + ".system.profile"),
- _indexesName(_name + ".system.indexes")
- {
- Status status = validateDBName( _name );
- if ( !status.isOK() ) {
- warning() << "tried to open invalid db: " << _name << endl;
- uasserted( 10028, status.toString() );
- }
-
- _profile = serverGlobalParams.defaultProfile;
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
- for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
- const string ns = *it;
- _collections[ns] = _getOrCreateCollectionInstance(txn, ns);
- }
+/*static*/
+string Database::duplicateUncasedName(const string& name, set<string>* duplicates) {
+ if (duplicates) {
+ duplicates->clear();
}
+ set<string> allShortNames;
+ dbHolder().getAllShortNames(allShortNames);
- /*static*/
- string Database::duplicateUncasedName(const string &name, set< string > *duplicates) {
- if ( duplicates ) {
- duplicates->clear();
- }
-
- set<string> allShortNames;
- dbHolder().getAllShortNames(allShortNames);
+ for (const auto& dbname : allShortNames) {
+ if (strcasecmp(dbname.c_str(), name.c_str()))
+ continue;
- for (const auto& dbname : allShortNames) {
- if (strcasecmp(dbname.c_str(), name.c_str()))
- continue;
+ if (strcmp(dbname.c_str(), name.c_str()) == 0)
+ continue;
- if (strcmp(dbname.c_str(), name.c_str()) == 0)
- continue;
-
- if ( duplicates ) {
- duplicates->insert(dbname);
- } else {
- return dbname;
- }
+ if (duplicates) {
+ duplicates->insert(dbname);
+ } else {
+ return dbname;
}
- if ( duplicates ) {
- return duplicates->empty() ? "" : *duplicates->begin();
- }
- return "";
}
+ if (duplicates) {
+ return duplicates->empty() ? "" : *duplicates->begin();
+ }
+ return "";
+}
- void Database::clearTmpCollections(OperationContext* txn) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+void Database::clearTmpCollections(OperationContext* txn) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
- for ( list<string>::iterator i = collections.begin(); i != collections.end(); ++i ) {
- string ns = *i;
- invariant( NamespaceString::normal( ns ) );
+ for (list<string>::iterator i = collections.begin(); i != collections.end(); ++i) {
+ string ns = *i;
+ invariant(NamespaceString::normal(ns));
- CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry( ns );
+ CollectionCatalogEntry* coll = _dbEntry->getCollectionCatalogEntry(ns);
- CollectionOptions options = coll->getCollectionOptions( txn );
- if ( !options.temp )
+ CollectionOptions options = coll->getCollectionOptions(txn);
+ if (!options.temp)
+ continue;
+ try {
+ WriteUnitOfWork wunit(txn);
+ Status status = dropCollection(txn, ns);
+ if (!status.isOK()) {
+ warning() << "could not drop temp collection '" << ns << "': " << status;
continue;
- try {
- WriteUnitOfWork wunit(txn);
- Status status = dropCollection( txn, ns );
- if ( !status.isOK() ) {
- warning() << "could not drop temp collection '" << ns << "': " << status;
- continue;
- }
-
- wunit.commit();
- }
- catch (const WriteConflictException& exp) {
- warning() << "could not drop temp collection '" << ns << "' due to "
- "WriteConflictException";
- txn->recoveryUnit()->abandonSnapshot();
}
- }
- }
-
- Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
- if (_profile == newLevel) {
- return Status::OK();
- }
- if (newLevel == 0) {
- _profile = 0;
- return Status::OK();
+ wunit.commit();
+ } catch (const WriteConflictException& exp) {
+ warning() << "could not drop temp collection '" << ns << "' due to "
+ "WriteConflictException";
+ txn->recoveryUnit()->abandonSnapshot();
}
+ }
+}
- if (newLevel < 0 || newLevel > 2) {
- return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
- }
+Status Database::setProfilingLevel(OperationContext* txn, int newLevel) {
+ if (_profile == newLevel) {
+ return Status::OK();
+ }
- Status status = createProfileCollection(txn, this);
- if (!status.isOK()) {
- return status;
- }
+ if (newLevel == 0) {
+ _profile = 0;
+ return Status::OK();
+ }
- _profile = newLevel;
+ if (newLevel < 0 || newLevel > 2) {
+ return Status(ErrorCodes::BadValue, "profiling level has to be >=0 and <= 2");
+ }
- return Status::OK();
+ Status status = createProfileCollection(txn, this);
+ if (!status.isOK()) {
+ return status;
}
- void Database::getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale ) {
- list<string> collections;
- _dbEntry->getCollectionNamespaces( &collections );
+ _profile = newLevel;
- long long ncollections = 0;
- long long objects = 0;
- long long size = 0;
- long long storageSize = 0;
- long long numExtents = 0;
- long long indexes = 0;
- long long indexSize = 0;
+ return Status::OK();
+}
- for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
- const string ns = *it;
+void Database::getStats(OperationContext* opCtx, BSONObjBuilder* output, double scale) {
+ list<string> collections;
+ _dbEntry->getCollectionNamespaces(&collections);
- Collection* collection = getCollection( ns );
- if ( !collection )
- continue;
+ long long ncollections = 0;
+ long long objects = 0;
+ long long size = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
- ncollections += 1;
- objects += collection->numRecords(opCtx);
- size += collection->dataSize(opCtx);
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
+ const string ns = *it;
- BSONObjBuilder temp;
- storageSize += collection->getRecordStore()->storageSize( opCtx, &temp );
- numExtents += temp.obj()["numExtents"].numberInt(); // XXX
+ Collection* collection = getCollection(ns);
+ if (!collection)
+ continue;
- indexes += collection->getIndexCatalog()->numIndexesTotal( opCtx );
- indexSize += collection->getIndexSize(opCtx);
- }
+ ncollections += 1;
+ objects += collection->numRecords(opCtx);
+ size += collection->dataSize(opCtx);
- output->appendNumber( "collections" , ncollections );
- output->appendNumber( "objects" , objects );
- output->append ( "avgObjSize" , objects == 0 ? 0 : double(size) / double(objects) );
- output->appendNumber( "dataSize" , size / scale );
- output->appendNumber( "storageSize" , storageSize / scale);
- output->appendNumber( "numExtents" , numExtents );
- output->appendNumber( "indexes" , indexes );
- output->appendNumber( "indexSize" , indexSize / scale );
+ BSONObjBuilder temp;
+ storageSize += collection->getRecordStore()->storageSize(opCtx, &temp);
+ numExtents += temp.obj()["numExtents"].numberInt(); // XXX
- _dbEntry->appendExtraStats( opCtx, output, scale );
+ indexes += collection->getIndexCatalog()->numIndexesTotal(opCtx);
+ indexSize += collection->getIndexSize(opCtx);
}
- Status Database::dropCollection(OperationContext* txn, StringData fullns) {
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+ output->appendNumber("collections", ncollections);
+ output->appendNumber("objects", objects);
+ output->append("avgObjSize", objects == 0 ? 0 : double(size) / double(objects));
+ output->appendNumber("dataSize", size / scale);
+ output->appendNumber("storageSize", storageSize / scale);
+ output->appendNumber("numExtents", numExtents);
+ output->appendNumber("indexes", indexes);
+ output->appendNumber("indexSize", indexSize / scale);
- LOG(1) << "dropCollection: " << fullns << endl;
- massertNamespaceNotIndex( fullns, "dropCollection" );
+ _dbEntry->appendExtraStats(opCtx, output, scale);
+}
- Collection* collection = getCollection( fullns );
- if ( !collection ) {
- // collection doesn't exist
- return Status::OK();
- }
+Status Database::dropCollection(OperationContext* txn, StringData fullns) {
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- NamespaceString nss(fullns);
- {
- verify(nss.db() == _name);
+ LOG(1) << "dropCollection: " << fullns << endl;
+ massertNamespaceNotIndex(fullns, "dropCollection");
- if (nss.isSystem()) {
- if (nss.isSystemDotProfile()) {
- if ( _profile != 0 )
- return Status(
- ErrorCodes::IllegalOperation,
- "turn off profiling before dropping system.profile collection");
- }
- else {
- return Status( ErrorCodes::IllegalOperation, "can't drop system ns" );
- }
- }
- }
-
- BackgroundOperation::assertNoBgOpInProgForNs( fullns );
+ Collection* collection = getCollection(fullns);
+ if (!collection) {
+ // collection doesn't exist
+ return Status::OK();
+ }
- audit::logDropCollection( &cc(), fullns );
+ NamespaceString nss(fullns);
+ {
+ verify(nss.db() == _name);
- Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
- if ( !s.isOK() ) {
- warning() << "could not drop collection, trying to drop indexes"
- << fullns << " because of " << s.toString();
- return s;
+ if (nss.isSystem()) {
+ if (nss.isSystemDotProfile()) {
+ if (_profile != 0)
+ return Status(ErrorCodes::IllegalOperation,
+ "turn off profiling before dropping system.profile collection");
+ } else {
+ return Status(ErrorCodes::IllegalOperation, "can't drop system ns");
+ }
}
+ }
- verify( collection->_details->getTotalIndexCount( txn ) == 0 );
- LOG(1) << "\t dropIndexes done" << endl;
-
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
-
- s = _dbEntry->dropCollection( txn, fullns );
+ BackgroundOperation::assertNoBgOpInProgForNs(fullns);
- // we want to do this always
- _clearCollectionCache(txn, fullns, "collection dropped");
+ audit::logDropCollection(&cc(), fullns);
- if ( !s.isOK() )
- return s;
+ Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
+ if (!s.isOK()) {
+ warning() << "could not drop collection, trying to drop indexes" << fullns << " because of "
+ << s.toString();
+ return s;
+ }
- DEV {
- // check all index collection entries are gone
- string nstocheck = fullns.toString() + ".$";
- for ( CollectionMap::const_iterator i = _collections.begin();
- i != _collections.end();
- ++i ) {
- string temp = i->first;
- if ( temp.find( nstocheck ) != 0 )
- continue;
- log() << "after drop, bad cache entries for: "
- << fullns << " have " << temp;
- verify(0);
- }
- }
+ verify(collection->_details->getTotalIndexCount(txn) == 0);
+ LOG(1) << "\t dropIndexes done" << endl;
- getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, nss);
- return Status::OK();
- }
+ Top::get(txn->getClient()->getServiceContext()).collectionDropped(fullns);
- void Database::_clearCollectionCache(OperationContext* txn,
- StringData fullns,
- const std::string& reason) {
- verify( _name == nsToDatabaseSubstring( fullns ) );
- CollectionMap::const_iterator it = _collections.find( fullns.toString() );
- if ( it == _collections.end() )
- return;
+ s = _dbEntry->dropCollection(txn, fullns);
- // Takes ownership of the collection
- txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+ // we want to do this always
+ _clearCollectionCache(txn, fullns, "collection dropped");
- it->second->_cursorManager.invalidateAll(false, reason);
- _collections.erase( it );
- }
+ if (!s.isOK())
+ return s;
- Collection* Database::getCollection( StringData ns ) const {
- invariant( _name == nsToDatabaseSubstring( ns ) );
- CollectionMap::const_iterator it = _collections.find( ns );
- if ( it != _collections.end() && it->second ) {
- return it->second;
+ DEV {
+ // check all index collection entries are gone
+ string nstocheck = fullns.toString() + ".$";
+ for (CollectionMap::const_iterator i = _collections.begin(); i != _collections.end(); ++i) {
+ string temp = i->first;
+ if (temp.find(nstocheck) != 0)
+ continue;
+ log() << "after drop, bad cache entries for: " << fullns << " have " << temp;
+ verify(0);
}
-
- return NULL;
}
+ getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, nss);
+ return Status::OK();
+}
+
+void Database::_clearCollectionCache(OperationContext* txn,
+ StringData fullns,
+ const std::string& reason) {
+ verify(_name == nsToDatabaseSubstring(fullns));
+ CollectionMap::const_iterator it = _collections.find(fullns.toString());
+ if (it == _collections.end())
+ return;
+
+ // Takes ownership of the collection
+ txn->recoveryUnit()->registerChange(new RemoveCollectionChange(this, it->second));
+
+ it->second->_cursorManager.invalidateAll(false, reason);
+ _collections.erase(it);
+}
+
+Collection* Database::getCollection(StringData ns) const {
+ invariant(_name == nsToDatabaseSubstring(ns));
+ CollectionMap::const_iterator it = _collections.find(ns);
+ if (it != _collections.end() && it->second) {
+ return it->second;
+ }
+ return NULL;
+}
- Status Database::renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) {
-
- audit::logRenameCollection( &cc(), fromNS, toNS );
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
-
- { // remove anything cached
- Collection* coll = getCollection( fromNS );
- if ( !coll )
- return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
- string clearCacheReason = str::stream() << "renamed collection '" << fromNS
- << "' to '" << toNS << "'";
- IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
- }
+Status Database::renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) {
+ audit::logRenameCollection(&cc(), fromNS, toNS);
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
- _clearCollectionCache(txn, fromNS, clearCacheReason);
- _clearCollectionCache(txn, toNS, clearCacheReason);
+ { // remove anything cached
+ Collection* coll = getCollection(fromNS);
+ if (!coll)
+ return Status(ErrorCodes::NamespaceNotFound, "collection not found to rename");
- Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
+ string clearCacheReason = str::stream() << "renamed collection '" << fromNS << "' to '"
+ << toNS << "'";
+ IndexCatalog::IndexIterator ii = coll->getIndexCatalog()->getIndexIterator(txn, true);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ _clearCollectionCache(txn, desc->indexNamespace(), clearCacheReason);
}
- txn->recoveryUnit()->registerChange( new AddCollectionChange(this, toNS) );
- Status s = _dbEntry->renameCollection( txn, fromNS, toNS, stayTemp );
- _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
- return s;
- }
+ _clearCollectionCache(txn, fromNS, clearCacheReason);
+ _clearCollectionCache(txn, toNS, clearCacheReason);
- Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
- Collection* c = getCollection( ns );
- if ( !c ) {
- c = createCollection( txn, ns );
- }
- return c;
+ Top::get(txn->getClient()->getServiceContext()).collectionDropped(fromNS.toString());
}
- Collection* Database::createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool createIdIndex ) {
- massert( 17399, "collection already exists", getCollection( ns ) == NULL );
- massertNamespaceNotIndex( ns, "createCollection" );
- invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
-
- if ( serverGlobalParams.configsvr &&
- !( ns.startsWith( "config." ) ||
- ns.startsWith( "local." ) ||
- ns.startsWith( "admin." ) ) ) {
- uasserted(14037, "can't create user databases on a --configsvr instance");
- }
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(this, toNS));
+ Status s = _dbEntry->renameCollection(txn, fromNS, toNS, stayTemp);
+ _collections[toNS] = _getOrCreateCollectionInstance(txn, toNS);
+ return s;
+}
- if (NamespaceString::normal(ns)) {
- // This check only applies for actual collections, not indexes or other types of ns.
- uassert(17381, str::stream() << "fully qualified namespace " << ns << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
- ns.size() <= NamespaceString::MaxNsCollectionLen);
- }
+Collection* Database::getOrCreateCollection(OperationContext* txn, StringData ns) {
+ Collection* c = getCollection(ns);
+ if (!c) {
+ c = createCollection(txn, ns);
+ }
+ return c;
+}
+
+Collection* Database::createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool createIdIndex) {
+ massert(17399, "collection already exists", getCollection(ns) == NULL);
+ massertNamespaceNotIndex(ns, "createCollection");
+ invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
+
+ if (serverGlobalParams.configsvr &&
+ !(ns.startsWith("config.") || ns.startsWith("local.") || ns.startsWith("admin."))) {
+ uasserted(14037, "can't create user databases on a --configsvr instance");
+ }
- NamespaceString nss( ns );
- uassert( 17316, "cannot create a blank collection", nss.coll() > 0 );
+ if (NamespaceString::normal(ns)) {
+ // This check only applies for actual collections, not indexes or other types of ns.
+ uassert(17381,
+ str::stream() << "fully qualified namespace " << ns << " is too long "
+ << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
+ ns.size() <= NamespaceString::MaxNsCollectionLen);
+ }
- audit::logCreateCollection( &cc(), ns );
+ NamespaceString nss(ns);
+ uassert(17316, "cannot create a blank collection", nss.coll() > 0);
- txn->recoveryUnit()->registerChange( new AddCollectionChange(this, ns) );
+ audit::logCreateCollection(&cc(), ns);
- Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
- massertNoTraceStatusOK(status);
+ txn->recoveryUnit()->registerChange(new AddCollectionChange(this, ns));
+ Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
+ massertNoTraceStatusOK(status);
- Collection* collection = _getOrCreateCollectionInstance(txn, ns);
- invariant(collection);
- _collections[ns] = collection;
- if ( createIdIndex ) {
- if ( collection->requiresIdIndex() ) {
- if ( options.autoIndexId == CollectionOptions::YES ||
- options.autoIndexId == CollectionOptions::DEFAULT ) {
- IndexCatalog* ic = collection->getIndexCatalog();
- uassertStatusOK(
- ic->createIndexOnEmptyCollection(txn, ic->getDefaultIdIndexSpec()));
- }
- }
+ Collection* collection = _getOrCreateCollectionInstance(txn, ns);
+ invariant(collection);
+ _collections[ns] = collection;
- if ( nss.isSystem() ) {
- authindex::createSystemIndexes( txn, collection );
+ if (createIdIndex) {
+ if (collection->requiresIdIndex()) {
+ if (options.autoIndexId == CollectionOptions::YES ||
+ options.autoIndexId == CollectionOptions::DEFAULT) {
+ IndexCatalog* ic = collection->getIndexCatalog();
+ uassertStatusOK(ic->createIndexOnEmptyCollection(txn, ic->getDefaultIdIndexSpec()));
}
-
}
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn, nss, options);
-
- return collection;
- }
-
- const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
- return _dbEntry;
+ if (nss.isSystem()) {
+ authindex::createSystemIndexes(txn, collection);
+ }
}
- void dropAllDatabasesExceptLocal(OperationContext* txn) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
-
- vector<string> n;
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- storageEngine->listDatabases(&n);
-
- if( n.size() == 0 ) return;
- log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
-
- for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
- if (*i != "local") {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- Database* db = dbHolder().get(txn, *i);
- // This is needed since dropDatabase can't be rolled back.
- // This is safe be replaced by "invariant(db);dropDatabase(txn, db);" once fixed
- if (db == nullptr) {
- log() << "database disappeared after listDatabases but before drop: " << *i;
- } else {
- dropDatabase(txn, db);
- }
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn,
- "dropAllDatabasesExceptLocal",
- *i);
+ getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn, nss, options);
+
+ return collection;
+}
+
+const DatabaseCatalogEntry* Database::getDatabaseCatalogEntry() const {
+ return _dbEntry;
+}
+
+void dropAllDatabasesExceptLocal(OperationContext* txn) {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+
+ vector<string> n;
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ storageEngine->listDatabases(&n);
+
+ if (n.size() == 0)
+ return;
+ log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
+
+ for (vector<string>::iterator i = n.begin(); i != n.end(); i++) {
+ if (*i != "local") {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ Database* db = dbHolder().get(txn, *i);
+ // This is needed since dropDatabase can't be rolled back.
+                // It is safe to replace this with "invariant(db); dropDatabase(txn, db);" once fixed.
+ if (db == nullptr) {
+ log() << "database disappeared after listDatabases but before drop: " << *i;
+ } else {
+ dropDatabase(txn, db);
+ }
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropAllDatabasesExceptLocal", *i);
}
}
+}
- void dropDatabase(OperationContext* txn, Database* db ) {
- invariant( db );
-
- // Store the name so we have if for after the db object is deleted
- const string name = db->name();
- LOG(1) << "dropDatabase " << name << endl;
+void dropDatabase(OperationContext* txn, Database* db) {
+ invariant(db);
- invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
+    // Store the name so we have it after the db object is deleted.
+ const string name = db->name();
+ LOG(1) << "dropDatabase " << name << endl;
- BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
+ invariant(txn->lockState()->isDbLockedForMode(name, MODE_X));
- audit::logDropDatabase( &cc(), name );
+ BackgroundOperation::assertNoBgOpInProgForDb(name.c_str());
- dbHolder().close( txn, name );
- db = NULL; // d is now deleted
+ audit::logDropDatabase(&cc(), name);
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase( txn, name );
- }
+ dbHolder().close(txn, name);
+    db = NULL; // db is now deleted
- /** { ..., capped: true, size: ..., max: ... }
- * @param createDefaultIndexes - if false, defers id (and other) index creation.
- * @return true if successful
- */
- Status userCreateNS( OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj options,
- bool createDefaultIndexes ) {
+ getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(txn, name);
+}
- invariant( db );
+/** { ..., capped: true, size: ..., max: ... }
+ * @param createDefaultIndexes - if false, defers id (and other) index creation.
+ * @return Status::OK() if successful
+*/
+Status userCreateNS(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ bool createDefaultIndexes) {
+ invariant(db);
- LOG(1) << "create collection " << ns << ' ' << options;
+ LOG(1) << "create collection " << ns << ' ' << options;
- if ( !NamespaceString::validCollectionComponent(ns) )
- return Status( ErrorCodes::InvalidNamespace,
- str::stream() << "invalid ns: " << ns );
+ if (!NamespaceString::validCollectionComponent(ns))
+ return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << ns);
- Collection* collection = db->getCollection( ns );
+ Collection* collection = db->getCollection(ns);
- if ( collection )
- return Status( ErrorCodes::NamespaceExists,
- "collection already exists" );
+ if (collection)
+ return Status(ErrorCodes::NamespaceExists, "collection already exists");
- CollectionOptions collectionOptions;
- Status status = collectionOptions.parse(options);
- if ( !status.isOK() )
- return status;
+ CollectionOptions collectionOptions;
+ Status status = collectionOptions.parse(options);
+ if (!status.isOK())
+ return status;
- status = validateStorageOptions(collectionOptions.storageEngine,
- &StorageEngine::Factory::validateCollectionStorageOptions);
- if ( !status.isOK() )
- return status;
+ status = validateStorageOptions(collectionOptions.storageEngine,
+ &StorageEngine::Factory::validateCollectionStorageOptions);
+ if (!status.isOK())
+ return status;
- invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes));
+ invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes));
- return Status::OK();
- }
-} // namespace mongo
+ return Status::OK();
+}
+} // namespace mongo
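
The AddCollectionChange and RemoveCollectionChange classes above follow the RecoveryUnit::Change protocol: a change is registered while a WriteUnitOfWork is open, commit() fires if the unit commits, and rollback() must undo the in-memory side effect otherwise. A minimal sketch of that shape, with a hypothetical change type:

// Sketch only: 'ExampleCacheChange' is hypothetical; it mirrors the commit/rollback
// split used by Database::AddCollectionChange above.
class ExampleCacheChange : public RecoveryUnit::Change {
public:
    virtual void commit() {
        // Nothing to do: the in-memory cache already reflects the new state.
    }
    virtual void rollback() {
        // Undo the in-memory mutation so the cache matches the on-disk catalog again.
    }
};

// Registered while a WriteUnitOfWork is open; the RecoveryUnit takes ownership
// of the Change and calls exactly one of commit() or rollback():
// txn->recoveryUnit()->registerChange(new ExampleCacheChange());
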
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index d1f2465ef60..7f07371ceff 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -40,133 +40,139 @@
namespace mongo {
- class Collection;
- class DataFile;
- class DatabaseCatalogEntry;
- class ExtentManager;
- class IndexCatalog;
- class NamespaceDetails;
- class OperationContext;
+class Collection;
+class DataFile;
+class DatabaseCatalogEntry;
+class ExtentManager;
+class IndexCatalog;
+class NamespaceDetails;
+class OperationContext;
- /**
- * Database represents a database database
- * Each database database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
- * NOT memory mapped
- */
- class Database {
- public:
- Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
-
- // must call close first
- ~Database();
+/**
+ * Database represents a database.
+ * Each database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
+ * NOT memory mapped
+*/
+class Database {
+public:
+ Database(OperationContext* txn, StringData name, DatabaseCatalogEntry* dbEntry);
- // closes files and other cleanup see below.
- void close( OperationContext* txn );
+    // must call close() first
+ ~Database();
- const std::string& name() const { return _name; }
+ // closes files and other cleanup see below.
+ void close(OperationContext* txn);
- void clearTmpCollections(OperationContext* txn);
+ const std::string& name() const {
+ return _name;
+ }
- /**
- * Sets a new profiling level for the database and returns the outcome.
- *
- * @param txn Operation context which to use for creating the profiling collection.
- * @param newLevel New profiling level to use.
- */
- Status setProfilingLevel(OperationContext* txn, int newLevel);
+ void clearTmpCollections(OperationContext* txn);
- int getProfilingLevel() const { return _profile; }
- const char* getProfilingNS() const { return _profileName.c_str(); }
+ /**
+ * Sets a new profiling level for the database and returns the outcome.
+ *
+     * @param txn Operation context to use for creating the profiling collection.
+ * @param newLevel New profiling level to use.
+ */
+ Status setProfilingLevel(OperationContext* txn, int newLevel);
- void getStats( OperationContext* opCtx, BSONObjBuilder* output, double scale = 1 );
+ int getProfilingLevel() const {
+ return _profile;
+ }
+ const char* getProfilingNS() const {
+ return _profileName.c_str();
+ }
- const DatabaseCatalogEntry* getDatabaseCatalogEntry() const;
+ void getStats(OperationContext* opCtx, BSONObjBuilder* output, double scale = 1);
- Status dropCollection(OperationContext* txn, StringData fullns);
+ const DatabaseCatalogEntry* getDatabaseCatalogEntry() const;
- Collection* createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options = CollectionOptions(),
- bool createDefaultIndexes = true );
+ Status dropCollection(OperationContext* txn, StringData fullns);
- /**
- * @param ns - this is fully qualified, which is maybe not ideal ???
- */
- Collection* getCollection( StringData ns ) const ;
+ Collection* createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options = CollectionOptions(),
+ bool createDefaultIndexes = true);
- Collection* getCollection( const NamespaceString& ns ) const {
- return getCollection( ns.ns() );
- }
+ /**
+     * @param ns - fully qualified namespace; requiring the full namespace here may not be ideal (TODO).
+ */
+ Collection* getCollection(StringData ns) const;
- Collection* getOrCreateCollection( OperationContext* txn, StringData ns );
+ Collection* getCollection(const NamespaceString& ns) const {
+ return getCollection(ns.ns());
+ }
- Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp );
+ Collection* getOrCreateCollection(OperationContext* txn, StringData ns);
- /**
- * @return name of an existing database with same text name but different
- * casing, if one exists. Otherwise the empty std::string is returned. If
- * 'duplicates' is specified, it is filled with all duplicate names.
- // TODO move???
- */
- static std::string duplicateUncasedName( const std::string &name,
- std::set< std::string > *duplicates = 0 );
+ Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp);
- static Status validateDBName( StringData dbname );
+ /**
+     * @return the name of an existing database with the same text name but different
+     * casing, if one exists; otherwise the empty std::string is returned. If
+     * 'duplicates' is specified, it is filled with all duplicate names.
+     * TODO: move???
+ */
+ static std::string duplicateUncasedName(const std::string& name,
+ std::set<std::string>* duplicates = 0);
+
+ static Status validateDBName(StringData dbname);
- const std::string& getSystemIndexesName() const { return _indexesName; }
- private:
+ const std::string& getSystemIndexesName() const {
+ return _indexesName;
+ }
- /**
- * Gets or creates collection instance from existing metadata,
- * Returns NULL if invalid
- *
- * Note: This does not add the collection to _collections map, that must be done
- * by the caller, who takes onership of the Collection*
- */
- Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
+private:
+ /**
+     * Gets or creates a collection instance from existing metadata.
+     * Returns NULL if invalid.
+     *
+     * Note: This does not add the collection to the _collections map; that must be done
+     * by the caller, who takes ownership of the Collection*.
+ */
+ Collection* _getOrCreateCollectionInstance(OperationContext* txn, StringData fullns);
- /**
- * Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
- * 'reason' for why the cache is being cleared.
- */
- void _clearCollectionCache(OperationContext* txn,
- StringData fullns,
- const std::string& reason);
+ /**
+ * Deregisters and invalidates all cursors on collection 'fullns'. Callers must specify
+ * 'reason' for why the cache is being cleared.
+ */
+ void _clearCollectionCache(OperationContext* txn, StringData fullns, const std::string& reason);
- class AddCollectionChange;
- class RemoveCollectionChange;
+ class AddCollectionChange;
+ class RemoveCollectionChange;
- const std::string _name; // "alleyinsider"
+ const std::string _name; // "alleyinsider"
- DatabaseCatalogEntry* _dbEntry; // not owned here
+ DatabaseCatalogEntry* _dbEntry; // not owned here
- const std::string _profileName; // "alleyinsider.system.profile"
- const std::string _indexesName; // "alleyinsider.system.indexes"
+ const std::string _profileName; // "alleyinsider.system.profile"
+ const std::string _indexesName; // "alleyinsider.system.indexes"
- int _profile; // 0=off.
+ int _profile; // 0=off.
- // TODO: make sure deletes go through
- // this in some ways is a dupe of _namespaceIndex
- // but it points to a much more useful data structure
- typedef StringMap< Collection* > CollectionMap;
- CollectionMap _collections;
+ // TODO: make sure deletes go through
+ // this in some ways is a dupe of _namespaceIndex
+ // but it points to a much more useful data structure
+ typedef StringMap<Collection*> CollectionMap;
+ CollectionMap _collections;
- friend class Collection;
- friend class NamespaceDetails;
- friend class IndexCatalog;
- };
+ friend class Collection;
+ friend class NamespaceDetails;
+ friend class IndexCatalog;
+};
- void dropDatabase(OperationContext* txn, Database* db );
+void dropDatabase(OperationContext* txn, Database* db);
- void dropAllDatabasesExceptLocal(OperationContext* txn);
+void dropAllDatabasesExceptLocal(OperationContext* txn);
- Status userCreateNS( OperationContext* txn,
- Database* db,
- StringData ns,
- BSONObj options,
- bool createDefaultIndexes = true );
+Status userCreateNS(OperationContext* txn,
+ Database* db,
+ StringData ns,
+ BSONObj options,
+ bool createDefaultIndexes = true);
-} // namespace mongo
+} // namespace mongo
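
A quick illustration of how the Database API above is typically driven (a minimal sketch, not part of this diff): DDL such as createCollection() runs under the database X-lock and inside a WriteUnitOfWork. ScopedTransaction, AutoGetDb and WriteUnitOfWork come from elsewhere in the tree, and the "test.foo" namespace is made up for the example.

    Status createFooCollection(OperationContext* txn) {
        ScopedTransaction transaction(txn, MODE_IX);
        AutoGetDb autoDb(txn, "test", MODE_X);  // DDL requires the database X-lock
        Database* db = autoDb.getDb();
        if (!db)
            return Status(ErrorCodes::DatabaseNotFound, "database test does not exist");

        WriteUnitOfWork wunit(txn);  // catalog changes are transactional
        Collection* coll = db->createCollection(txn, "test.foo");
        invariant(coll);
        wunit.commit();
        return Status::OK();
    }
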
diff --git a/src/mongo/db/catalog/database_catalog_entry.h b/src/mongo/db/catalog/database_catalog_entry.h
index 4c981582cc6..6ad95405fce 100644
--- a/src/mongo/db/catalog/database_catalog_entry.h
+++ b/src/mongo/db/catalog/database_catalog_entry.h
@@ -38,74 +38,73 @@
namespace mongo {
- class BSONObjBuilder;
- class CollectionCatalogEntry;
- class IndexAccessMethod;
- class IndexCatalogEntry;
- class OperationContext;
- class RecordStore;
+class BSONObjBuilder;
+class CollectionCatalogEntry;
+class IndexAccessMethod;
+class IndexCatalogEntry;
+class OperationContext;
+class RecordStore;
- struct CollectionOptions;
+struct CollectionOptions;
- class DatabaseCatalogEntry {
- public:
- DatabaseCatalogEntry( StringData name )
- : _name( name.toString() ) {
- }
+class DatabaseCatalogEntry {
+public:
+ DatabaseCatalogEntry(StringData name) : _name(name.toString()) {}
- virtual ~DatabaseCatalogEntry(){ }
+ virtual ~DatabaseCatalogEntry() {}
- const std::string& name() const { return _name; }
+ const std::string& name() const {
+ return _name;
+ }
- virtual bool exists() const = 0;
- virtual bool isEmpty() const = 0;
- virtual bool hasUserData() const = 0;
+ virtual bool exists() const = 0;
+ virtual bool isEmpty() const = 0;
+ virtual bool hasUserData() const = 0;
- virtual int64_t sizeOnDisk( OperationContext* opCtx ) const = 0;
+ virtual int64_t sizeOnDisk(OperationContext* opCtx) const = 0;
- virtual void appendExtraStats( OperationContext* opCtx,
- BSONObjBuilder* out,
- double scale ) const = 0;
+ virtual void appendExtraStats(OperationContext* opCtx,
+ BSONObjBuilder* out,
+ double scale) const = 0;
- // these are hacks :(
- virtual bool isOlderThan24( OperationContext* opCtx ) const = 0;
- virtual void markIndexSafe24AndUp( OperationContext* opCtx ) = 0;
+ // these are hacks :(
+ virtual bool isOlderThan24(OperationContext* opCtx) const = 0;
+ virtual void markIndexSafe24AndUp(OperationContext* opCtx) = 0;
- /**
- * @return true if current files on disk are compatibile with the current version.
- * if we return false, then an upgrade will be required
- */
- virtual bool currentFilesCompatible( OperationContext* opCtx ) const = 0;
+ /**
+     * @return true if the current files on disk are compatible with the current version.
+     * If we return false, then an upgrade will be required.
+ */
+ virtual bool currentFilesCompatible(OperationContext* opCtx) const = 0;
- // ----
+ // ----
- virtual void getCollectionNamespaces( std::list<std::string>* out ) const = 0;
+ virtual void getCollectionNamespaces(std::list<std::string>* out) const = 0;
- // The DatabaseCatalogEntry owns this, do not delete
- virtual CollectionCatalogEntry* getCollectionCatalogEntry( StringData ns ) const = 0;
+ // The DatabaseCatalogEntry owns this, do not delete
+ virtual CollectionCatalogEntry* getCollectionCatalogEntry(StringData ns) const = 0;
- // The DatabaseCatalogEntry owns this, do not delete
- virtual RecordStore* getRecordStore( StringData ns ) const = 0;
+ // The DatabaseCatalogEntry owns this, do not delete
+ virtual RecordStore* getRecordStore(StringData ns) const = 0;
- // Ownership passes to caller
- virtual IndexAccessMethod* getIndex( OperationContext* txn,
- const CollectionCatalogEntry* collection,
- IndexCatalogEntry* index ) = 0;
+ // Ownership passes to caller
+ virtual IndexAccessMethod* getIndex(OperationContext* txn,
+ const CollectionCatalogEntry* collection,
+ IndexCatalogEntry* index) = 0;
- virtual Status createCollection( OperationContext* txn,
- StringData ns,
- const CollectionOptions& options,
- bool allocateDefaultSpace ) = 0;
+ virtual Status createCollection(OperationContext* txn,
+ StringData ns,
+ const CollectionOptions& options,
+ bool allocateDefaultSpace) = 0;
- virtual Status renameCollection( OperationContext* txn,
- StringData fromNS,
- StringData toNS,
- bool stayTemp ) = 0;
+ virtual Status renameCollection(OperationContext* txn,
+ StringData fromNS,
+ StringData toNS,
+ bool stayTemp) = 0;
- virtual Status dropCollection( OperationContext* opCtx,
- StringData ns ) = 0;
+ virtual Status dropCollection(OperationContext* opCtx, StringData ns) = 0;
- private:
- std::string _name;
- };
+private:
+ std::string _name;
+};
}
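
The ownership comments in this header follow two different conventions, which is easy to get wrong. A hypothetical caller (the function name and "test.foo" namespace are illustrative only) that makes them explicit:

    void illustrateOwnership(OperationContext* txn,
                             DatabaseCatalogEntry* dbEntry,
                             CollectionCatalogEntry* collEntry,
                             IndexCatalogEntry* idxEntry) {
        // Borrowed: the DatabaseCatalogEntry owns these, so never delete them.
        RecordStore* rs = dbEntry->getRecordStore("test.foo");
        CollectionCatalogEntry* cce = dbEntry->getCollectionCatalogEntry("test.foo");

        // Exception: getIndex() passes ownership to the caller, so wrap it
        // immediately to avoid leaking on an early return.
        std::unique_ptr<IndexAccessMethod> iam(dbEntry->getIndex(txn, collEntry, idxEntry));
        (void)rs;
        (void)cce;
    }
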
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index bf1238db95f..f4a2cf62970 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -46,169 +46,159 @@
namespace mongo {
- using std::set;
- using std::string;
- using std::stringstream;
+using std::set;
+using std::string;
+using std::stringstream;
namespace {
- StringData _todb(StringData ns) {
- size_t i = ns.find('.');
- if (i == std::string::npos) {
- uassert(13074, "db name can't be empty", ns.size());
- return ns;
- }
+StringData _todb(StringData ns) {
+ size_t i = ns.find('.');
+ if (i == std::string::npos) {
+ uassert(13074, "db name can't be empty", ns.size());
+ return ns;
+ }
- uassert(13075, "db name can't be empty", i > 0);
+ uassert(13075, "db name can't be empty", i > 0);
- const StringData d = ns.substr(0, i);
- uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
+ const StringData d = ns.substr(0, i);
+ uassert(13280, "invalid db name: " + ns.toString(), NamespaceString::validDBName(d));
- return d;
- }
+ return d;
+}
- DatabaseHolder _dbHolder;
+DatabaseHolder _dbHolder;
-} // namespace
+} // namespace
- DatabaseHolder& dbHolder() {
- return _dbHolder;
- }
+DatabaseHolder& dbHolder() {
+ return _dbHolder;
+}
+
+Database* DatabaseHolder::get(OperationContext* txn, StringData ns) const {
+ const StringData db = _todb(ns);
+ invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
- Database* DatabaseHolder::get(OperationContext* txn,
- StringData ns) const {
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ DBs::const_iterator it = _dbs.find(db);
+ if (it != _dbs.end()) {
+ return it->second;
+ }
- const StringData db = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(db, MODE_IS));
+ return NULL;
+}
- stdx::lock_guard<SimpleMutex> lk(_m);
- DBs::const_iterator it = _dbs.find(db);
- if (it != _dbs.end()) {
- return it->second;
+Database* DatabaseHolder::openDb(OperationContext* txn, StringData ns, bool* justCreated) {
+ const StringData dbname = _todb(ns);
+ invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+
+ Database* db = get(txn, ns);
+ if (db) {
+ if (justCreated) {
+ *justCreated = false;
}
- return NULL;
+ return db;
}
- Database* DatabaseHolder::openDb(OperationContext* txn,
- StringData ns,
- bool* justCreated) {
+ // Check casing
+ const string duplicate = Database::duplicateUncasedName(dbname.toString());
+ if (!duplicate.empty()) {
+ stringstream ss;
+ ss << "db already exists with different case already have: [" << duplicate
+ << "] trying to create [" << dbname.toString() << "]";
+ uasserted(ErrorCodes::DatabaseDifferCase, ss.str());
+ }
- const StringData dbname = _todb(ns);
- invariant(txn->lockState()->isDbLockedForMode(dbname, MODE_X));
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ invariant(storageEngine);
- Database* db = get(txn, ns);
- if (db) {
- if (justCreated) {
- *justCreated = false;
- }
+ DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
+ invariant(entry);
+ const bool exists = entry->exists();
+ if (!exists) {
+ audit::logCreateDatabase(&cc(), dbname);
+ }
- return db;
- }
+ if (justCreated) {
+ *justCreated = !exists;
+ }
- // Check casing
- const string duplicate = Database::duplicateUncasedName(dbname.toString());
- if (!duplicate.empty()) {
- stringstream ss;
- ss << "db already exists with different case already have: ["
- << duplicate
- << "] trying to create ["
- << dbname.toString()
- << "]";
- uasserted(ErrorCodes::DatabaseDifferCase, ss.str());
- }
+ // Do this outside of the scoped lock, because database creation does transactional
+ // operations which may block. Only one thread can be inside this method for the same DB
+ // name, because of the requirement for X-lock on the database when we enter. So there is
+ // no way we can insert two different databases for the same name.
+ db = new Database(txn, dbname, entry);
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- invariant(storageEngine);
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ _dbs[dbname] = db;
- DatabaseCatalogEntry* entry = storageEngine->getDatabaseCatalogEntry(txn, dbname);
- invariant(entry);
- const bool exists = entry->exists();
- if (!exists) {
- audit::logCreateDatabase(&cc(), dbname);
- }
+ return db;
+}
- if (justCreated) {
- *justCreated = !exists;
- }
+void DatabaseHolder::close(OperationContext* txn, StringData ns) {
+    // TODO: This should be fine with only a DB X-lock.
+ invariant(txn->lockState()->isW());
- // Do this outside of the scoped lock, because database creation does transactional
- // operations which may block. Only one thread can be inside this method for the same DB
- // name, because of the requirement for X-lock on the database when we enter. So there is
- // no way we can insert two different databases for the same name.
- db = new Database(txn, dbname, entry);
+ const StringData dbName = _todb(ns);
- stdx::lock_guard<SimpleMutex> lk(_m);
- _dbs[dbname] = db;
+ stdx::lock_guard<SimpleMutex> lk(_m);
- return db;
+ DBs::const_iterator it = _dbs.find(dbName);
+ if (it == _dbs.end()) {
+ return;
}
- void DatabaseHolder::close(OperationContext* txn,
- StringData ns) {
- // TODO: This should be fine if only a DB X-lock
- invariant(txn->lockState()->isW());
+ it->second->close(txn);
+ delete it->second;
+ _dbs.erase(it);
- const StringData dbName = _todb(ns);
-
- stdx::lock_guard<SimpleMutex> lk(_m);
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+}
- DBs::const_iterator it = _dbs.find(dbName);
- if (it == _dbs.end()) {
- return;
- }
+bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
+ invariant(txn->lockState()->isW());
- it->second->close( txn );
- delete it->second;
- _dbs.erase(it);
+ stdx::lock_guard<SimpleMutex> lk(_m);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, dbName.toString());
+ set<string> dbs;
+ for (DBs::const_iterator i = _dbs.begin(); i != _dbs.end(); ++i) {
+ dbs.insert(i->first);
}
- bool DatabaseHolder::closeAll(OperationContext* txn, BSONObjBuilder& result, bool force) {
- invariant(txn->lockState()->isW());
+ BSONArrayBuilder bb(result.subarrayStart("dbs"));
+ int nNotClosed = 0;
+ for (set<string>::iterator i = dbs.begin(); i != dbs.end(); ++i) {
+ string name = *i;
- stdx::lock_guard<SimpleMutex> lk(_m);
+ LOG(2) << "DatabaseHolder::closeAll name:" << name;
- set< string > dbs;
- for ( DBs::const_iterator i = _dbs.begin(); i != _dbs.end(); ++i ) {
- dbs.insert( i->first );
+ if (!force && BackgroundOperation::inProgForDb(name)) {
+ log() << "WARNING: can't close database " << name
+ << " because a bg job is in progress - try killOp command";
+ nNotClosed++;
+ continue;
}
- BSONArrayBuilder bb( result.subarrayStart( "dbs" ) );
- int nNotClosed = 0;
- for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
- string name = *i;
-
- LOG(2) << "DatabaseHolder::closeAll name:" << name;
+ Database* db = _dbs[name];
+ db->close(txn);
+ delete db;
- if( !force && BackgroundOperation::inProgForDb(name) ) {
- log() << "WARNING: can't close database "
- << name
- << " because a bg job is in progress - try killOp command";
- nNotClosed++;
- continue;
- }
+ _dbs.erase(name);
- Database* db = _dbs[name];
- db->close( txn );
- delete db;
+ getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(txn, name);
- _dbs.erase( name );
-
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase( txn, name );
-
- bb.append( name );
- }
-
- bb.done();
- if( nNotClosed ) {
- result.append("nNotClosed", nNotClosed);
- }
+ bb.append(name);
+ }
- return true;
+ bb.done();
+ if (nNotClosed) {
+ result.append("nNotClosed", nNotClosed);
}
+
+ return true;
+}
}
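
For reference, the behavior of the _todb() helper above on a few inputs, worked directly from the code:

    // _todb("test.foo") -> "test"   (everything before the first '.')
    // _todb("test")     -> "test"   (no '.': the whole string is the db name)
    // _todb(".foo")     -> uasserts 13075, "db name can't be empty"
    // _todb("")         -> uasserts 13074, "db name can't be empty"
    // _todb("a b.foo")  -> uasserts 13280 if NamespaceString::validDBName() rejects "a b"
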
diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h
index 3238b886f01..ce99747937b 100644
--- a/src/mongo/db/catalog/database_holder.h
+++ b/src/mongo/db/catalog/database_holder.h
@@ -37,62 +37,62 @@
namespace mongo {
- class Database;
- class OperationContext;
+class Database;
+class OperationContext;
+
+/**
+ * Registry of opened databases.
+ */
+class DatabaseHolder {
+public:
+ DatabaseHolder() = default;
/**
- * Registry of opened databases.
+ * Retrieves an already opened database or returns NULL. Must be called with the database
+ * locked in at least IS-mode.
*/
- class DatabaseHolder {
- public:
- DatabaseHolder() = default;
+ Database* get(OperationContext* txn, StringData ns) const;
- /**
- * Retrieves an already opened database or returns NULL. Must be called with the database
- * locked in at least IS-mode.
- */
- Database* get(OperationContext* txn, StringData ns) const;
-
- /**
- * Retrieves a database reference if it is already opened, or opens it if it hasn't been
- * opened/created yet. Must be called with the database locked in X-mode.
- *
- * @param justCreated Returns whether the database was newly created (true) or it already
- * existed (false). Can be NULL if this information is not necessary.
- */
- Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
+ /**
+ * Retrieves a database reference if it is already opened, or opens it if it hasn't been
+ * opened/created yet. Must be called with the database locked in X-mode.
+ *
+ * @param justCreated Returns whether the database was newly created (true) or it already
+ * existed (false). Can be NULL if this information is not necessary.
+ */
+ Database* openDb(OperationContext* txn, StringData ns, bool* justCreated = NULL);
- /**
- * Closes the specified database. Must be called with the database locked in X-mode.
- */
- void close(OperationContext* txn, StringData ns);
+ /**
+ * Closes the specified database. Must be called with the database locked in X-mode.
+ */
+ void close(OperationContext* txn, StringData ns);
- /**
- * Closes all opened databases. Must be called with the global lock acquired in X-mode.
- *
- * @param result Populated with the names of the databases, which were closed.
- * @param force Force close even if something underway - use at shutdown
- */
- bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
+ /**
+ * Closes all opened databases. Must be called with the global lock acquired in X-mode.
+ *
+ * @param result Populated with the names of the databases, which were closed.
+     * @param force Force close even if something is underway - use at shutdown.
+ */
+ bool closeAll(OperationContext* txn, BSONObjBuilder& result, bool force);
- /**
- * Retrieves the names of all currently opened databases. Does not require locking, but it
- * is not guaranteed that the returned set of names will be still valid unless a global
- * lock is held, which would prevent database from disappearing or being created.
- */
- void getAllShortNames( std::set<std::string>& all ) const {
- stdx::lock_guard<SimpleMutex> lk(_m);
- for( DBs::const_iterator j=_dbs.begin(); j!=_dbs.end(); ++j ) {
- all.insert( j->first );
- }
+ /**
+     * Retrieves the names of all currently opened databases. Does not require locking, but
+     * the returned set of names is not guaranteed to remain valid unless a global lock is
+     * held, which would prevent databases from disappearing or being created.
+ */
+ void getAllShortNames(std::set<std::string>& all) const {
+ stdx::lock_guard<SimpleMutex> lk(_m);
+ for (DBs::const_iterator j = _dbs.begin(); j != _dbs.end(); ++j) {
+ all.insert(j->first);
}
+ }
- private:
- typedef StringMap<Database*> DBs;
+private:
+ typedef StringMap<Database*> DBs;
- mutable SimpleMutex _m;
- DBs _dbs;
- };
+ mutable SimpleMutex _m;
+ DBs _dbs;
+};
- DatabaseHolder& dbHolder();
+DatabaseHolder& dbHolder();
}
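
A minimal sketch (not part of this diff) of driving this registry, following the locking rules in the comments above: get() needs at least an IS database lock, openDb() needs X. Lock::DBLock and nsToDatabaseSubstring() come from elsewhere in the tree.

    Database* openOrGet(OperationContext* txn, StringData ns) {
        Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
        bool justCreated = false;
        Database* db = dbHolder().openDb(txn, ns, &justCreated);
        if (justCreated) {
            // first use of this database in this process, or it was newly created
        }
        return db;
    }
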
diff --git a/src/mongo/db/catalog/document_validation.cpp b/src/mongo/db/catalog/document_validation.cpp
index 73a6540a391..c0377028e3f 100644
--- a/src/mongo/db/catalog/document_validation.cpp
+++ b/src/mongo/db/catalog/document_validation.cpp
@@ -31,6 +31,6 @@
#include "mongo/db/catalog/document_validation.h"
namespace mongo {
- const OperationContext::Decoration<bool> documentValidationDisabled =
- OperationContext::declareDecoration<bool>();
+const OperationContext::Decoration<bool> documentValidationDisabled =
+ OperationContext::declareDecoration<bool>();
}
diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h
index d03b1627a56..2bc8f8b4787 100644
--- a/src/mongo/db/catalog/document_validation.h
+++ b/src/mongo/db/catalog/document_validation.h
@@ -33,40 +33,40 @@
#include "mongo/db/operation_context.h"
namespace mongo {
- /**
- * If true, Collection should do no validation of writes from this OperationContext.
- *
- * Note that Decorations are value-constructed so this defaults to false.
- */
- extern const OperationContext::Decoration<bool> documentValidationDisabled;
+/**
+ * If true, Collection should do no validation of writes from this OperationContext.
+ *
+ * Note that Decorations are value-constructed so this defaults to false.
+ */
+extern const OperationContext::Decoration<bool> documentValidationDisabled;
- inline StringData bypassDocumentValidationCommandOption() {
- return "bypassDocumentValidation";
- }
+inline StringData bypassDocumentValidationCommandOption() {
+ return "bypassDocumentValidation";
+}
- inline bool shouldBypassDocumentValidationForCommand(const BSONObj& cmdObj) {
- return cmdObj[bypassDocumentValidationCommandOption()].trueValue();
- }
+inline bool shouldBypassDocumentValidationForCommand(const BSONObj& cmdObj) {
+ return cmdObj[bypassDocumentValidationCommandOption()].trueValue();
+}
- /**
- * Disables document validation on a single OperationContext while in scope.
- * Resets to original value when leaving scope so they are safe to nest.
- */
- class DisableDocumentValidation {
- MONGO_DISALLOW_COPYING(DisableDocumentValidation);
- public:
- DisableDocumentValidation(OperationContext* txn)
- : _txn(txn)
- , _initialState(documentValidationDisabled(_txn)) {
- documentValidationDisabled(_txn) = true;
- }
+/**
+ * Disables document validation on a single OperationContext while in scope.
+ * Resets to original value when leaving scope so they are safe to nest.
+ */
+class DisableDocumentValidation {
+ MONGO_DISALLOW_COPYING(DisableDocumentValidation);
- ~DisableDocumentValidation() {
- documentValidationDisabled(_txn) = _initialState;
- }
+public:
+ DisableDocumentValidation(OperationContext* txn)
+ : _txn(txn), _initialState(documentValidationDisabled(_txn)) {
+ documentValidationDisabled(_txn) = true;
+ }
+
+ ~DisableDocumentValidation() {
+ documentValidationDisabled(_txn) = _initialState;
+ }
- private:
- OperationContext* const _txn;
- const bool _initialState;
- };
+private:
+ OperationContext* const _txn;
+ const bool _initialState;
+};
}
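
Since the guard above saves the previous decoration value at construction and restores it at destruction, scopes nest safely, as the header's comment claims. A small sketch of that behavior:

    void nestingExample(OperationContext* txn) {
        invariant(!documentValidationDisabled(txn));
        {
            DisableDocumentValidation outer(txn);  // validation now bypassed
            {
                DisableDocumentValidation inner(txn);  // still bypassed
            }  // inner restores the value saved at its construction: still true
            invariant(documentValidationDisabled(txn));
        }  // outer restores the original value: false
        invariant(!documentValidationDisabled(txn));
    }
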
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
index cb932432dab..67bfb70ae45 100644
--- a/src/mongo/db/catalog/drop_collection.cpp
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -48,54 +48,55 @@
#include "mongo/util/log.h"
namespace mongo {
- Status dropCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- BSONObjBuilder& result) {
- if (!serverGlobalParams.quiet) {
- log() << "CMD: drop " << collectionName;
- }
+Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result) {
+ if (!serverGlobalParams.quiet) {
+ log() << "CMD: drop " << collectionName;
+ }
- std::string dbname = collectionName.db().toString();
+ std::string dbname = collectionName.db().toString();
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection(collectionName) : nullptr;
+ AutoGetDb autoDb(txn, dbname, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(collectionName) : nullptr;
- // If db/collection does not exist, short circuit and return.
- if ( !db || !coll ) {
- return Status(ErrorCodes::NamespaceNotFound, "ns not found");
- }
- OldClientContext context(txn, collectionName);
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
+ OldClientContext context(txn, collectionName);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(collectionName);
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping collection "
- << collectionName.ns());
- }
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping collection "
+ << collectionName.ns());
+ }
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
- BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(collectionName.ns());
- WriteUnitOfWork wunit(txn);
- Status s = db->dropCollection(txn, collectionName.ns());
+ WriteUnitOfWork wunit(txn);
+ Status s = db->dropCollection(txn, collectionName.ns());
- result.append("ns", collectionName);
+ result.append("ns", collectionName);
- if ( !s.isOK() ) {
- return s;
- }
+ if (!s.isOK()) {
+ return s;
+ }
- result.append("nIndexesWas", numIndexes);
+ result.append("nIndexesWas", numIndexes);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
- return Status::OK();
+ wunit.commit();
}
-} // namespace mongo
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
+ return Status::OK();
+}
+} // namespace mongo
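
The MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END pair used here (and again in dropDatabase and dropIndexes below) wraps the body in a retry loop. Roughly, and only as a sketch rather than the literal macro expansion:

    while (true) {
        try {
            // ... body: re-take locks, re-check preconditions, perform the drop
            // inside a WriteUnitOfWork ...
            break;  // wunit.commit() succeeded (or a Status was returned early)
        } catch (const WriteConflictException&) {
            // log, back off, abandon the current storage snapshot, then retry
            // the body from the top
        }
    }
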
diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h
index a12f5e8419c..c62b2c376c5 100644
--- a/src/mongo/db/catalog/drop_collection.h
+++ b/src/mongo/db/catalog/drop_collection.h
@@ -29,15 +29,15 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops the collection "collectionName" and populates "result" with statistics about what
- * was removed.
- */
- Status dropCollection(OperationContext* txn,
- const NamespaceString& collectionName,
- BSONObjBuilder& result);
-} // namespace mongo
+/**
+ * Drops the collection "collectionName" and populates "result" with statistics about what
+ * was removed.
+ */
+Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
index aa274ad2b28..7996c7c2eee 100644
--- a/src/mongo/db/catalog/drop_database.cpp
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -48,43 +48,43 @@
#include "mongo/util/log.h"
namespace mongo {
- Status dropDatabase(OperationContext* txn, const std::string& dbName) {
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDB(txn, dbName, MODE_X);
- Database* const db = autoDB.getDb();
- if (!db) {
- return Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "Could not drop database " << dbName
- << " because it does not exist");
+Status dropDatabase(OperationContext* txn, const std::string& dbName) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ AutoGetDb autoDB(txn, dbName, MODE_X);
+ Database* const db = autoDB.getDb();
+ if (!db) {
+ return Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "Could not drop database " << dbName
+ << " because it does not exist");
+ }
+ OldClientContext context(txn, dbName);
- }
- OldClientContext context(txn, dbName);
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping database " << dbName);
+ }
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping database " << dbName);
- }
+ log() << "dropDatabase " << dbName << " starting";
- log() << "dropDatabase " << dbName << " starting";
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+ mongo::dropDatabase(txn, db);
- BackgroundOperation::assertNoBgOpInProgForDb(dbName);
- mongo::dropDatabase(txn, db);
+ log() << "dropDatabase " << dbName << " finished";
- log() << "dropDatabase " << dbName << " finished";
+ WriteUnitOfWork wunit(txn);
- WriteUnitOfWork wunit(txn);
+ getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
- getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
-
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
-
- return Status::OK();
+ wunit.commit();
}
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
+
+ return Status::OK();
+}
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
index 184d66d5bf2..b60e817be27 100644
--- a/src/mongo/db/catalog/drop_database.h
+++ b/src/mongo/db/catalog/drop_database.h
@@ -29,10 +29,10 @@
#include "mongo/base/status.h"
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * Drops the database "dbName".
- */
- Status dropDatabase(OperationContext* txn, const std::string& dbName);
-} // namespace mongo
+/**
+ * Drops the database "dbName".
+ */
+Status dropDatabase(OperationContext* txn, const std::string& dbName);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 344b247290a..1f338303b69 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -48,117 +48,114 @@
namespace mongo {
namespace {
- Status wrappedRun(OperationContext* txn,
- const StringData& dbname,
- const std::string& toDeleteNs,
- Database* const db,
- const BSONObj& jsobj,
- BSONObjBuilder* anObjBuilder) {
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: dropIndexes " << toDeleteNs;
- }
- Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
-
- // If db/collection does not exist, short circuit and return.
- if (!db || !collection) {
- return Status(ErrorCodes::NamespaceNotFound, "ns not found");
- }
-
- OldClientContext ctx(txn, toDeleteNs);
- BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
-
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
-
+Status wrappedRun(OperationContext* txn,
+ const StringData& dbname,
+ const std::string& toDeleteNs,
+ Database* const db,
+ const BSONObj& jsobj,
+ BSONObjBuilder* anObjBuilder) {
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: dropIndexes " << toDeleteNs;
+ }
+ Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
- BSONElement f = jsobj.getField("index");
- if (f.type() == String) {
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !collection) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
- std::string indexToDelete = f.valuestr();
+ OldClientContext ctx(txn, toDeleteNs);
+ BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs);
- if (indexToDelete == "*") {
- Status s = indexCatalog->dropAllIndexes(txn, false);
- if (!s.isOK()) {
- return s;
- }
- anObjBuilder->append("msg", "non-_id indexes dropped for collection");
- return Status::OK();
- }
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn,
- indexToDelete);
- if (desc == NULL) {
- return Status(ErrorCodes::IndexNotFound,
- str::stream() << "index not found with name ["
- << indexToDelete << "]");
- }
- if (desc->isIdIndex()) {
- return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
- }
+ BSONElement f = jsobj.getField("index");
+ if (f.type() == String) {
+ std::string indexToDelete = f.valuestr();
- Status s = indexCatalog->dropIndex(txn, desc);
+ if (indexToDelete == "*") {
+ Status s = indexCatalog->dropAllIndexes(txn, false);
if (!s.isOK()) {
return s;
}
-
+ anObjBuilder->append("msg", "non-_id indexes dropped for collection");
return Status::OK();
}
- if (f.type() == Object) {
- IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
- if (desc == NULL) {
- return Status(ErrorCodes::InvalidOptions,
- str::stream() << "can't find index with key: "
- << f.embeddedObject().toString());
- }
-
- if (desc->isIdIndex()) {
- return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
- }
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn, indexToDelete);
+ if (desc == NULL) {
+ return Status(ErrorCodes::IndexNotFound,
+ str::stream() << "index not found with name [" << indexToDelete << "]");
+ }
- Status s = indexCatalog->dropIndex(txn, desc);
- if (!s.isOK()) {
- return s;
- }
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
- return Status::OK();
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
}
- return Status(ErrorCodes::IndexNotFound, "invalid index name spec");
+ return Status::OK();
}
-} // namespace
-
- Status dropIndexes(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& idxDescriptor,
- BSONObjBuilder* result) {
- StringData dbName = ns.db();
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbName, MODE_X);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster,
- str::stream() << "Not primary while dropping indexes in "
- << ns.toString());
- }
- WriteUnitOfWork wunit(txn);
- Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
- if (!status.isOK()) {
- return status;
- }
- getGlobalServiceContext()->getOpObserver()->onDropIndex(txn,
- dbName.toString() + ".$cmd",
- idxDescriptor);
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ if (f.type() == Object) {
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
+ if (desc == NULL) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream()
+ << "can't find index with key: " << f.embeddedObject().toString());
+ }
+
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
+
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
+ }
+
return Status::OK();
}
-} // namespace mongo
+ return Status(ErrorCodes::IndexNotFound, "invalid index name spec");
+}
+} // namespace
+
+Status dropIndexes(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& idxDescriptor,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(ns);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping indexes in "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
+ Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
+ if (!status.isOK()) {
+ return status;
+ }
+ getGlobalServiceContext()->getOpObserver()->onDropIndex(
+ txn, dbName.toString() + ".$cmd", idxDescriptor);
+ wunit.commit();
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ return Status::OK();
+}
+
+} // namespace mongo
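
For reference, the three shapes of the command's "index" field that wrappedRun() above accepts:

    // { dropIndexes: "coll", index: "*" }            -> drop all non-_id indexes
    // { dropIndexes: "coll", index: "myIndexName" }  -> drop one index by name
    // { dropIndexes: "coll", index: { a: 1 } }       -> drop one index by key pattern
    // Any other type falls through to the IndexNotFound "invalid index name spec" error,
    // and the _id index is refused in both the by-name and by-key-pattern paths.
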
diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h
index ba07687098e..931fa348019 100644
--- a/src/mongo/db/catalog/drop_indexes.h
+++ b/src/mongo/db/catalog/drop_indexes.h
@@ -29,18 +29,18 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class NamespaceString;
- class OperationContext;
+class BSONObj;
+class BSONObjBuilder;
+class NamespaceString;
+class OperationContext;
- /**
- * Drops the index from collection "ns" that matches the "idxDescriptor" and populates
- * "result" with some statistics about the dropped index.
- */
- Status dropIndexes(OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& idxDescriptor,
- BSONObjBuilder* result);
+/**
+ * Drops the index from collection "ns" that matches the "idxDescriptor" and populates
+ * "result" with some statistics about the dropped index.
+ */
+Status dropIndexes(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& idxDescriptor,
+ BSONObjBuilder* result);
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/db/catalog/head_manager.h b/src/mongo/db/catalog/head_manager.h
index f3b198b276e..7a671ccf69f 100644
--- a/src/mongo/db/catalog/head_manager.h
+++ b/src/mongo/db/catalog/head_manager.h
@@ -32,19 +32,19 @@
namespace mongo {
- class OperationContext;
+class OperationContext;
- /**
- * An abstraction for setting and getting data about the 'head' of an index. This is the data
- * that lives in the catalog to identify where an index lives.
- */
- class HeadManager {
- public:
- virtual ~HeadManager() { }
+/**
+ * An abstraction for setting and getting data about the 'head' of an index. This is the data
+ * that lives in the catalog to identify where an index lives.
+ */
+class HeadManager {
+public:
+ virtual ~HeadManager() {}
- virtual const RecordId getHead(OperationContext* txn) const = 0;
+ virtual const RecordId getHead(OperationContext* txn) const = 0;
- virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
- };
+ virtual void setHead(OperationContext* txn, const RecordId newHead) = 0;
+};
} // namespace mongo
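
To make the contract above concrete, a hypothetical, illustration-only implementation. Real implementations persist the head through the catalog, and setHead() must participate in the unit of work (rollback-safe via the RecoveryUnit), which this sketch deliberately omits:

    class InMemoryHeadManager final : public HeadManager {
    public:
        const RecordId getHead(OperationContext* txn) const override {
            return _head;
        }

        void setHead(OperationContext* txn, const RecordId newHead) override {
            _head = newHead;  // NOT rollback-safe; illustration only
        }

    private:
        RecordId _head;
    };
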
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 734c6c6e412..85a1b8f401f 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -65,1215 +65,1140 @@
namespace mongo {
- using std::unique_ptr;
- using std::endl;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::endl;
+using std::string;
+using std::vector;
- static const int INDEX_CATALOG_INIT = 283711;
- static const int INDEX_CATALOG_UNINIT = 654321;
+static const int INDEX_CATALOG_INIT = 283711;
+static const int INDEX_CATALOG_UNINIT = 654321;
- // What's the default version of our indices?
- const int DefaultIndexVersionNumber = 1;
+// What's the default version of our indices?
+const int DefaultIndexVersionNumber = 1;
- const BSONObj IndexCatalog::_idObj = BSON( "_id" << 1 );
+const BSONObj IndexCatalog::_idObj = BSON("_id" << 1);
- // -------------
+// -------------
- IndexCatalog::IndexCatalog( Collection* collection )
- : _magic(INDEX_CATALOG_UNINIT),
- _collection( collection ),
- _maxNumIndexesAllowed(_collection->getCatalogEntry()->getMaxAllowedIndexes()) {
- }
+IndexCatalog::IndexCatalog(Collection* collection)
+ : _magic(INDEX_CATALOG_UNINIT),
+ _collection(collection),
+ _maxNumIndexesAllowed(_collection->getCatalogEntry()->getMaxAllowedIndexes()) {}
- IndexCatalog::~IndexCatalog() {
- if ( _magic != INDEX_CATALOG_UNINIT ) {
- // only do this check if we haven't been initialized
- _checkMagic();
- }
- _magic = 123456;
+IndexCatalog::~IndexCatalog() {
+ if (_magic != INDEX_CATALOG_UNINIT) {
+ // only do this check if we haven't been initialized
+ _checkMagic();
}
+ _magic = 123456;
+}
- Status IndexCatalog::init(OperationContext* txn) {
- vector<string> indexNames;
- _collection->getCatalogEntry()->getAllIndexes( txn, &indexNames );
-
- for ( size_t i = 0; i < indexNames.size(); i++ ) {
- const string& indexName = indexNames[i];
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( txn,
- indexName ).getOwned();
-
- if ( !_collection->getCatalogEntry()->isIndexReady( txn, indexName ) ) {
- _unfinishedIndexes.push_back( spec );
- continue;
- }
+Status IndexCatalog::init(OperationContext* txn) {
+ vector<string> indexNames;
+ _collection->getCatalogEntry()->getAllIndexes(txn, &indexNames);
- BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor* descriptor = new IndexDescriptor( _collection,
- _getAccessMethodName(txn,
- keyPattern),
- spec );
- const bool initFromDisk = true;
- IndexCatalogEntry* entry = _setupInMemoryStructures( txn, descriptor, initFromDisk );
+ for (size_t i = 0; i < indexNames.size(); i++) {
+ const string& indexName = indexNames[i];
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
- fassert( 17340, entry->isReady( txn ) );
+ if (!_collection->getCatalogEntry()->isIndexReady(txn, indexName)) {
+ _unfinishedIndexes.push_back(spec);
+ continue;
}
- if ( _unfinishedIndexes.size() ) {
- // if there are left over indexes, we don't let anyone add/drop indexes
- // until someone goes and fixes them
- log() << "found " << _unfinishedIndexes.size()
- << " index(es) that wasn't finished before shutdown";
- }
+ BSONObj keyPattern = spec.getObjectField("key");
+ IndexDescriptor* descriptor =
+ new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ const bool initFromDisk = true;
+ IndexCatalogEntry* entry = _setupInMemoryStructures(txn, descriptor, initFromDisk);
- _magic = INDEX_CATALOG_INIT;
- return Status::OK();
+ fassert(17340, entry->isReady(txn));
}
-namespace {
- class IndexCleanupOnRollback : public RecoveryUnit::Change {
- public:
- /**
- * None of these pointers are owned by this class.
- */
- IndexCleanupOnRollback(OperationContext* txn,
- Collection* collection,
- IndexCatalogEntryContainer* entries,
- const IndexDescriptor* desc)
- : _txn(txn),
- _collection(collection),
- _entries(entries),
- _desc(desc) {
- }
-
- virtual void commit() {}
-
- virtual void rollback() {
- _entries->remove(_desc);
- _collection->infoCache()->reset(_txn);
- }
-
- private:
- OperationContext* _txn;
- Collection* _collection;
- IndexCatalogEntryContainer* _entries;
- const IndexDescriptor* _desc;
- };
-} // namespace
-
- IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
- IndexDescriptor* descriptor,
- bool initFromDisk) {
- unique_ptr<IndexDescriptor> descriptorCleanup( descriptor );
-
- unique_ptr<IndexCatalogEntry> entry( new IndexCatalogEntry( _collection->ns().ns(),
- _collection->getCatalogEntry(),
- descriptorCleanup.release(),
- _collection->infoCache() ) );
-
- entry->init( txn, _collection->_dbce->getIndex( txn,
- _collection->getCatalogEntry(),
- entry.get() ) );
-
- IndexCatalogEntry* save = entry.get();
- _entries.add( entry.release() );
-
- if (!initFromDisk) {
- txn->recoveryUnit()->registerChange(new IndexCleanupOnRollback(txn,
- _collection,
- &_entries,
- descriptor));
- }
+ if (_unfinishedIndexes.size()) {
+        // if there are leftover indexes, we don't let anyone add/drop indexes
+ // until someone goes and fixes them
+ log() << "found " << _unfinishedIndexes.size()
+ << " index(es) that wasn't finished before shutdown";
+ }
- invariant( save == _entries.find( descriptor ) );
- invariant( save == _entries.find( descriptor->indexName() ) );
+ _magic = INDEX_CATALOG_INIT;
+ return Status::OK();
+}
- return save;
+namespace {
+class IndexCleanupOnRollback : public RecoveryUnit::Change {
+public:
+ /**
+ * None of these pointers are owned by this class.
+ */
+ IndexCleanupOnRollback(OperationContext* txn,
+ Collection* collection,
+ IndexCatalogEntryContainer* entries,
+ const IndexDescriptor* desc)
+ : _txn(txn), _collection(collection), _entries(entries), _desc(desc) {}
+
+ virtual void commit() {}
+
+ virtual void rollback() {
+ _entries->remove(_desc);
+ _collection->infoCache()->reset(_txn);
}
- bool IndexCatalog::ok() const {
- return ( _magic == INDEX_CATALOG_INIT );
+private:
+ OperationContext* _txn;
+ Collection* _collection;
+ IndexCatalogEntryContainer* _entries;
+ const IndexDescriptor* _desc;
+};
+} // namespace
+
+IndexCatalogEntry* IndexCatalog::_setupInMemoryStructures(OperationContext* txn,
+ IndexDescriptor* descriptor,
+ bool initFromDisk) {
+ unique_ptr<IndexDescriptor> descriptorCleanup(descriptor);
+
+ unique_ptr<IndexCatalogEntry> entry(new IndexCatalogEntry(_collection->ns().ns(),
+ _collection->getCatalogEntry(),
+ descriptorCleanup.release(),
+ _collection->infoCache()));
+
+ entry->init(txn,
+ _collection->_dbce->getIndex(txn, _collection->getCatalogEntry(), entry.get()));
+
+ IndexCatalogEntry* save = entry.get();
+ _entries.add(entry.release());
+
+ if (!initFromDisk) {
+ txn->recoveryUnit()->registerChange(
+ new IndexCleanupOnRollback(txn, _collection, &_entries, descriptor));
}
- void IndexCatalog::_checkMagic() const {
- if ( ok() ) {
- return;
- }
- log() << "IndexCatalog::_magic wrong, is : " << _magic;
- fassertFailed(17198);
- }
+ invariant(save == _entries.find(descriptor));
+ invariant(save == _entries.find(descriptor->indexName()));
- Status IndexCatalog::checkUnfinished() const {
- if ( _unfinishedIndexes.size() == 0 )
- return Status::OK();
+ return save;
+}
- return Status( ErrorCodes::InternalError,
- str::stream()
- << "IndexCatalog has left over indexes that must be cleared"
- << " ns: " << _collection->ns().ns() );
- }
+bool IndexCatalog::ok() const {
+ return (_magic == INDEX_CATALOG_INIT);
+}
- bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn,
- const BSONObj& keyPattern) const {
- string pluginName = IndexNames::findPluginName(keyPattern);
- bool known = IndexNames::isKnownName(pluginName);
-
- if ( !_collection->_dbce->isOlderThan24( txn ) ) {
- // RulesFor24+
- // This assert will be triggered when downgrading from a future version that
- // supports an index plugin unsupported by this version.
- uassert(17197, str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index " << keyPattern,
- known);
- return false;
- }
+void IndexCatalog::_checkMagic() const {
+ if (ok()) {
+ return;
+ }
+ log() << "IndexCatalog::_magic wrong, is : " << _magic;
+ fassertFailed(17198);
+}
- // RulesFor22
- if (!known) {
- log() << "warning: can't find plugin [" << pluginName << "]" << endl;
- return true;
- }
+Status IndexCatalog::checkUnfinished() const {
+ if (_unfinishedIndexes.size() == 0)
+ return Status::OK();
- if (!IndexNames::existedBefore24(pluginName)) {
- warning() << "Treating index " << keyPattern << " as ascending since "
- << "it was created before 2.4 and '" << pluginName << "' "
- << "was not a valid type at that time."
- << endl;
- return true;
- }
+ return Status(ErrorCodes::InternalError,
+ str::stream() << "IndexCatalog has left over indexes that must be cleared"
+ << " ns: " << _collection->ns().ns());
+}
+bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
+ string pluginName = IndexNames::findPluginName(keyPattern);
+ bool known = IndexNames::isKnownName(pluginName);
+
+ if (!_collection->_dbce->isOlderThan24(txn)) {
+ // RulesFor24+
+ // This assert will be triggered when downgrading from a future version that
+ // supports an index plugin unsupported by this version.
+ uassert(17197,
+ str::stream() << "Invalid index type '" << pluginName << "' "
+ << "in index " << keyPattern,
+ known);
return false;
}
- string IndexCatalog::_getAccessMethodName(OperationContext* txn,
- const BSONObj& keyPattern) const {
- if ( _shouldOverridePlugin(txn, keyPattern) ) {
- return "";
- }
-
- return IndexNames::findPluginName(keyPattern);
+ // RulesFor22
+ if (!known) {
+ log() << "warning: can't find plugin [" << pluginName << "]" << endl;
+ return true;
}
+ if (!IndexNames::existedBefore24(pluginName)) {
+ warning() << "Treating index " << keyPattern << " as ascending since "
+ << "it was created before 2.4 and '" << pluginName << "' "
+ << "was not a valid type at that time." << endl;
+ return true;
+ }
- // ---------------------------
-
- Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded( OperationContext* txn,
- const string& newPluginName ) {
-
- // first check if requested index requires pdfile minor version to be bumped
- if ( IndexNames::existedBefore24(newPluginName) ) {
- return Status::OK();
- }
-
- DatabaseCatalogEntry* dbce = _collection->_dbce;
-
- if ( !dbce->isOlderThan24( txn ) ) {
- return Status::OK(); // these checks have already been done
- }
+ return false;
+}
- // Everything below is MMAPv1 specific since it was the only storage engine that existed
- // before 2.4. We look at all indexes in this database to make sure that none of them use
- // plugins that didn't exist before 2.4. If that holds, we mark the database as "2.4-clean"
- // which allows creation of indexes using new plugins.
-
- RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
- auto cursor = indexes->getCursor(txn);
- while (auto record = cursor->next()) {
- const BSONObj index = record->data.releaseToBson();
- const BSONObj key = index.getObjectField("key");
- const string plugin = IndexNames::findPluginName(key);
- if ( IndexNames::existedBefore24(plugin) )
- continue;
+string IndexCatalog::_getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const {
+ if (_shouldOverridePlugin(txn, keyPattern)) {
+ return "";
+ }
- const string errmsg = str::stream()
- << "Found pre-existing index " << index << " with invalid type '" << plugin << "'. "
- << "Disallowing creation of new index type '" << newPluginName << "'. See "
- << "http://dochub.mongodb.org/core/index-type-changes"
- ;
+ return IndexNames::findPluginName(keyPattern);
+}
- return Status( ErrorCodes::CannotCreateIndex, errmsg );
- }
- dbce->markIndexSafe24AndUp( txn );
+// ---------------------------
+Status IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ const string& newPluginName) {
+ // first check if requested index requires pdfile minor version to be bumped
+ if (IndexNames::existedBefore24(newPluginName)) {
return Status::OK();
}
- StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate( OperationContext* txn,
- const BSONObj& original ) const {
- Status status = _isSpecOk( original );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- BSONObj fixed = _fixIndexSpec( original );
+ DatabaseCatalogEntry* dbce = _collection->_dbce;
- // we double check with new index spec
- status = _isSpecOk( fixed );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- status = _doesSpecConflictWithExisting( txn, fixed );
- if ( !status.isOK() )
- return StatusWith<BSONObj>( status );
-
- return StatusWith<BSONObj>( fixed );
+ if (!dbce->isOlderThan24(txn)) {
+ return Status::OK(); // these checks have already been done
}
- Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
- invariant(_collection->numRecords(txn) == 0);
+ // Everything below is MMAPv1 specific since it was the only storage engine that existed
+ // before 2.4. We look at all indexes in this database to make sure that none of them use
+ // plugins that didn't exist before 2.4. If that holds, we mark the database as "2.4-clean"
+ // which allows creation of indexes using new plugins.
+
+ RecordStore* indexes = dbce->getRecordStore(dbce->name() + ".system.indexes");
+ auto cursor = indexes->getCursor(txn);
+ while (auto record = cursor->next()) {
+ const BSONObj index = record->data.releaseToBson();
+ const BSONObj key = index.getObjectField("key");
+ const string plugin = IndexNames::findPluginName(key);
+ if (IndexNames::existedBefore24(plugin))
+ continue;
+
+ const string errmsg = str::stream()
+ << "Found pre-existing index " << index << " with invalid type '" << plugin << "'. "
+ << "Disallowing creation of new index type '" << newPluginName << "'. See "
+ << "http://dochub.mongodb.org/core/index-type-changes";
+
+ return Status(ErrorCodes::CannotCreateIndex, errmsg);
+ }
- _checkMagic();
- Status status = checkUnfinished();
- if ( !status.isOK() )
- return status;
+ dbce->markIndexSafe24AndUp(txn);
- StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate( txn, spec );
- status = statusWithSpec.getStatus();
- if ( !status.isOK() )
- return status;
- spec = statusWithSpec.getValue();
+ return Status::OK();
+}
- string pluginName = IndexNames::findPluginName( spec["key"].Obj() );
- if ( pluginName.size() ) {
- Status s = _upgradeDatabaseMinorVersionIfNeeded( txn, pluginName );
- if ( !s.isOK() )
- return s;
- }
+StatusWith<BSONObj> IndexCatalog::prepareSpecForCreate(OperationContext* txn,
+ const BSONObj& original) const {
+ Status status = _isSpecOk(original);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- // now going to touch disk
- IndexBuildBlock indexBuildBlock(txn, _collection, spec);
- status = indexBuildBlock.init();
- if ( !status.isOK() )
- return status;
+ BSONObj fixed = _fixIndexSpec(original);
- // sanity checks, etc...
- IndexCatalogEntry* entry = indexBuildBlock.getEntry();
- invariant( entry );
- IndexDescriptor* descriptor = entry->descriptor();
- invariant( descriptor );
- invariant( entry == _entries.find( descriptor ) );
+ // we double check with new index spec
+ status = _isSpecOk(fixed);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- status = entry->accessMethod()->initializeAsEmpty(txn);
- if (!status.isOK())
- return status;
- indexBuildBlock.success();
+ status = _doesSpecConflictWithExisting(txn, fixed);
+ if (!status.isOK())
+ return StatusWith<BSONObj>(status);
- // sanity check
- invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
+ return StatusWith<BSONObj>(fixed);
+}
- return Status::OK();
+Status IndexCatalog::createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ invariant(_collection->numRecords(txn) == 0);
+
+ _checkMagic();
+ Status status = checkUnfinished();
+ if (!status.isOK())
+ return status;
+
+ StatusWith<BSONObj> statusWithSpec = prepareSpecForCreate(txn, spec);
+ status = statusWithSpec.getStatus();
+ if (!status.isOK())
+ return status;
+ spec = statusWithSpec.getValue();
+
+ string pluginName = IndexNames::findPluginName(spec["key"].Obj());
+ if (pluginName.size()) {
+ Status s = _upgradeDatabaseMinorVersionIfNeeded(txn, pluginName);
+ if (!s.isOK())
+ return s;
}
- IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
- Collection* collection,
- const BSONObj& spec )
- : _collection( collection ),
- _catalog( collection->getIndexCatalog() ),
- _ns( _catalog->_collection->ns().ns() ),
- _spec( spec.getOwned() ),
- _entry( NULL ),
- _txn(txn) {
-
- invariant( collection );
- }
+ // now going to touch disk
+ IndexBuildBlock indexBuildBlock(txn, _collection, spec);
+ status = indexBuildBlock.init();
+ if (!status.isOK())
+ return status;
- Status IndexCatalog::IndexBuildBlock::init() {
- // need this first for names, etc...
- BSONObj keyPattern = _spec.getObjectField("key");
- IndexDescriptor* descriptor = new IndexDescriptor( _collection,
- IndexNames::findPluginName(keyPattern),
- _spec );
- unique_ptr<IndexDescriptor> descriptorCleaner( descriptor );
+ // sanity checks, etc...
+ IndexCatalogEntry* entry = indexBuildBlock.getEntry();
+ invariant(entry);
+ IndexDescriptor* descriptor = entry->descriptor();
+ invariant(descriptor);
+ invariant(entry == _entries.find(descriptor));
- _indexName = descriptor->indexName();
- _indexNamespace = descriptor->indexNamespace();
+ status = entry->accessMethod()->initializeAsEmpty(txn);
+ if (!status.isOK())
+ return status;
+ indexBuildBlock.success();
- /// ---------- setup on disk structures ----------------
+ // sanity check
+ invariant(_collection->getCatalogEntry()->isIndexReady(txn, descriptor->indexName()));
- Status status = _collection->getCatalogEntry()->prepareForIndexBuild( _txn, descriptor );
- if ( !status.isOK() )
- return status;
+ return Status::OK();
+}
- /// ---------- setup in memory structures ----------------
- const bool initFromDisk = false;
- _entry = _catalog->_setupInMemoryStructures(_txn,
- descriptorCleaner.release(),
- initFromDisk);
+IndexCatalog::IndexBuildBlock::IndexBuildBlock(OperationContext* txn,
+ Collection* collection,
+ const BSONObj& spec)
+ : _collection(collection),
+ _catalog(collection->getIndexCatalog()),
+ _ns(_catalog->_collection->ns().ns()),
+ _spec(spec.getOwned()),
+ _entry(NULL),
+ _txn(txn) {
+ invariant(collection);
+}
- return Status::OK();
- }
+Status IndexCatalog::IndexBuildBlock::init() {
+ // need this first for names, etc...
+ BSONObj keyPattern = _spec.getObjectField("key");
+ IndexDescriptor* descriptor =
+ new IndexDescriptor(_collection, IndexNames::findPluginName(keyPattern), _spec);
+ unique_ptr<IndexDescriptor> descriptorCleaner(descriptor);
- IndexCatalog::IndexBuildBlock::~IndexBuildBlock() {
- // Don't need to call fail() here, as rollback will clean everything up for us.
- }
+ _indexName = descriptor->indexName();
+ _indexNamespace = descriptor->indexNamespace();
- void IndexCatalog::IndexBuildBlock::fail() {
- fassert( 17204, _catalog->_collection->ok() ); // defensive
+ /// ---------- setup on disk structures ----------------
- IndexCatalogEntry* entry = _catalog->_entries.find( _indexName );
- invariant( entry == _entry );
+ Status status = _collection->getCatalogEntry()->prepareForIndexBuild(_txn, descriptor);
+ if (!status.isOK())
+ return status;
- if ( entry ) {
- _catalog->_dropIndex(_txn, entry);
- }
- else {
- _catalog->_deleteIndexFromDisk( _txn,
- _indexName,
- _indexNamespace );
- }
- }
+ /// ---------- setup in memory structures ----------------
+ const bool initFromDisk = false;
+ _entry = _catalog->_setupInMemoryStructures(_txn, descriptorCleaner.release(), initFromDisk);
- void IndexCatalog::IndexBuildBlock::success() {
- fassert( 17207, _catalog->_collection->ok() );
+ return Status::OK();
+}
- _catalog->_collection->getCatalogEntry()->indexBuildSuccess( _txn, _indexName );
+IndexCatalog::IndexBuildBlock::~IndexBuildBlock() {
+ // Don't need to call fail() here, as rollback will clean everything up for us.
+}
- IndexDescriptor* desc = _catalog->findIndexByName( _txn, _indexName, true );
- fassert( 17330, desc );
- IndexCatalogEntry* entry = _catalog->_entries.find( desc );
- fassert( 17331, entry && entry == _entry );
+void IndexCatalog::IndexBuildBlock::fail() {
+ fassert(17204, _catalog->_collection->ok()); // defensive
- entry->setIsReady( true );
+ IndexCatalogEntry* entry = _catalog->_entries.find(_indexName);
+ invariant(entry == _entry);
- _catalog->_collection->infoCache()->addedIndex( _txn );
+ if (entry) {
+ _catalog->_dropIndex(_txn, entry);
+ } else {
+ _catalog->_deleteIndexFromDisk(_txn, _indexName, _indexNamespace);
}
+}
- namespace {
- // While technically recursive, only current possible with 2 levels.
- Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0) {
- if (!expression)
- return Status::OK();
-
- switch(expression->matchType()) {
- case MatchExpression::AND:
- if (level > 0)
- return Status(ErrorCodes::CannotCreateIndex,
- "$and only supported in partialFilterExpression at top level");
- for (size_t i = 0; i < expression->numChildren(); i++) {
- Status status = _checkValidFilterExpressions(expression->getChild(i),
- level + 1 );
- if (!status.isOK())
- return status;
- }
- return Status::OK();
- case MatchExpression::EQ:
- case MatchExpression::LT:
- case MatchExpression::LTE:
- case MatchExpression::GT:
- case MatchExpression::GTE:
- case MatchExpression::EXISTS:
- case MatchExpression::TYPE_OPERATOR:
- return Status::OK();
- default:
- return Status(ErrorCodes::CannotCreateIndex,
- str::stream() << "unsupported expression in partial index: "
- << expression->toString());
- }
- }
- }
+void IndexCatalog::IndexBuildBlock::success() {
+ fassert(17207, _catalog->_collection->ok());
- Status IndexCatalog::_isSpecOk( const BSONObj& spec ) const {
+ _catalog->_collection->getCatalogEntry()->indexBuildSuccess(_txn, _indexName);
- const NamespaceString& nss = _collection->ns();
+ IndexDescriptor* desc = _catalog->findIndexByName(_txn, _indexName, true);
+ fassert(17330, desc);
+ IndexCatalogEntry* entry = _catalog->_entries.find(desc);
+ fassert(17331, entry && entry == _entry);
- BSONElement vElt = spec["v"];
- if( !vElt.eoo() ) {
- if ( !vElt.isNumber() ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "non-numeric value for \"v\" field:" << vElt );
- }
- double v = vElt.Number();
+ entry->setIsReady(true);
- // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
- if (v == 0 && !getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "use of v0 indexes is only allowed with the "
- << "mmapv1 storage engine");
- }
+ _catalog->_collection->infoCache()->addedIndex(_txn);
+}
- // note (one day) we may be able to fresh build less versions than we can use
- // isASupportedIndexVersionNumber() is what we can use
- if ( v != 0 && v != 1 ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "this version of mongod cannot build new indexes "
- << "of version number " << v );
- }
- }
+namespace {
+// While technically recursive, only two levels are currently possible.
+Status _checkValidFilterExpressions(MatchExpression* expression, int level = 0) {
+ if (!expression)
+ return Status::OK();
- if ( nss.isSystemDotIndexes() )
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot create indexes on the system.indexes collection" );
+ switch (expression->matchType()) {
+ case MatchExpression::AND:
+ if (level > 0)
+ return Status(ErrorCodes::CannotCreateIndex,
+ "$and only supported in partialFilterExpression at top level");
+ for (size_t i = 0; i < expression->numChildren(); i++) {
+ Status status = _checkValidFilterExpressions(expression->getChild(i), level + 1);
+ if (!status.isOK())
+ return status;
+ }
+ return Status::OK();
+ case MatchExpression::EQ:
+ case MatchExpression::LT:
+ case MatchExpression::LTE:
+ case MatchExpression::GT:
+ case MatchExpression::GTE:
+ case MatchExpression::EXISTS:
+ case MatchExpression::TYPE_OPERATOR:
+ return Status::OK();
+ default:
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "unsupported expression in partial index: "
+ << expression->toString());
+ }
+}
+}  // namespace
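To make the whitelist above concrete, a few hedged examples of partialFilterExpression documents and how _checkValidFilterExpressions treats them (field names are illustrative):

// Accepted: a supported comparison at the top level.
//   {rating: {$gte: 4}}
// Accepted: $and of supported leaves, but only at the top level (level == 0).
//   {$and: [{rating: {$gte: 4}}, {status: {$exists: true}}]}
// Rejected: $and nested below the top level (level > 0).
//   {$and: [{$and: [{rating: {$gte: 4}}]}]}
// Rejected: any expression type missing from the switch, e.g. $or.
//   {$or: [{rating: {$gte: 4}}, {status: "A"}]}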
- if ( nss.isOplog() )
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot create indexes on the oplog" );
+Status IndexCatalog::_isSpecOk(const BSONObj& spec) const {
+ const NamespaceString& nss = _collection->ns();
- if ( nss.coll() == "$freelist" ) {
- // this isn't really proper, but we never want it and its not an error per se
- return Status( ErrorCodes::IndexAlreadyExists, "cannot index freelist" );
+ BSONElement vElt = spec["v"];
+ if (!vElt.eoo()) {
+ if (!vElt.isNumber()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "non-numeric value for \"v\" field:" << vElt);
}
+ double v = vElt.Number();
- const BSONElement specNamespace = spec["ns"];
- if ( specNamespace.type() != String )
- return Status( ErrorCodes::CannotCreateIndex,
- "the index spec needs a 'ns' string field" );
-
- if ( nss.ns() != specNamespace.valueStringData())
- return Status( ErrorCodes::CannotCreateIndex,
- "the index spec ns does not match" );
+ // SERVER-16893 Forbid use of v0 indexes with non-mmapv1 engines
+ if (v == 0 && !getGlobalServiceContext()->getGlobalStorageEngine()->isMmapV1()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "use of v0 indexes is only allowed with the "
+ << "mmapv1 storage engine");
+ }
- // logical name of the index
- const BSONElement nameElem = spec["name"];
- if (nameElem.type() != String)
+        // Note: one day we may be able to build fewer index versions from scratch than
+        // we can use; isASupportedIndexVersionNumber() governs what we can use.
+ if (v != 0 && v != 1) {
return Status(ErrorCodes::CannotCreateIndex,
- "index name must be specified as a string");
-
- const StringData name = nameElem.valueStringData();
- if (name.find('\0') != std::string::npos)
- return Status(ErrorCodes::CannotCreateIndex, "index names cannot contain NUL bytes");
-
- if (name.empty())
- return Status(ErrorCodes::CannotCreateIndex, "index names cannot be empty");
-
- const std::string indexNamespace = IndexDescriptor::makeIndexNamespace( nss.ns(), name );
- if ( indexNamespace.length() > NamespaceString::MaxNsLen )
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "namespace name generated from index name \"" <<
- indexNamespace << "\" is too long (127 byte max)" );
-
- const BSONObj key = spec.getObjectField("key");
- const Status keyStatus = validateKeyPattern(key);
- if (!keyStatus.isOK()) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "bad index key pattern " << key << ": "
- << keyStatus.reason() );
+ str::stream() << "this version of mongod cannot build new indexes "
+ << "of version number " << v);
}
+ }
- const bool isSparse = spec["sparse"].trueValue();
+ if (nss.isSystemDotIndexes())
+ return Status(ErrorCodes::CannotCreateIndex,
+ "cannot create indexes on the system.indexes collection");
- // Ensure if there is a filter, its valid.
- BSONElement filterElement = spec.getField("partialFilterExpression");
- if ( filterElement ) {
- if ( isSparse ) {
- return Status( ErrorCodes::CannotCreateIndex,
- "cannot mix \"partialFilterExpression\" and \"sparse\" options" );
- }
+ if (nss.isOplog())
+ return Status(ErrorCodes::CannotCreateIndex, "cannot create indexes on the oplog");
- if ( filterElement.type() != Object ) {
- return Status(ErrorCodes::CannotCreateIndex,
- "'partialFilterExpression' for an index has to be a document");
- }
- StatusWithMatchExpression res = MatchExpressionParser::parse( filterElement.Obj() );
- if ( !res.isOK() ) {
- return res.getStatus();
- }
- const std::unique_ptr<MatchExpression> filterExpr( res.getValue() );
+ if (nss.coll() == "$freelist") {
+        // this isn't really proper, but we never want it and it's not an error per se
+ return Status(ErrorCodes::IndexAlreadyExists, "cannot index freelist");
+ }
- Status status = _checkValidFilterExpressions( filterExpr.get() );
- if (!status.isOK()) {
- return status;
- }
- }
+ const BSONElement specNamespace = spec["ns"];
+ if (specNamespace.type() != String)
+ return Status(ErrorCodes::CannotCreateIndex, "the index spec needs a 'ns' string field");
+
+ if (nss.ns() != specNamespace.valueStringData())
+ return Status(ErrorCodes::CannotCreateIndex, "the index spec ns does not match");
+
+ // logical name of the index
+ const BSONElement nameElem = spec["name"];
+ if (nameElem.type() != String)
+ return Status(ErrorCodes::CannotCreateIndex, "index name must be specified as a string");
+
+ const StringData name = nameElem.valueStringData();
+ if (name.find('\0') != std::string::npos)
+ return Status(ErrorCodes::CannotCreateIndex, "index names cannot contain NUL bytes");
+
+ if (name.empty())
+ return Status(ErrorCodes::CannotCreateIndex, "index names cannot be empty");
+
+ const std::string indexNamespace = IndexDescriptor::makeIndexNamespace(nss.ns(), name);
+ if (indexNamespace.length() > NamespaceString::MaxNsLen)
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "namespace name generated from index name \""
+ << indexNamespace << "\" is too long (127 byte max)");
+
+ const BSONObj key = spec.getObjectField("key");
+ const Status keyStatus = validateKeyPattern(key);
+ if (!keyStatus.isOK()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "bad index key pattern " << key << ": "
+ << keyStatus.reason());
+ }
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- BSONElement uniqueElt = spec["unique"];
- if ( uniqueElt && !uniqueElt.trueValue() ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be non-unique" );
- }
+ const bool isSparse = spec["sparse"].trueValue();
- if ( filterElement ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be partial" );
- }
+    // Ensure that if there is a filter, it's valid.
+ BSONElement filterElement = spec.getField("partialFilterExpression");
+ if (filterElement) {
+ if (isSparse) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "cannot mix \"partialFilterExpression\" and \"sparse\" options");
+ }
- if ( isSparse ) {
- return Status( ErrorCodes::CannotCreateIndex, "_id index cannot be sparse" );
- }
+ if (filterElement.type() != Object) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "'partialFilterExpression' for an index has to be a document");
}
- else {
- // for non _id indexes, we check to see if replication has turned off all indexes
- // we _always_ created _id index
- if (!repl::getGlobalReplicationCoordinator()->buildsIndexes()) {
- // this is not exactly the right error code, but I think will make the most sense
- return Status( ErrorCodes::IndexAlreadyExists, "no indexes per repl" );
- }
+ StatusWithMatchExpression res = MatchExpressionParser::parse(filterElement.Obj());
+ if (!res.isOK()) {
+ return res.getStatus();
}
+ const std::unique_ptr<MatchExpression> filterExpr(res.getValue());
- // --- only storage engine checks allowed below this ----
+ Status status = _checkValidFilterExpressions(filterExpr.get());
+ if (!status.isOK()) {
+ return status;
+ }
+ }
- BSONElement storageEngineElement = spec.getField("storageEngine");
- if (storageEngineElement.eoo()) {
- return Status::OK();
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ BSONElement uniqueElt = spec["unique"];
+ if (uniqueElt && !uniqueElt.trueValue()) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be non-unique");
}
- if (storageEngineElement.type() != mongo::Object) {
- return Status(ErrorCodes::CannotCreateIndex, "'storageEngine' has to be a document.");
+
+ if (filterElement) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be partial");
}
- BSONObj storageEngineOptions = storageEngineElement.Obj();
- if (storageEngineOptions.isEmpty()) {
- return Status(ErrorCodes::CannotCreateIndex,
- "Empty 'storageEngine' options are invalid. "
- "Please remove, or include valid options.");
+ if (isSparse) {
+ return Status(ErrorCodes::CannotCreateIndex, "_id index cannot be sparse");
}
- Status storageEngineStatus = validateStorageOptions(storageEngineOptions,
- &StorageEngine::Factory::validateIndexStorageOptions);
- if (!storageEngineStatus.isOK()) {
- return storageEngineStatus;
+ } else {
+        // For non-_id indexes, check whether replication has turned off all index builds;
+        // we _always_ create the _id index.
+ if (!repl::getGlobalReplicationCoordinator()->buildsIndexes()) {
+            // this is not exactly the right error code, but I think it will make the most sense
+ return Status(ErrorCodes::IndexAlreadyExists, "no indexes per repl");
}
+ }
+
+ // --- only storage engine checks allowed below this ----
+ BSONElement storageEngineElement = spec.getField("storageEngine");
+ if (storageEngineElement.eoo()) {
return Status::OK();
}
+ if (storageEngineElement.type() != mongo::Object) {
+ return Status(ErrorCodes::CannotCreateIndex, "'storageEngine' has to be a document.");
+ }
+ BSONObj storageEngineOptions = storageEngineElement.Obj();
+ if (storageEngineOptions.isEmpty()) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ "Empty 'storageEngine' options are invalid. "
+ "Please remove, or include valid options.");
+ }
+ Status storageEngineStatus = validateStorageOptions(
+ storageEngineOptions, &StorageEngine::Factory::validateIndexStorageOptions);
+ if (!storageEngineStatus.isOK()) {
+ return storageEngineStatus;
+ }
- Status IndexCatalog::_doesSpecConflictWithExisting( OperationContext* txn,
- const BSONObj& spec ) const {
- const char *name = spec.getStringField("name");
- invariant( name[0] );
-
- const BSONObj key = spec.getObjectField("key");
-
- {
- // Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByName( txn, name, true );
- if ( desc ) {
- // index already exists with same name
-
- if ( !desc->keyPattern().equal( key ) )
- return Status( ErrorCodes::IndexKeySpecsConflict,
- str::stream() << "Trying to create an index "
- << "with same name " << name
- << " with different key spec " << key
- << " vs existing spec " << desc->keyPattern() );
-
- IndexDescriptor temp( _collection,
- _getAccessMethodName( txn, key ),
- spec );
- if ( !desc->areIndexOptionsEquivalent( &temp ) )
- return Status( ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index with name: " << name
- << " already exists with different options" );
-
- // Index already exists with the same options, so no need to build a new
- // one (not an error). Most likely requested by a client using ensureIndex.
- return Status( ErrorCodes::IndexAlreadyExists, str::stream() <<
- "index already exists: " << name );
- }
- }
+ return Status::OK();
+}
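Summing up the validation above, a sketch of what passes and what fails (collection name and fields are illustrative):

// Would pass _isSpecOk on collection "test.users":
//   {v: 1, key: {email: 1}, name: "email_1", ns: "test.users", sparse: true}
// Would fail: a "v" other than 0 or 1, a non-string or mismatched "ns", an
// empty name or one containing NUL, mixing "partialFilterExpression" with
// "sparse", a partial/sparse/non-unique _id index, or any index on
// system.indexes, the oplog, or $freelist.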
- {
- // Check both existing and in-progress indexes (2nd param = true)
- const IndexDescriptor* desc = findIndexByKeyPattern(txn, key, true);
- if (desc) {
- LOG(2) << "index already exists with diff name " << name
- << ' ' << key << endl;
-
- IndexDescriptor temp( _collection,
- _getAccessMethodName( txn, key ),
- spec );
- if ( !desc->areIndexOptionsEquivalent( &temp ) )
- return Status( ErrorCodes::IndexOptionsConflict,
- str::stream() << "Index with pattern: " << key
- << " already exists with different options" );
-
- return Status( ErrorCodes::IndexAlreadyExists, str::stream() <<
- "index already exists: " << name );
- }
+Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
+ const BSONObj& spec) const {
+ const char* name = spec.getStringField("name");
+ invariant(name[0]);
+
+ const BSONObj key = spec.getObjectField("key");
+
+ {
+ // Check both existing and in-progress indexes (2nd param = true)
+ const IndexDescriptor* desc = findIndexByName(txn, name, true);
+ if (desc) {
+ // index already exists with same name
+
+ if (!desc->keyPattern().equal(key))
+ return Status(ErrorCodes::IndexKeySpecsConflict,
+ str::stream() << "Trying to create an index "
+ << "with same name " << name
+ << " with different key spec " << key
+ << " vs existing spec " << desc->keyPattern());
+
+ IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ if (!desc->areIndexOptionsEquivalent(&temp))
+ return Status(ErrorCodes::IndexOptionsConflict,
+ str::stream() << "Index with name: " << name
+ << " already exists with different options");
+
+ // Index already exists with the same options, so no need to build a new
+ // one (not an error). Most likely requested by a client using ensureIndex.
+ return Status(ErrorCodes::IndexAlreadyExists,
+ str::stream() << "index already exists: " << name);
}
+ }
- if ( numIndexesTotal(txn) >= _maxNumIndexesAllowed ) {
- string s = str::stream() << "add index fails, too many indexes for "
- << _collection->ns().ns() << " key:" << key.toString();
- log() << s;
- return Status( ErrorCodes::CannotCreateIndex, s );
- }
+ {
+ // Check both existing and in-progress indexes (2nd param = true)
+ const IndexDescriptor* desc = findIndexByKeyPattern(txn, key, true);
+ if (desc) {
+ LOG(2) << "index already exists with diff name " << name << ' ' << key << endl;
- // Refuse to build text index if another text index exists or is in progress.
- // Collections should only have one text index.
- string pluginName = IndexNames::findPluginName( key );
- if ( pluginName == IndexNames::TEXT ) {
- vector<IndexDescriptor*> textIndexes;
- const bool includeUnfinishedIndexes = true;
- findIndexByType( txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes );
- if ( textIndexes.size() > 0 ) {
- return Status( ErrorCodes::CannotCreateIndex,
- str::stream() << "only one text index per collection allowed, "
- << "found existing text index \"" << textIndexes[0]->indexName()
- << "\"" );
- }
+ IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
+ if (!desc->areIndexOptionsEquivalent(&temp))
+ return Status(ErrorCodes::IndexOptionsConflict,
+ str::stream() << "Index with pattern: " << key
+ << " already exists with different options");
+
+ return Status(ErrorCodes::IndexAlreadyExists,
+ str::stream() << "index already exists: " << name);
}
- return Status::OK();
}
- BSONObj IndexCatalog::getDefaultIdIndexSpec() const {
- dassert( _idObj["_id"].type() == NumberInt );
-
- BSONObjBuilder b;
- b.append( "name", "_id_" );
- b.append( "ns", _collection->ns().ns() );
- b.append( "key", _idObj );
- return b.obj();
+ if (numIndexesTotal(txn) >= _maxNumIndexesAllowed) {
+ string s = str::stream() << "add index fails, too many indexes for "
+ << _collection->ns().ns() << " key:" << key.toString();
+ log() << s;
+ return Status(ErrorCodes::CannotCreateIndex, s);
}
- Status IndexCatalog::dropAllIndexes(OperationContext* txn,
- bool includingIdIndex) {
-
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
-
- BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
-
- // there may be pointers pointing at keys in the btree(s). kill them.
- // TODO: can this can only clear cursors on this index?
- _collection->getCursorManager()->invalidateAll(false, "all indexes on collection dropped");
-
- // make sure nothing in progress
- massert( 17348,
- "cannot dropAllIndexes when index builds in progress",
- numIndexesTotal(txn) == numIndexesReady(txn) );
-
- bool haveIdIndex = false;
-
- vector<string> indexNamesToDrop;
- {
- int seen = 0;
- IndexIterator ii = getIndexIterator( txn, true );
- while ( ii.more() ) {
- seen++;
- IndexDescriptor* desc = ii.next();
- if ( desc->isIdIndex() && includingIdIndex == false ) {
- haveIdIndex = true;
- continue;
- }
- indexNamesToDrop.push_back( desc->indexName() );
- }
- invariant( seen == numIndexesTotal(txn) );
+ // Refuse to build text index if another text index exists or is in progress.
+ // Collections should only have one text index.
+ string pluginName = IndexNames::findPluginName(key);
+ if (pluginName == IndexNames::TEXT) {
+ vector<IndexDescriptor*> textIndexes;
+ const bool includeUnfinishedIndexes = true;
+ findIndexByType(txn, IndexNames::TEXT, textIndexes, includeUnfinishedIndexes);
+ if (textIndexes.size() > 0) {
+ return Status(ErrorCodes::CannotCreateIndex,
+ str::stream() << "only one text index per collection allowed, "
+ << "found existing text index \""
+ << textIndexes[0]->indexName() << "\"");
}
+ }
+ return Status::OK();
+}
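The function above maps conflicts to distinct error codes; a hedged summary:

// same name, different key pattern           -> IndexKeySpecsConflict
// same name or same key, different options   -> IndexOptionsConflict
// same name/key with equivalent options      -> IndexAlreadyExists (benign;
//                                               typically an ensureIndex retry)
// index limit reached, or second text index  -> CannotCreateIndex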
- for ( size_t i = 0; i < indexNamesToDrop.size(); i++ ) {
- string indexName = indexNamesToDrop[i];
- IndexDescriptor* desc = findIndexByName( txn, indexName, true );
- invariant( desc );
- LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
- IndexCatalogEntry* entry = _entries.find( desc );
- invariant( entry );
- _dropIndex(txn, entry);
- }
+BSONObj IndexCatalog::getDefaultIdIndexSpec() const {
+ dassert(_idObj["_id"].type() == NumberInt);
- // verify state is sane post cleaning
+ BSONObjBuilder b;
+ b.append("name", "_id_");
+ b.append("ns", _collection->ns().ns());
+ b.append("key", _idObj);
+ return b.obj();
+}
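Assuming _idObj is the canonical {_id: 1} pattern (the dassert above requires a NumberInt _id value), the builder produces, for a collection "test.users":

//   {name: "_id_", ns: "test.users", key: {_id: 1}}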
- long long numIndexesInCollectionCatalogEntry =
- _collection->getCatalogEntry()->getTotalIndexCount( txn );
+Status IndexCatalog::dropAllIndexes(OperationContext* txn, bool includingIdIndex) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
- if ( haveIdIndex ) {
- fassert( 17324, numIndexesTotal(txn) == 1 );
- fassert( 17325, numIndexesReady(txn) == 1 );
- fassert( 17326, numIndexesInCollectionCatalogEntry == 1 );
- fassert( 17336, _entries.size() == 1 );
- }
- else {
- if ( numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size() ) {
- error() << "About to fassert - "
- << " numIndexesTotal(): " << numIndexesTotal(txn)
- << " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
- << " _entries.size(): " << _entries.size()
- << " indexNamesToDrop: " << indexNamesToDrop.size()
- << " haveIdIndex: " << haveIdIndex;
+ BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
+
+    // There may be pointers into keys in the btree(s); kill them.
+    // TODO: could this clear only the cursors over this collection's indexes?
+ _collection->getCursorManager()->invalidateAll(false, "all indexes on collection dropped");
+
+ // make sure nothing in progress
+ massert(17348,
+ "cannot dropAllIndexes when index builds in progress",
+ numIndexesTotal(txn) == numIndexesReady(txn));
+
+ bool haveIdIndex = false;
+
+ vector<string> indexNamesToDrop;
+ {
+ int seen = 0;
+ IndexIterator ii = getIndexIterator(txn, true);
+ while (ii.more()) {
+ seen++;
+ IndexDescriptor* desc = ii.next();
+ if (desc->isIdIndex() && includingIdIndex == false) {
+ haveIdIndex = true;
+ continue;
}
- fassert( 17327, numIndexesTotal(txn) == 0 );
- fassert( 17328, numIndexesInCollectionCatalogEntry == 0 );
- fassert( 17337, _entries.size() == 0 );
+ indexNamesToDrop.push_back(desc->indexName());
}
-
- return Status::OK();
+ invariant(seen == numIndexesTotal(txn));
}
- Status IndexCatalog::dropIndex(OperationContext* txn,
- IndexDescriptor* desc ) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(),
- MODE_X));
- IndexCatalogEntry* entry = _entries.find( desc );
+ for (size_t i = 0; i < indexNamesToDrop.size(); i++) {
+ string indexName = indexNamesToDrop[i];
+ IndexDescriptor* desc = findIndexByName(txn, indexName, true);
+ invariant(desc);
+ LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
+ IndexCatalogEntry* entry = _entries.find(desc);
+ invariant(entry);
+ _dropIndex(txn, entry);
+ }
- if ( !entry )
- return Status( ErrorCodes::InternalError, "cannot find index to delete" );
+    // verify state is sane post-cleanup
+
+ long long numIndexesInCollectionCatalogEntry =
+ _collection->getCatalogEntry()->getTotalIndexCount(txn);
+
+ if (haveIdIndex) {
+ fassert(17324, numIndexesTotal(txn) == 1);
+ fassert(17325, numIndexesReady(txn) == 1);
+ fassert(17326, numIndexesInCollectionCatalogEntry == 1);
+ fassert(17336, _entries.size() == 1);
+ } else {
+ if (numIndexesTotal(txn) || numIndexesInCollectionCatalogEntry || _entries.size()) {
+ error() << "About to fassert - "
+ << " numIndexesTotal(): " << numIndexesTotal(txn)
+ << " numSystemIndexesEntries: " << numIndexesInCollectionCatalogEntry
+ << " _entries.size(): " << _entries.size()
+ << " indexNamesToDrop: " << indexNamesToDrop.size()
+ << " haveIdIndex: " << haveIdIndex;
+ }
+ fassert(17327, numIndexesTotal(txn) == 0);
+ fassert(17328, numIndexesInCollectionCatalogEntry == 0);
+ fassert(17337, _entries.size() == 0);
+ }
- if ( !entry->isReady( txn ) )
- return Status( ErrorCodes::InternalError, "cannot delete not ready index" );
+ return Status::OK();
+}
- BackgroundOperation::assertNoBgOpInProgForNs( _collection->ns().ns() );
+Status IndexCatalog::dropIndex(OperationContext* txn, IndexDescriptor* desc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().toString(), MODE_X));
+ IndexCatalogEntry* entry = _entries.find(desc);
- return _dropIndex(txn, entry);
- }
+ if (!entry)
+ return Status(ErrorCodes::InternalError, "cannot find index to delete");
-namespace {
- class IndexRemoveChange : public RecoveryUnit::Change {
- public:
- IndexRemoveChange(OperationContext* txn,
- Collection* collection,
- IndexCatalogEntryContainer* entries,
- IndexCatalogEntry* entry)
- : _txn(txn),
- _collection(collection),
- _entries(entries),
- _entry(entry) {
- }
+ if (!entry->isReady(txn))
+ return Status(ErrorCodes::InternalError, "cannot delete not ready index");
- virtual void commit() {
- delete _entry;
- }
+ BackgroundOperation::assertNoBgOpInProgForNs(_collection->ns().ns());
- virtual void rollback() {
- _entries->add(_entry);
- _collection->infoCache()->reset(_txn);
- }
+ return _dropIndex(txn, entry);
+}
- private:
- OperationContext* _txn;
- Collection* _collection;
- IndexCatalogEntryContainer* _entries;
- IndexCatalogEntry* _entry;
- };
-} // namespace
-
- Status IndexCatalog::_dropIndex(OperationContext* txn,
- IndexCatalogEntry* entry ) {
- /**
- * IndexState in order
- * <db>.system.indexes
- * NamespaceDetails
- * <db>.system.ns
- */
-
- // ----- SANITY CHECKS -------------
- if ( !entry )
- return Status( ErrorCodes::BadValue, "IndexCatalog::_dropIndex passed NULL" );
+namespace {
+class IndexRemoveChange : public RecoveryUnit::Change {
+public:
+ IndexRemoveChange(OperationContext* txn,
+ Collection* collection,
+ IndexCatalogEntryContainer* entries,
+ IndexCatalogEntry* entry)
+ : _txn(txn), _collection(collection), _entries(entries), _entry(entry) {}
+
+ virtual void commit() {
+ delete _entry;
+ }
- _checkMagic();
- Status status = checkUnfinished();
- if ( !status.isOK() )
- return status;
+ virtual void rollback() {
+ _entries->add(_entry);
+ _collection->infoCache()->reset(_txn);
+ }
- // Pulling indexName/indexNamespace out as they are needed post descriptor release.
- string indexName = entry->descriptor()->indexName();
- string indexNamespace = entry->descriptor()->indexNamespace();
+private:
+ OperationContext* _txn;
+ Collection* _collection;
+ IndexCatalogEntryContainer* _entries;
+ IndexCatalogEntry* _entry;
+};
+} // namespace
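IndexRemoveChange is the standard RecoveryUnit::Change two-phase shape: the entry is detached from the container eagerly, then freed on commit or reinstated on rollback. A generic sketch of the same pattern, with a hypothetical Resource payload and reattach() undo hook:

class ExampleChange : public RecoveryUnit::Change {
public:
    explicit ExampleChange(Resource* r) : _r(r) {}
    virtual void commit() {
        delete _r;  // the unit of work committed; the detached state is gone for good
    }
    virtual void rollback() {
        reattach(_r);  // the unit of work aborted; hypothetical undo hook
    }

private:
    Resource* _r;  // hypothetical payload, owned until commit/rollback
};
// Ownership passes to the recovery unit:
// txn->recoveryUnit()->registerChange(new ExampleChange(r));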
- // there may be pointers pointing at keys in the btree(s). kill them.
- // TODO: can this can only clear cursors on this index?
- _collection->getCursorManager()->invalidateAll(false, str::stream() << "index '"
- << indexName
- << "' dropped");
+Status IndexCatalog::_dropIndex(OperationContext* txn, IndexCatalogEntry* entry) {
+ /**
+ * IndexState in order
+ * <db>.system.indexes
+ * NamespaceDetails
+ * <db>.system.ns
+ */
- // --------- START REAL WORK ----------
+ // ----- SANITY CHECKS -------------
+ if (!entry)
+ return Status(ErrorCodes::BadValue, "IndexCatalog::_dropIndex passed NULL");
- audit::logDropIndex( &cc(), indexName, _collection->ns().ns() );
+ _checkMagic();
+ Status status = checkUnfinished();
+ if (!status.isOK())
+ return status;
- invariant(_entries.release(entry->descriptor()) == entry);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection,
- &_entries, entry));
- entry = NULL;
+ // Pulling indexName/indexNamespace out as they are needed post descriptor release.
+ string indexName = entry->descriptor()->indexName();
+ string indexNamespace = entry->descriptor()->indexNamespace();
- _deleteIndexFromDisk(txn, indexName, indexNamespace);
+    // There may be pointers into keys in the btree(s); kill them.
+    // TODO: could this clear only the cursors over this index?
+ _collection->getCursorManager()->invalidateAll(
+ false, str::stream() << "index '" << indexName << "' dropped");
- _checkMagic();
+ // --------- START REAL WORK ----------
- // Now that we've dropped the index, ask the info cache to rebuild its cached view of
- // collection state.
- _collection->infoCache()->reset(txn);
+ audit::logDropIndex(&cc(), indexName, _collection->ns().ns());
- return Status::OK();
- }
+ invariant(_entries.release(entry->descriptor()) == entry);
+ txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn, _collection, &_entries, entry));
+ entry = NULL;
- void IndexCatalog::_deleteIndexFromDisk( OperationContext* txn,
- const string& indexName,
- const string& indexNamespace ) {
- Status status = _collection->getCatalogEntry()->removeIndex( txn, indexName );
- if ( status.code() == ErrorCodes::NamespaceNotFound ) {
- // this is ok, as we may be partially through index creation
- }
- else if ( !status.isOK() ) {
- warning() << "couldn't drop index " << indexName
- << " on collection: " << _collection->ns()
- << " because of " << status.toString();
- }
- }
+ _deleteIndexFromDisk(txn, indexName, indexNamespace);
- vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
- vector<BSONObj> toReturn = _unfinishedIndexes;
- _unfinishedIndexes.clear();
- for ( size_t i = 0; i < toReturn.size(); i++ ) {
- BSONObj spec = toReturn[i];
+ _checkMagic();
- BSONObj keyPattern = spec.getObjectField("key");
- IndexDescriptor desc( _collection, _getAccessMethodName(txn, keyPattern), spec );
+ // Now that we've dropped the index, ask the info cache to rebuild its cached view of
+ // collection state.
+ _collection->infoCache()->reset(txn);
- _deleteIndexFromDisk( txn,
- desc.indexName(),
- desc.indexNamespace() );
- }
- return toReturn;
- }
+ return Status::OK();
+}
- bool IndexCatalog::isMultikey( OperationContext* txn, const IndexDescriptor* idx ) {
- IndexCatalogEntry* entry = _entries.find( idx );
- invariant( entry );
- return entry->isMultikey();
+void IndexCatalog::_deleteIndexFromDisk(OperationContext* txn,
+ const string& indexName,
+ const string& indexNamespace) {
+ Status status = _collection->getCatalogEntry()->removeIndex(txn, indexName);
+ if (status.code() == ErrorCodes::NamespaceNotFound) {
+ // this is ok, as we may be partially through index creation
+ } else if (!status.isOK()) {
+ warning() << "couldn't drop index " << indexName << " on collection: " << _collection->ns()
+ << " because of " << status.toString();
}
+}
+
+vector<BSONObj> IndexCatalog::getAndClearUnfinishedIndexes(OperationContext* txn) {
+ vector<BSONObj> toReturn = _unfinishedIndexes;
+ _unfinishedIndexes.clear();
+ for (size_t i = 0; i < toReturn.size(); i++) {
+ BSONObj spec = toReturn[i];
- // ---------------------------
+ BSONObj keyPattern = spec.getObjectField("key");
+ IndexDescriptor desc(_collection, _getAccessMethodName(txn, keyPattern), spec);
- bool IndexCatalog::haveAnyIndexes() const {
- return _entries.size() != 0;
+ _deleteIndexFromDisk(txn, desc.indexName(), desc.indexNamespace());
}
+ return toReturn;
+}
- int IndexCatalog::numIndexesTotal( OperationContext* txn ) const {
- int count = _entries.size() + _unfinishedIndexes.size();
- dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
- return count;
- }
+bool IndexCatalog::isMultikey(OperationContext* txn, const IndexDescriptor* idx) {
+ IndexCatalogEntry* entry = _entries.find(idx);
+ invariant(entry);
+ return entry->isMultikey();
+}
- int IndexCatalog::numIndexesReady( OperationContext* txn ) const {
- int count = 0;
- IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/false);
- while (ii.more()) {
- ii.next();
- count++;
- }
- dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
- return count;
- }
- bool IndexCatalog::haveIdIndex( OperationContext* txn ) const {
- return findIdIndex( txn ) != NULL;
- }
+// ---------------------------
- IndexCatalog::IndexIterator::IndexIterator( OperationContext* txn,
- const IndexCatalog* cat,
- bool includeUnfinishedIndexes )
- : _includeUnfinishedIndexes( includeUnfinishedIndexes ),
- _txn( txn ),
- _catalog( cat ),
- _iterator( cat->_entries.begin() ),
- _start( true ),
- _prev( NULL ),
- _next( NULL ) {
- }
+bool IndexCatalog::haveAnyIndexes() const {
+ return _entries.size() != 0;
+}
- bool IndexCatalog::IndexIterator::more() {
- if ( _start ) {
- _advance();
- _start = false;
- }
- return _next != NULL;
- }
+int IndexCatalog::numIndexesTotal(OperationContext* txn) const {
+ int count = _entries.size() + _unfinishedIndexes.size();
+ dassert(_collection->getCatalogEntry()->getTotalIndexCount(txn) == count);
+ return count;
+}
- IndexDescriptor* IndexCatalog::IndexIterator::next() {
- if ( !more() )
- return NULL;
- _prev = _next;
- _advance();
- return _prev->descriptor();
+int IndexCatalog::numIndexesReady(OperationContext* txn) const {
+ int count = 0;
+ IndexIterator ii = getIndexIterator(txn, /*includeUnfinished*/ false);
+ while (ii.more()) {
+ ii.next();
+ count++;
}
+ dassert(_collection->getCatalogEntry()->getCompletedIndexCount(txn) == count);
+ return count;
+}
- IndexAccessMethod* IndexCatalog::IndexIterator::accessMethod( const IndexDescriptor* desc ) {
- invariant( desc == _prev->descriptor() );
- return _prev->accessMethod();
- }
+bool IndexCatalog::haveIdIndex(OperationContext* txn) const {
+ return findIdIndex(txn) != NULL;
+}
- IndexCatalogEntry* IndexCatalog::IndexIterator::catalogEntry( const IndexDescriptor* desc ) {
- invariant( desc == _prev->descriptor() );
- return _prev;
+IndexCatalog::IndexIterator::IndexIterator(OperationContext* txn,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes)
+ : _includeUnfinishedIndexes(includeUnfinishedIndexes),
+ _txn(txn),
+ _catalog(cat),
+ _iterator(cat->_entries.begin()),
+ _start(true),
+ _prev(NULL),
+ _next(NULL) {}
+
+bool IndexCatalog::IndexIterator::more() {
+ if (_start) {
+ _advance();
+ _start = false;
}
+ return _next != NULL;
+}
- void IndexCatalog::IndexIterator::_advance() {
- _next = NULL;
+IndexDescriptor* IndexCatalog::IndexIterator::next() {
+ if (!more())
+ return NULL;
+ _prev = _next;
+ _advance();
+ return _prev->descriptor();
+}
- while ( _iterator != _catalog->_entries.end() ) {
- IndexCatalogEntry* entry = *_iterator;
- ++_iterator;
+IndexAccessMethod* IndexCatalog::IndexIterator::accessMethod(const IndexDescriptor* desc) {
+ invariant(desc == _prev->descriptor());
+ return _prev->accessMethod();
+}
- if ( _includeUnfinishedIndexes ||
- entry->isReady(_txn) ) {
- _next = entry;
- return;
- }
- }
+IndexCatalogEntry* IndexCatalog::IndexIterator::catalogEntry(const IndexDescriptor* desc) {
+ invariant(desc == _prev->descriptor());
+ return _prev;
+}
- }
+void IndexCatalog::IndexIterator::_advance() {
+ _next = NULL;
+ while (_iterator != _catalog->_entries.end()) {
+ IndexCatalogEntry* entry = *_iterator;
+ ++_iterator;
- IndexDescriptor* IndexCatalog::findIdIndex( OperationContext* txn ) const {
- IndexIterator ii = getIndexIterator( txn, false );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->isIdIndex() )
- return desc;
+ if (_includeUnfinishedIndexes || entry->isReady(_txn)) {
+ _next = entry;
+ return;
}
- return NULL;
}
+}
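The more()/next() protocol above is driven the same way throughout this file (see findIdIndex just below); a usage sketch, assuming a valid txn and IndexCatalog pointer:

IndexCatalog::IndexIterator ii = catalog->getIndexIterator(txn, /*includeUnfinishedIndexes*/ false);
while (ii.more()) {
    IndexDescriptor* desc = ii.next();
    // accessMethod(desc) and catalogEntry(desc) are only valid for the most
    // recently returned descriptor
}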
- IndexDescriptor* IndexCatalog::findIndexByName( OperationContext* txn,
- StringData name,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->indexName() == name )
- return desc;
- }
- return NULL;
+
+IndexDescriptor* IndexCatalog::findIdIndex(OperationContext* txn) const {
+ IndexIterator ii = getIndexIterator(txn, false);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->isIdIndex())
+ return desc;
}
+ return NULL;
+}
- IndexDescriptor* IndexCatalog::findIndexByKeyPattern( OperationContext* txn,
- const BSONObj& key,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( desc->keyPattern() == key )
- return desc;
- }
- return NULL;
+IndexDescriptor* IndexCatalog::findIndexByName(OperationContext* txn,
+ StringData name,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->indexName() == name)
+ return desc;
}
+ return NULL;
+}
- IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex( OperationContext* txn,
- const BSONObj& shardKey,
- bool requireSingleKey ) const {
- IndexDescriptor* best = NULL;
+IndexDescriptor* IndexCatalog::findIndexByKeyPattern(OperationContext* txn,
+ const BSONObj& key,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (desc->keyPattern() == key)
+ return desc;
+ }
+ return NULL;
+}
- IndexIterator ii = getIndexIterator( txn, false );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
+IndexDescriptor* IndexCatalog::findShardKeyPrefixedIndex(OperationContext* txn,
+ const BSONObj& shardKey,
+ bool requireSingleKey) const {
+ IndexDescriptor* best = NULL;
- if ( desc->isPartial() )
- continue;
+ IndexIterator ii = getIndexIterator(txn, false);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
- if ( !shardKey.isPrefixOf( desc->keyPattern() ) )
- continue;
+ if (desc->isPartial())
+ continue;
- if( !desc->isMultikey( txn ) )
- return desc;
+ if (!shardKey.isPrefixOf(desc->keyPattern()))
+ continue;
- if ( !requireSingleKey )
- best = desc;
- }
+ if (!desc->isMultikey(txn))
+ return desc;
- return best;
+ if (!requireSingleKey)
+ best = desc;
}
- void IndexCatalog::findIndexByType( OperationContext* txn,
- const string& type, vector<IndexDescriptor*>& matches,
- bool includeUnfinishedIndexes ) const {
- IndexIterator ii = getIndexIterator( txn, includeUnfinishedIndexes );
- while ( ii.more() ) {
- IndexDescriptor* desc = ii.next();
- if ( IndexNames::findPluginName( desc->keyPattern() ) == type ) {
- matches.push_back( desc );
- }
+ return best;
+}
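A hedged illustration of the selection rule above, for shardKey {a: 1} (key patterns are examples):

// {a: 1, b: 1}, not partial, not multikey -> returned immediately
// {a: 1}, multikey                        -> remembered as "best"; returned
//                                            only when requireSingleKey is false
// {a: 1}, partial                         -> skipped entirely
// {b: 1, a: 1}                            -> skipped; shardKey is not a prefix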
+
+void IndexCatalog::findIndexByType(OperationContext* txn,
+ const string& type,
+ vector<IndexDescriptor*>& matches,
+ bool includeUnfinishedIndexes) const {
+ IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
+ while (ii.more()) {
+ IndexDescriptor* desc = ii.next();
+ if (IndexNames::findPluginName(desc->keyPattern()) == type) {
+ matches.push_back(desc);
}
}
+}
- IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) {
- IndexCatalogEntry* entry = _entries.find( desc );
- massert( 17334, "cannot find index entry", entry );
- return entry->accessMethod();
- }
+IndexAccessMethod* IndexCatalog::getIndex(const IndexDescriptor* desc) {
+ IndexCatalogEntry* entry = _entries.find(desc);
+ massert(17334, "cannot find index entry", entry);
+ return entry->accessMethod();
+}
- const IndexAccessMethod* IndexCatalog::getIndex( const IndexDescriptor* desc ) const {
- return getEntry( desc )->accessMethod();
- }
+const IndexAccessMethod* IndexCatalog::getIndex(const IndexDescriptor* desc) const {
+ return getEntry(desc)->accessMethod();
+}
- const IndexCatalogEntry* IndexCatalog::getEntry( const IndexDescriptor* desc ) const {
- const IndexCatalogEntry* entry = _entries.find( desc );
- massert( 17357, "cannot find index entry", entry );
- return entry;
- }
+const IndexCatalogEntry* IndexCatalog::getEntry(const IndexDescriptor* desc) const {
+ const IndexCatalogEntry* entry = _entries.find(desc);
+ massert(17357, "cannot find index entry", entry);
+ return entry;
+}
- const IndexDescriptor* IndexCatalog::refreshEntry( OperationContext* txn,
- const IndexDescriptor* oldDesc ) {
- invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(),
- MODE_X));
-
- const std::string indexName = oldDesc->indexName();
- invariant( _collection->getCatalogEntry()->isIndexReady( txn, indexName ) );
-
- // Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
- const bool collectionGoingAway = false;
- _collection->getCursorManager()->invalidateAll(collectionGoingAway,
- str::stream() << "definition of index '"
- << indexName << "' changed");
-
- // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
- // invalid and should not be dereferenced.
- IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
- txn->recoveryUnit()->registerChange(new IndexRemoveChange(txn,
- _collection,
- &_entries,
- oldEntry));
-
- // Ask the CollectionCatalogEntry for the new index spec.
- BSONObj spec = _collection->getCatalogEntry()->getIndexSpec( txn, indexName ).getOwned();
- BSONObj keyPattern = spec.getObjectField( "key" );
-
- // Re-register this index in the index catalog with the new spec.
- IndexDescriptor* newDesc = new IndexDescriptor( _collection,
- _getAccessMethodName( txn, keyPattern ),
- spec );
- const bool initFromDisk = false;
- const IndexCatalogEntry* newEntry = _setupInMemoryStructures( txn, newDesc, initFromDisk );
- invariant( newEntry->isReady( txn ) );
-
- // Return the new descriptor.
- return newEntry->descriptor();
- }
+const IndexDescriptor* IndexCatalog::refreshEntry(OperationContext* txn,
+ const IndexDescriptor* oldDesc) {
+ invariant(txn->lockState()->isCollectionLockedForMode(_collection->ns().ns(), MODE_X));
- // ---------------------------
+ const std::string indexName = oldDesc->indexName();
+ invariant(_collection->getCatalogEntry()->isIndexReady(txn, indexName));
- namespace {
- bool isDupsAllowed( IndexDescriptor* desc ) {
- bool isUnique = desc->unique() || KeyPattern::isIdKeyPattern(desc->keyPattern());
- if ( !isUnique )
- return true;
+ // Notify other users of the IndexCatalog that we're about to invalidate 'oldDesc'.
+ const bool collectionGoingAway = false;
+ _collection->getCursorManager()->invalidateAll(
+ collectionGoingAway, str::stream() << "definition of index '" << indexName << "' changed");
- return repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(desc);
- }
+ // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is
+ // invalid and should not be dereferenced.
+ IndexCatalogEntry* oldEntry = _entries.release(oldDesc);
+ txn->recoveryUnit()->registerChange(
+ new IndexRemoveChange(txn, _collection, &_entries, oldEntry));
- }
+ // Ask the CollectionCatalogEntry for the new index spec.
+ BSONObj spec = _collection->getCatalogEntry()->getIndexSpec(txn, indexName).getOwned();
+ BSONObj keyPattern = spec.getObjectField("key");
- Status IndexCatalog::_indexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc ) {
- const MatchExpression* filter = index->getFilterExpression();
- if ( filter && !filter->matchesBSON( obj ) ) {
- return Status::OK();
- }
+ // Re-register this index in the index catalog with the new spec.
+ IndexDescriptor* newDesc =
+ new IndexDescriptor(_collection, _getAccessMethodName(txn, keyPattern), spec);
+ const bool initFromDisk = false;
+ const IndexCatalogEntry* newEntry = _setupInMemoryStructures(txn, newDesc, initFromDisk);
+ invariant(newEntry->isReady(txn));
- InsertDeleteOptions options;
- options.logIfError = false;
- options.dupsAllowed = isDupsAllowed( index->descriptor() );
+ // Return the new descriptor.
+ return newEntry->descriptor();
+}
- int64_t inserted;
- return index->accessMethod()->insert(txn, obj, loc, options, &inserted);
- }
+// ---------------------------
- Status IndexCatalog::_unindexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc,
- bool logIfError) {
- InsertDeleteOptions options;
- options.logIfError = logIfError;
- options.dupsAllowed = isDupsAllowed( index->descriptor() );
-
- // For unindex operations, dupsAllowed=false really means that it is safe to delete anything
- // that matches the key, without checking the RecordID, since dups are impossible. We need
- // to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
- options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
-
- int64_t removed;
- Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
-
- if ( !status.isOK() ) {
- log() << "Couldn't unindex record " << obj.toString()
- << " from collection " << _collection->ns()
- << ". Status: " << status.toString();
- }
+namespace {
+bool isDupsAllowed(IndexDescriptor* desc) {
+ bool isUnique = desc->unique() || KeyPattern::isIdKeyPattern(desc->keyPattern());
+ if (!isUnique)
+ return true;
+
+ return repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(desc);
+}
+}  // namespace
+
+Status IndexCatalog::_indexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc) {
+ const MatchExpression* filter = index->getFilterExpression();
+ if (filter && !filter->matchesBSON(obj)) {
return Status::OK();
}
+ InsertDeleteOptions options;
+ options.logIfError = false;
+ options.dupsAllowed = isDupsAllowed(index->descriptor());
- Status IndexCatalog::indexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId &loc ) {
-
- for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
- i != _entries.end();
- ++i ) {
- Status s = _indexRecord(txn, *i, obj, loc);
- if (!s.isOK())
- return s;
- }
+ int64_t inserted;
+ return index->accessMethod()->insert(txn, obj, loc, options, &inserted);
+}
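One consequence of the filter check above: for a partial index, a document that fails the filter is skipped and still reported as success. Illustrative values for a filter of {rating: {$gte: 4}}:

// {_id: 1, rating: 5} -> keys inserted into the index
// {_id: 2, rating: 2} -> nothing written; Status::OK() is still returned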
- return Status::OK();
+Status IndexCatalog::_unindexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool logIfError) {
+ InsertDeleteOptions options;
+ options.logIfError = logIfError;
+ options.dupsAllowed = isDupsAllowed(index->descriptor());
+
+ // For unindex operations, dupsAllowed=false really means that it is safe to delete anything
+ // that matches the key, without checking the RecordID, since dups are impossible. We need
+ // to disable this behavior for in-progress indexes. See SERVER-17487 for more details.
+ options.dupsAllowed = options.dupsAllowed || !index->isReady(txn);
+
+ int64_t removed;
+ Status status = index->accessMethod()->remove(txn, obj, loc, options, &removed);
+
+ if (!status.isOK()) {
+ log() << "Couldn't unindex record " << obj.toString() << " from collection "
+ << _collection->ns() << ". Status: " << status.toString();
}
- void IndexCatalog::unindexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- bool noWarn) {
+ return Status::OK();
+}
- for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
- i != _entries.end();
- ++i ) {
- IndexCatalogEntry* entry = *i;
+Status IndexCatalog::indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc) {
+ for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
+ ++i) {
+ Status s = _indexRecord(txn, *i, obj, loc);
+ if (!s.isOK())
+ return s;
+ }
- // If it's a background index, we DO NOT want to log anything.
- bool logIfError = entry->isReady(txn) ? !noWarn : false;
- _unindexRecord(txn, entry, obj, loc, logIfError);
- }
+ return Status::OK();
+}
+
+void IndexCatalog::unindexRecord(OperationContext* txn,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool noWarn) {
+ for (IndexCatalogEntryContainer::const_iterator i = _entries.begin(); i != _entries.end();
+ ++i) {
+ IndexCatalogEntry* entry = *i;
+
+ // If it's a background index, we DO NOT want to log anything.
+ bool logIfError = entry->isReady(txn) ? !noWarn : false;
+ _unindexRecord(txn, entry, obj, loc, logIfError);
}
+}
- BSONObj IndexCatalog::fixIndexKey( const BSONObj& key ) {
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- return _idObj;
- }
- if ( key["_id"].type() == Bool && key.nFields() == 1 ) {
- return _idObj;
- }
- return key;
+BSONObj IndexCatalog::fixIndexKey(const BSONObj& key) {
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ return _idObj;
}
+ if (key["_id"].type() == Bool && key.nFields() == 1) {
+ return _idObj;
+ }
+ return key;
+}
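fixIndexKey canonicalizes any recognizable _id pattern to _idObj and passes everything else through; assuming _idObj is {_id: 1}:

// {_id: 1}        -> {_id: 1} (canonical _idObj)
// {_id: true}     -> {_id: 1} (a lone boolean _id field is normalized)
// {_id: 1, a: 1}  -> unchanged (not an _id-only pattern)
// {a: 1}          -> unchanged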
- BSONObj IndexCatalog::_fixIndexSpec( const BSONObj& spec ) {
- BSONObj o = IndexLegacy::adjustIndexSpecObject( spec );
+BSONObj IndexCatalog::_fixIndexSpec(const BSONObj& spec) {
+ BSONObj o = IndexLegacy::adjustIndexSpecObject(spec);
- BSONObjBuilder b;
+ BSONObjBuilder b;
- int v = DefaultIndexVersionNumber;
- if( !o["v"].eoo() ) {
- v = o["v"].numberInt();
- }
+ int v = DefaultIndexVersionNumber;
+ if (!o["v"].eoo()) {
+ v = o["v"].numberInt();
+ }
- // idea is to put things we use a lot earlier
- b.append("v", v);
+    // The idea is to put the fields we use most often earlier in the spec.
+ b.append("v", v);
- if( o["unique"].trueValue() )
- b.appendBool("unique", true); // normalize to bool true in case was int 1 or something...
+ if (o["unique"].trueValue())
+        b.appendBool("unique", true);  // normalize to bool true in case it was int 1 or similar
- BSONObj key = fixIndexKey( o["key"].Obj() );
- b.append( "key", key );
+ BSONObj key = fixIndexKey(o["key"].Obj());
+ b.append("key", key);
- string name = o["name"].String();
- if ( IndexDescriptor::isIdIndexPattern( key ) ) {
- name = "_id_";
- }
- b.append( "name", name );
-
- {
- BSONObjIterator i(o);
- while ( i.more() ) {
- BSONElement e = i.next();
- string s = e.fieldName();
-
- if ( s == "_id" ) {
- // skip
- }
- else if ( s == "dropDups" ) {
- // dropDups is silently ignored and removed from the spec as of SERVER-14710.
- }
- else if ( s == "v" || s == "unique" ||
- s == "key" || s == "name" ) {
- // covered above
- }
- else {
- b.append(e);
- }
+ string name = o["name"].String();
+ if (IndexDescriptor::isIdIndexPattern(key)) {
+ name = "_id_";
+ }
+ b.append("name", name);
+
+ {
+ BSONObjIterator i(o);
+ while (i.more()) {
+ BSONElement e = i.next();
+ string s = e.fieldName();
+
+ if (s == "_id") {
+ // skip
+ } else if (s == "dropDups") {
+ // dropDups is silently ignored and removed from the spec as of SERVER-14710.
+ } else if (s == "v" || s == "unique" || s == "key" || s == "name") {
+ // covered above
+ } else {
+ b.append(e);
}
}
-
- return b.obj();
}
+ return b.obj();
+}
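Putting the steps above together, a hedged before/after for _fixIndexSpec (values illustrative; "v" defaults to DefaultIndexVersionNumber, shown here as 1):

// in:  {key: {_id: true}, name: "weird", ns: "test.c", unique: 1,
//       dropDups: true, _id: ObjectId("...")}
// out: {v: 1, unique: true, key: {_id: 1}, name: "_id_", ns: "test.c"}
// "unique" is normalized to a real boolean, the key/name collapse to the
// canonical _id index, "dropDups" and "_id" are dropped, and any remaining
// fields pass through unchanged.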
}
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 6b746fe6fd5..9cb9186a25e 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -40,318 +40,309 @@
namespace mongo {
- class Client;
- class Collection;
+class Client;
+class Collection;
- class IndexDescriptor;
- class IndexAccessMethod;
+class IndexDescriptor;
+class IndexAccessMethod;
- /**
- * how many: 1 per Collection
- * lifecycle: attached to a Collection
- */
- class IndexCatalog {
- public:
- IndexCatalog( Collection* collection );
- ~IndexCatalog();
-
- // must be called before used
- Status init(OperationContext* txn);
-
- bool ok() const;
-
- // ---- accessors -----
-
- bool haveAnyIndexes() const;
- int numIndexesTotal( OperationContext* txn ) const;
- int numIndexesReady( OperationContext* txn ) const;
- int numIndexesInProgress( OperationContext* txn ) const {
- return numIndexesTotal(txn) - numIndexesReady(txn);
- }
-
- /**
- * this is in "alive" until the Collection goes away
- * in which case everything from this tree has to go away
- */
-
- bool haveIdIndex( OperationContext* txn ) const;
+/**
+ * how many: 1 per Collection
+ * lifecycle: attached to a Collection
+ */
+class IndexCatalog {
+public:
+ IndexCatalog(Collection* collection);
+ ~IndexCatalog();
- /**
- * Returns the spec for the id index to create by default for this collection.
- */
- BSONObj getDefaultIdIndexSpec() const;
+    // must be called before the catalog is used
+ Status init(OperationContext* txn);
- IndexDescriptor* findIdIndex( OperationContext* txn ) const;
+ bool ok() const;
- /**
- * @return null if cannot find
- */
- IndexDescriptor* findIndexByName( OperationContext* txn,
- StringData name,
- bool includeUnfinishedIndexes = false ) const;
+ // ---- accessors -----
- /**
- * @return null if cannot find
- */
- IndexDescriptor* findIndexByKeyPattern( OperationContext* txn,
- const BSONObj& key,
- bool includeUnfinishedIndexes = false ) const;
-
- /**
- * Returns an index suitable for shard key range scans.
- *
- * This index:
- * - must be prefixed by 'shardKey', and
- * - must not be a partial index.
- *
- * If the parameter 'requireSingleKey' is true, then this index additionally must not be
- * multi-key.
- *
- * If no such index exists, returns NULL.
- */
- IndexDescriptor* findShardKeyPrefixedIndex( OperationContext* txn,
- const BSONObj& shardKey,
- bool requireSingleKey ) const;
-
- void findIndexByType( OperationContext* txn,
- const std::string& type,
- std::vector<IndexDescriptor*>& matches,
- bool includeUnfinishedIndexes = false ) const;
+ bool haveAnyIndexes() const;
+ int numIndexesTotal(OperationContext* txn) const;
+ int numIndexesReady(OperationContext* txn) const;
+ int numIndexesInProgress(OperationContext* txn) const {
+ return numIndexesTotal(txn) - numIndexesReady(txn);
+ }
+ /**
+     * This stays alive until the Collection goes away,
+     * at which point everything in this tree has to go away too.
+ */
- /**
- * Reload the index definition for 'oldDesc' from the CollectionCatalogEntry. 'oldDesc'
- * must be a ready index that is already registered with the index catalog. Returns an
- * unowned pointer to the descriptor for the new index definition.
- *
- * Use this method to notify the IndexCatalog that the spec for this index has changed.
- *
- * It is invalid to dereference 'oldDesc' after calling this method. This method broadcasts
- * an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
- * this descriptor is now invalid.
- */
- const IndexDescriptor* refreshEntry( OperationContext* txn,
- const IndexDescriptor* oldDesc );
+ bool haveIdIndex(OperationContext* txn) const;
- // never returns NULL
- const IndexCatalogEntry* getEntry( const IndexDescriptor* desc ) const;
+ /**
+ * Returns the spec for the id index to create by default for this collection.
+ */
+ BSONObj getDefaultIdIndexSpec() const;
- IndexAccessMethod* getIndex( const IndexDescriptor* desc );
- const IndexAccessMethod* getIndex( const IndexDescriptor* desc ) const;
+ IndexDescriptor* findIdIndex(OperationContext* txn) const;
- /**
- * Returns a not-ok Status if there are any unfinished index builds. No new indexes should
- * be built when in this state.
- */
- Status checkUnfinished() const;
-
- class IndexIterator {
- public:
- bool more();
- IndexDescriptor* next();
+ /**
+ * @return null if cannot find
+ */
+ IndexDescriptor* findIndexByName(OperationContext* txn,
+ StringData name,
+ bool includeUnfinishedIndexes = false) const;
- // returns the access method for the last return IndexDescriptor
- IndexAccessMethod* accessMethod( const IndexDescriptor* desc );
+ /**
+ * @return null if cannot find
+ */
+ IndexDescriptor* findIndexByKeyPattern(OperationContext* txn,
+ const BSONObj& key,
+ bool includeUnfinishedIndexes = false) const;
- // returns the IndexCatalogEntry for the last return IndexDescriptor
- IndexCatalogEntry* catalogEntry( const IndexDescriptor* desc );
+ /**
+ * Returns an index suitable for shard key range scans.
+ *
+ * This index:
+ * - must be prefixed by 'shardKey', and
+ * - must not be a partial index.
+ *
+ * If the parameter 'requireSingleKey' is true, then this index additionally must not be
+ * multi-key.
+ *
+ * If no such index exists, returns NULL.
+ */
+ IndexDescriptor* findShardKeyPrefixedIndex(OperationContext* txn,
+ const BSONObj& shardKey,
+ bool requireSingleKey) const;
- private:
- IndexIterator( OperationContext* txn,
- const IndexCatalog* cat,
- bool includeUnfinishedIndexes );
+ void findIndexByType(OperationContext* txn,
+ const std::string& type,
+ std::vector<IndexDescriptor*>& matches,
+ bool includeUnfinishedIndexes = false) const;
- void _advance();
- bool _includeUnfinishedIndexes;
+ /**
+ * Reload the index definition for 'oldDesc' from the CollectionCatalogEntry. 'oldDesc'
+ * must be a ready index that is already registered with the index catalog. Returns an
+ * unowned pointer to the descriptor for the new index definition.
+ *
+ * Use this method to notify the IndexCatalog that the spec for this index has changed.
+ *
+ * It is invalid to dereference 'oldDesc' after calling this method. This method broadcasts
+ * an invalidateAll() on the cursor manager to notify other users of the IndexCatalog that
+ * this descriptor is now invalid.
+ */
+ const IndexDescriptor* refreshEntry(OperationContext* txn, const IndexDescriptor* oldDesc);
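
A minimal caller-side sketch of the contract above (the collMod-style caller and variable names are assumptions, not part of this diff):

    const IndexDescriptor* desc = idxCatalog->findIndexByName(txn, "a_1");
    // ... the index spec is modified through the CollectionCatalogEntry ...
    desc = idxCatalog->refreshEntry(txn, desc);
    // 'desc' now points at the reloaded descriptor; the old pointer must not be
    // dereferenced, and open cursors were invalidated via the invalidateAll() broadcast.
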
- OperationContext* const _txn;
- const IndexCatalog* _catalog;
- IndexCatalogEntryContainer::const_iterator _iterator;
+ // never returns NULL
+ const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const;
- bool _start; // only true before we've called next() or more()
+ IndexAccessMethod* getIndex(const IndexDescriptor* desc);
+ const IndexAccessMethod* getIndex(const IndexDescriptor* desc) const;
- IndexCatalogEntry* _prev;
- IndexCatalogEntry* _next;
+ /**
+ * Returns a not-ok Status if there are any unfinished index builds. No new indexes should
+ * be built when in this state.
+ */
+ Status checkUnfinished() const;
- friend class IndexCatalog;
- };
+ class IndexIterator {
+ public:
+ bool more();
+ IndexDescriptor* next();
- IndexIterator getIndexIterator( OperationContext* txn,
- bool includeUnfinishedIndexes ) const {
- return IndexIterator( txn, this, includeUnfinishedIndexes );
- };
+ // returns the access method for the last returned IndexDescriptor
+ IndexAccessMethod* accessMethod(const IndexDescriptor* desc);
- // ---- index set modifiers ------
+ // returns the IndexCatalogEntry for the last returned IndexDescriptor
+ IndexCatalogEntry* catalogEntry(const IndexDescriptor* desc);
- /**
- * Call this only on an empty collection from inside a WriteUnitOfWork. Index creation on an
- * empty collection can be rolled back as part of a larger WUOW.
- */
- Status createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
+ private:
+ IndexIterator(OperationContext* txn,
+ const IndexCatalog* cat,
+ bool includeUnfinishedIndexes);
- StatusWith<BSONObj> prepareSpecForCreate( OperationContext* txn,
- const BSONObj& original ) const;
+ void _advance();
- Status dropAllIndexes(OperationContext* txn,
- bool includingIdIndex );
+ bool _includeUnfinishedIndexes;
- Status dropIndex(OperationContext* txn,
- IndexDescriptor* desc );
+ OperationContext* const _txn;
+ const IndexCatalog* _catalog;
+ IndexCatalogEntryContainer::const_iterator _iterator;
- /**
- * will drop all incompleted indexes and return specs
- * after this, the indexes can be rebuilt
- */
- std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
+ bool _start; // only true before we've called next() or more()
+ IndexCatalogEntry* _prev;
+ IndexCatalogEntry* _next;
- struct IndexKillCriteria {
- std::string ns;
- std::string name;
- BSONObj key;
- };
+ friend class IndexCatalog;
+ };
- // ---- modify single index
+ IndexIterator getIndexIterator(OperationContext* txn, bool includeUnfinishedIndexes) const {
+ return IndexIterator(txn, this, includeUnfinishedIndexes);
+ };
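
The iterator above follows a more()/next() protocol rather than STL-style iteration; a minimal usage sketch, with everything outside this header assumed:

    IndexCatalog::IndexIterator it =
        idxCatalog->getIndexIterator(txn, /*includeUnfinishedIndexes=*/false);
    while (it.more()) {
        IndexDescriptor* desc = it.next();
        // accessMethod()/catalogEntry() are only valid for the descriptor just returned.
        IndexAccessMethod* iam = it.accessMethod(desc);
        // ... inspect desc->keyPattern(), iam, etc. ...
    }
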
- bool isMultikey( OperationContext* txn, const IndexDescriptor* idex );
+ // ---- index set modifiers ------
- // --- these probably become private?
+ /**
+ * Call this only on an empty collection from inside a WriteUnitOfWork. Index creation on an
+ * empty collection can be rolled back as part of a larger WUOW.
+ */
+ Status createIndexOnEmptyCollection(OperationContext* txn, BSONObj spec);
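
Because index creation on an empty collection participates in the enclosing WriteUnitOfWork, collection and index creation can be made atomic. A sketch under that assumption (the Database::createCollection caller is illustrative):

    WriteUnitOfWork wunit(txn);
    Collection* coll = db->createCollection(txn, ns);  // empty by construction
    Status s = coll->getIndexCatalog()->createIndexOnEmptyCollection(txn, spec);
    if (!s.isOK())
        return s;  // abandoning 'wunit' rolls back both the collection and the index
    wunit.commit();
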
+ StatusWith<BSONObj> prepareSpecForCreate(OperationContext* txn, const BSONObj& original) const;
- /**
- * disk creation order
- * 1) system.indexes entry
- * 2) collection's NamespaceDetails
- * a) info + head
- * b) _indexBuildsInProgress++
- * 3) indexes entry in .ns file
- * 4) system.namespaces entry for index ns
- */
- class IndexBuildBlock {
- MONGO_DISALLOW_COPYING(IndexBuildBlock);
- public:
- IndexBuildBlock(OperationContext* txn,
- Collection* collection,
- const BSONObj& spec );
+ Status dropAllIndexes(OperationContext* txn, bool includingIdIndex);
- ~IndexBuildBlock();
+ Status dropIndex(OperationContext* txn, IndexDescriptor* desc);
- Status init();
+ /**
+ * Drops all incomplete indexes and returns their specs.
+ * After this, the indexes can be rebuilt.
+ */
+ std::vector<BSONObj> getAndClearUnfinishedIndexes(OperationContext* txn);
- void success();
- /**
- * index build failed, clean up meta data
- */
- void fail();
+ struct IndexKillCriteria {
+ std::string ns;
+ std::string name;
+ BSONObj key;
+ };
- IndexCatalogEntry* getEntry() { return _entry; }
+ // ---- modify single index
- private:
- Collection* const _collection;
- IndexCatalog* const _catalog;
- const std::string _ns;
+ bool isMultikey(OperationContext* txn, const IndexDescriptor* idx);
- BSONObj _spec;
+ // --- these probably become private?
- std::string _indexName;
- std::string _indexNamespace;
- IndexCatalogEntry* _entry;
- bool _inProgress;
+ /**
+ * disk creation order
+ * 1) system.indexes entry
+ * 2) collection's NamespaceDetails
+ * a) info + head
+ * b) _indexBuildsInProgress++
+ * 3) indexes entry in .ns file
+ * 4) system.namespaces entry for index ns
+ */
+ class IndexBuildBlock {
+ MONGO_DISALLOW_COPYING(IndexBuildBlock);
- OperationContext* _txn;
- };
+ public:
+ IndexBuildBlock(OperationContext* txn, Collection* collection, const BSONObj& spec);
- // ----- data modifiers ------
+ ~IndexBuildBlock();
- // this throws for now
- Status indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId &loc);
+ Status init();
- void unindexRecord(OperationContext* txn,
- const BSONObj& obj,
- const RecordId& loc,
- bool noWarn);
+ void success();
- // ------- temp internal -------
+ /**
+ * index build failed, clean up meta data
+ */
+ void fail();
- std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
- return _getAccessMethodName( txn, keyPattern );
+ IndexCatalogEntry* getEntry() {
+ return _entry;
}
- Status _upgradeDatabaseMinorVersionIfNeeded( OperationContext* txn,
- const std::string& newPluginName );
+ private:
+ Collection* const _collection;
+ IndexCatalog* const _catalog;
+ const std::string _ns;
- // public static helpers
+ BSONObj _spec;
- static BSONObj fixIndexKey( const BSONObj& key );
+ std::string _indexName;
+ std::string _indexNamespace;
- private:
- static const BSONObj _idObj; // { _id : 1 }
+ IndexCatalogEntry* _entry;
+ bool _inProgress;
- bool _shouldOverridePlugin( OperationContext* txn, const BSONObj& keyPattern ) const;
+ OperationContext* _txn;
+ };
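
Taken together, the block implements an init()/success()/fail() protocol around the disk creation order listed above. A condensed sketch of a foreground build using it (error handling abbreviated, caller names assumed):

    WriteUnitOfWork wunit(txn);
    IndexCatalog::IndexBuildBlock block(txn, collection, spec);
    uassertStatusOK(block.init());  // writes the catalog metadata for the in-progress build
    // ... populate the index via block.getEntry()->accessMethod() ...
    block.success();                // flips the index to the "ready" state
    wunit.commit();                 // a failure path calls block.fail() to clean up metadata
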
- /**
- * This differs from IndexNames::findPluginName in that returns the plugin name we *should*
- * use, not the plugin name inside of the provided key pattern. To understand when these
- * differ, see shouldOverridePlugin.
- */
- std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
+ // ----- data modifiers ------
- void _checkMagic() const;
+ // this throws for now
+ Status indexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc);
- Status _indexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc );
+ void unindexRecord(OperationContext* txn, const BSONObj& obj, const RecordId& loc, bool noWarn);
- Status _unindexRecord(OperationContext* txn,
- IndexCatalogEntry* index,
- const BSONObj& obj,
- const RecordId &loc,
- bool logIfError);
+ // ------- temp internal -------
- /**
- * this does no sanity checks
- */
- Status _dropIndex(OperationContext* txn,
- IndexCatalogEntry* entry );
+ std::string getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) {
+ return _getAccessMethodName(txn, keyPattern);
+ }
- // just does disk hanges
- // doesn't change memory state, etc...
- void _deleteIndexFromDisk( OperationContext* txn,
- const std::string& indexName,
- const std::string& indexNamespace );
+ Status _upgradeDatabaseMinorVersionIfNeeded(OperationContext* txn,
+ const std::string& newPluginName);
- // descriptor ownership passes to _setupInMemoryStructures
- // initFromDisk: Avoids registering a change to undo this operation when set to true.
- // You must set this flag if calling this function outside of a UnitOfWork.
- IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
- IndexDescriptor* descriptor,
- bool initFromDisk);
+ // public static helpers
- // Apply a set of transformations to the user-provided index object 'spec' to make it
- // conform to the standard for insertion. This function adds the 'v' field if it didn't
- // exist, removes the '_id' field if it exists, applies plugin-level transformations if
- // appropriate, etc.
- static BSONObj _fixIndexSpec( const BSONObj& spec );
+ static BSONObj fixIndexKey(const BSONObj& key);
- Status _isSpecOk( const BSONObj& spec ) const;
+private:
+ static const BSONObj _idObj; // { _id : 1 }
- Status _doesSpecConflictWithExisting( OperationContext* txn, const BSONObj& spec ) const;
+ bool _shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const;
- int _magic;
- Collection* const _collection;
- const int _maxNumIndexesAllowed;
+ /**
+ * This differs from IndexNames::findPluginName in that it returns the plugin name we
+ * *should* use, not the plugin name inside of the provided key pattern. To understand
+ * when these differ, see _shouldOverridePlugin.
+ */
+ std::string _getAccessMethodName(OperationContext* txn, const BSONObj& keyPattern) const;
- IndexCatalogEntryContainer _entries;
+ void _checkMagic() const;
- // These are the index specs of indexes that were "leftover".
- // "Leftover" means they were unfinished when a mongod shut down.
- // Certain operations are prohibited until someone fixes.
- // Retrieve by calling getAndClearUnfinishedIndexes().
- std::vector<BSONObj> _unfinishedIndexes;
- };
+ Status _indexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc);
+
+ Status _unindexRecord(OperationContext* txn,
+ IndexCatalogEntry* index,
+ const BSONObj& obj,
+ const RecordId& loc,
+ bool logIfError);
+ /**
+ * this does no sanity checks
+ */
+ Status _dropIndex(OperationContext* txn, IndexCatalogEntry* entry);
+
+ // just does disk changes
+ // doesn't change memory state, etc...
+ void _deleteIndexFromDisk(OperationContext* txn,
+ const std::string& indexName,
+ const std::string& indexNamespace);
+
+ // descriptor ownership passes to _setupInMemoryStructures
+ // initFromDisk: Avoids registering a change to undo this operation when set to true.
+ // You must set this flag if calling this function outside of a UnitOfWork.
+ IndexCatalogEntry* _setupInMemoryStructures(OperationContext* txn,
+ IndexDescriptor* descriptor,
+ bool initFromDisk);
+
+ // Apply a set of transformations to the user-provided index object 'spec' to make it
+ // conform to the standard for insertion. This function adds the 'v' field if it didn't
+ // exist, removes the '_id' field if it exists, applies plugin-level transformations if
+ // appropriate, etc.
+ static BSONObj _fixIndexSpec(const BSONObj& spec);
+
+ Status _isSpecOk(const BSONObj& spec) const;
+
+ Status _doesSpecConflictWithExisting(OperationContext* txn, const BSONObj& spec) const;
+
+ int _magic;
+ Collection* const _collection;
+ const int _maxNumIndexesAllowed;
+
+ IndexCatalogEntryContainer _entries;
+
+ // These are the index specs of indexes that were "leftover".
+ // "Leftover" means they were unfinished when a mongod shut down.
+ // Certain operations are prohibited until someone fixes them.
+ // Retrieve by calling getAndClearUnfinishedIndexes().
+ std::vector<BSONObj> _unfinishedIndexes;
+};
}
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp
index 41ad093e8cc..55f5511680b 100644
--- a/src/mongo/db/catalog/index_catalog_entry.cpp
+++ b/src/mongo/db/catalog/index_catalog_entry.cpp
@@ -48,246 +48,240 @@
namespace mongo {
- using std::string;
+using std::string;
- class HeadManagerImpl : public HeadManager {
- public:
- HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) { }
- virtual ~HeadManagerImpl() { }
+class HeadManagerImpl : public HeadManager {
+public:
+ HeadManagerImpl(IndexCatalogEntry* ice) : _catalogEntry(ice) {}
+ virtual ~HeadManagerImpl() {}
- const RecordId getHead(OperationContext* txn) const {
- return _catalogEntry->head(txn);
- }
-
- void setHead(OperationContext* txn, const RecordId newHead) {
- _catalogEntry->setHead(txn, newHead);
- }
-
- private:
- // Not owned here.
- IndexCatalogEntry* _catalogEntry;
- };
-
- IndexCatalogEntry::IndexCatalogEntry( StringData ns,
- CollectionCatalogEntry* collection,
- IndexDescriptor* descriptor,
- CollectionInfoCache* infoCache )
- : _ns( ns.toString() ),
- _collection( collection ),
- _descriptor( descriptor ),
- _infoCache( infoCache ),
- _accessMethod( NULL ),
- _headManager(new HeadManagerImpl(this)),
- _ordering( Ordering::make( descriptor->keyPattern() ) ),
- _isReady( false ) {
-
- _descriptor->_cachedEntry = this;
+ const RecordId getHead(OperationContext* txn) const {
+ return _catalogEntry->head(txn);
}
- IndexCatalogEntry::~IndexCatalogEntry() {
- _descriptor->_cachedEntry = NULL; // defensive
-
- delete _headManager;
- delete _accessMethod;
- delete _descriptor;
+ void setHead(OperationContext* txn, const RecordId newHead) {
+ _catalogEntry->setHead(txn, newHead);
}
- void IndexCatalogEntry::init( OperationContext* txn,
- IndexAccessMethod* accessMethod ) {
- verify( _accessMethod == NULL );
- _accessMethod = accessMethod;
-
- _isReady = _catalogIsReady( txn );
- _head = _catalogHead( txn );
- _isMultikey = _catalogIsMultikey( txn );
-
- BSONElement filterElement = _descriptor->getInfoElement("partialFilterExpression");
- if ( filterElement.type() ) {
- invariant( filterElement.isABSONObj() );
- BSONObj filter = filterElement.Obj();
- StatusWithMatchExpression res = MatchExpressionParser::parse( filter );
- // this should be checked in create, so can blow up here
- invariantOK( res.getStatus() );
- _filterExpression.reset( res.getValue() );
- LOG(2) << "have filter expression for "
- << _ns << " " << _descriptor->indexName()
- << " " << filter;
- }
+private:
+ // Not owned here.
+ IndexCatalogEntry* _catalogEntry;
+};
+
+IndexCatalogEntry::IndexCatalogEntry(StringData ns,
+ CollectionCatalogEntry* collection,
+ IndexDescriptor* descriptor,
+ CollectionInfoCache* infoCache)
+ : _ns(ns.toString()),
+ _collection(collection),
+ _descriptor(descriptor),
+ _infoCache(infoCache),
+ _accessMethod(NULL),
+ _headManager(new HeadManagerImpl(this)),
+ _ordering(Ordering::make(descriptor->keyPattern())),
+ _isReady(false) {
+ _descriptor->_cachedEntry = this;
+}
+
+IndexCatalogEntry::~IndexCatalogEntry() {
+ _descriptor->_cachedEntry = NULL; // defensive
+
+ delete _headManager;
+ delete _accessMethod;
+ delete _descriptor;
+}
+
+void IndexCatalogEntry::init(OperationContext* txn, IndexAccessMethod* accessMethod) {
+ verify(_accessMethod == NULL);
+ _accessMethod = accessMethod;
+
+ _isReady = _catalogIsReady(txn);
+ _head = _catalogHead(txn);
+ _isMultikey = _catalogIsMultikey(txn);
+
+ BSONElement filterElement = _descriptor->getInfoElement("partialFilterExpression");
+ if (filterElement.type()) {
+ invariant(filterElement.isABSONObj());
+ BSONObj filter = filterElement.Obj();
+ StatusWithMatchExpression res = MatchExpressionParser::parse(filter);
+ // this was already validated at create time, so we can blow up here
+ invariantOK(res.getStatus());
+ _filterExpression.reset(res.getValue());
+ LOG(2) << "have filter expression for " << _ns << " " << _descriptor->indexName() << " "
+ << filter;
}
+}
- const RecordId& IndexCatalogEntry::head( OperationContext* txn ) const {
- DEV invariant( _head == _catalogHead( txn ) );
- return _head;
- }
+const RecordId& IndexCatalogEntry::head(OperationContext* txn) const {
+ DEV invariant(_head == _catalogHead(txn));
+ return _head;
+}
- bool IndexCatalogEntry::isReady( OperationContext* txn ) const {
- DEV invariant( _isReady == _catalogIsReady( txn ) );
- return _isReady;
- }
+bool IndexCatalogEntry::isReady(OperationContext* txn) const {
+ DEV invariant(_isReady == _catalogIsReady(txn));
+ return _isReady;
+}
- bool IndexCatalogEntry::isMultikey() const {
- return _isMultikey;
- }
+bool IndexCatalogEntry::isMultikey() const {
+ return _isMultikey;
+}
+
+// ---
- // ---
+void IndexCatalogEntry::setIsReady(bool newIsReady) {
+ _isReady = newIsReady;
+}
- void IndexCatalogEntry::setIsReady( bool newIsReady ) {
- _isReady = newIsReady;
+class IndexCatalogEntry::SetHeadChange : public RecoveryUnit::Change {
+public:
+ SetHeadChange(IndexCatalogEntry* ice, RecordId oldHead) : _ice(ice), _oldHead(oldHead) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _ice->_head = _oldHead;
}
- class IndexCatalogEntry::SetHeadChange : public RecoveryUnit::Change {
- public:
- SetHeadChange(IndexCatalogEntry* ice, RecordId oldHead) :_ice(ice), _oldHead(oldHead) {
- }
+ IndexCatalogEntry* _ice;
+ const RecordId _oldHead;
+};
- virtual void commit() {}
- virtual void rollback() { _ice->_head = _oldHead; }
+void IndexCatalogEntry::setHead(OperationContext* txn, RecordId newHead) {
+ _collection->setIndexHead(txn, _descriptor->indexName(), newHead);
- IndexCatalogEntry* _ice;
- const RecordId _oldHead;
- };
+ txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
+ _head = newHead;
+}
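
setHead() shows the recurring RecoveryUnit::Change idiom in this file: perform the durable catalog write first, then register a Change whose rollback() restores the cached in-memory value. Reduced to its essentials (SetFooChange and the cached field are made-up names):

    class SetFooChange : public RecoveryUnit::Change {
    public:
        SetFooChange(int* foo, int oldVal) : _foo(foo), _oldVal(oldVal) {}
        virtual void commit() {}                      // the durable write already happened
        virtual void rollback() { *_foo = _oldVal; }  // restore the cached value on abort
    private:
        int* _foo;
        const int _oldVal;
    };

    // caller: durable write, then register, then mutate the cache
    txn->recoveryUnit()->registerChange(new SetFooChange(&_cachedFoo, _cachedFoo));
    _cachedFoo = newFoo;
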
- void IndexCatalogEntry::setHead( OperationContext* txn, RecordId newHead ) {
- _collection->setIndexHead( txn,
- _descriptor->indexName(),
- newHead );
- txn->recoveryUnit()->registerChange(new SetHeadChange(this, _head));
- _head = newHead;
+/**
+ * RAII class, which associates a new RecoveryUnit with an OperationContext for the purposes
+ * of simulating a sub-transaction. Takes ownership of the new recovery unit and frees it at
+ * destruction time.
+ */
+class RecoveryUnitSwap {
+public:
+ RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
+ : _txn(txn),
+ _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
+ _oldRecoveryUnitState(
+ _txn->setRecoveryUnit(newRecoveryUnit, OperationContext::kNotInUnitOfWork)),
+ _newRecoveryUnit(newRecoveryUnit) {}
+
+ ~RecoveryUnitSwap() {
+ _txn->releaseRecoveryUnit();
+ _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
}
+private:
+ // Not owned
+ OperationContext* const _txn;
- /**
- * RAII class, which associates a new RecoveryUnit with an OperationContext for the purposes
- * of simulating a sub-transaction. Takes ownership of the new recovery unit and frees it at
- * destruction time.
- */
- class RecoveryUnitSwap {
- public:
- RecoveryUnitSwap(OperationContext* txn, RecoveryUnit* newRecoveryUnit)
- : _txn(txn),
- _oldRecoveryUnit(_txn->releaseRecoveryUnit()),
- _oldRecoveryUnitState(_txn->setRecoveryUnit(newRecoveryUnit,
- OperationContext::kNotInUnitOfWork)),
- _newRecoveryUnit(newRecoveryUnit) { }
-
- ~RecoveryUnitSwap() {
- _txn->releaseRecoveryUnit();
- _txn->setRecoveryUnit(_oldRecoveryUnit, _oldRecoveryUnitState);
- }
+ // Owned, but life-time is not controlled
+ RecoveryUnit* const _oldRecoveryUnit;
+ OperationContext::RecoveryUnitState const _oldRecoveryUnitState;
- private:
- // Not owned
- OperationContext* const _txn;
+ // Owned and life-time is controlled
+ const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
+};
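
A usage sketch, mirroring setMultikey() below: the swap lets a metadata write commit in its own recovery unit, independent of the outer transaction's snapshot:

    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
    WriteUnitOfWork wuow(txn);
    // ... perform the side write ...
    wuow.commit();  // ~RecoveryUnitSwap then restores the original recovery unit and its state
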
- // Owned, but life-time is not controlled
- RecoveryUnit* const _oldRecoveryUnit;
- OperationContext::RecoveryUnitState const _oldRecoveryUnitState;
+void IndexCatalogEntry::setMultikey(OperationContext* txn) {
+ if (isMultikey()) {
+ return;
+ }
- // Owned and life-time is controlled
- const std::unique_ptr<RecoveryUnit> _newRecoveryUnit;
- };
+ // Only one thread should set the multi-key value per collection, because the metadata for
+ // a collection is one large document.
+ Lock::ResourceLock collMDLock(txn->lockState(), ResourceId(RESOURCE_METADATA, _ns), MODE_X);
- void IndexCatalogEntry::setMultikey(OperationContext* txn) {
- if (isMultikey()) {
- return;
- }
+ // Check again in case we blocked on the MD lock and another thread beat us to setting the
+ // multiKey metadata for this index.
+ if (isMultikey()) {
+ return;
+ }
- // Only one thread should set the multi-key value per collection, because the metadata for
- // a collection is one large document.
- Lock::ResourceLock collMDLock(txn->lockState(),
- ResourceId(RESOURCE_METADATA, _ns),
- MODE_X);
+ // This effectively emulates a sub-transaction off the main transaction, which invoked
+ // setMultikey. The reason we need it is to avoid artificial WriteConflicts, which happen
+ // with snapshot isolation.
+ {
+ StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
+ RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
- // Check again in case we blocked on the MD lock and another thread beat us to setting the
- // multiKey metadata for this index.
- if (isMultikey()) {
- return;
- }
+ WriteUnitOfWork wuow(txn);
- // This effectively emulates a sub-transaction off the main transaction, which invoked
- // setMultikey. The reason we need is to avoid artificial WriteConflicts, which happen
- // with snapshot isolation.
- {
- StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
- RecoveryUnitSwap ruSwap(txn, storageEngine->newRecoveryUnit());
-
- WriteUnitOfWork wuow(txn);
-
- if (_collection->setIndexIsMultikey(txn, _descriptor->indexName())) {
- if (_infoCache) {
- LOG(1) << _ns << ": clearing plan cache - index "
- << _descriptor->keyPattern() << " set to multi key.";
- _infoCache->clearQueryCache();
- }
+ if (_collection->setIndexIsMultikey(txn, _descriptor->indexName())) {
+ if (_infoCache) {
+ LOG(1) << _ns << ": clearing plan cache - index " << _descriptor->keyPattern()
+ << " set to multi key.";
+ _infoCache->clearQueryCache();
}
-
- wuow.commit();
}
- _isMultikey = true;
+ wuow.commit();
}
- // ----
-
- bool IndexCatalogEntry::_catalogIsReady( OperationContext* txn ) const {
- return _collection->isIndexReady( txn, _descriptor->indexName() );
- }
+ _isMultikey = true;
+}
- RecordId IndexCatalogEntry::_catalogHead( OperationContext* txn ) const {
- return _collection->getIndexHead( txn, _descriptor->indexName() );
- }
+// ----
- bool IndexCatalogEntry::_catalogIsMultikey( OperationContext* txn ) const {
- return _collection->isIndexMultikey( txn, _descriptor->indexName() );
- }
+bool IndexCatalogEntry::_catalogIsReady(OperationContext* txn) const {
+ return _collection->isIndexReady(txn, _descriptor->indexName());
+}
- // ------------------
+RecordId IndexCatalogEntry::_catalogHead(OperationContext* txn) const {
+ return _collection->getIndexHead(txn, _descriptor->indexName());
+}
- const IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) const {
- if ( desc->_cachedEntry )
- return desc->_cachedEntry;
+bool IndexCatalogEntry::_catalogIsMultikey(OperationContext* txn) const {
+ return _collection->isIndexMultikey(txn, _descriptor->indexName());
+}
- for ( const_iterator i = begin(); i != end(); ++i ) {
- const IndexCatalogEntry* e = *i;
- if ( e->descriptor() == desc )
- return e;
- }
- return NULL;
- }
+// ------------------
- IndexCatalogEntry* IndexCatalogEntryContainer::find( const IndexDescriptor* desc ) {
- if ( desc->_cachedEntry )
- return desc->_cachedEntry;
+const IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc) const {
+ if (desc->_cachedEntry)
+ return desc->_cachedEntry;
- for ( iterator i = begin(); i != end(); ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor() == desc )
- return e;
- }
- return NULL;
+ for (const_iterator i = begin(); i != end(); ++i) {
+ const IndexCatalogEntry* e = *i;
+ if (e->descriptor() == desc)
+ return e;
}
+ return NULL;
+}
- IndexCatalogEntry* IndexCatalogEntryContainer::find( const string& name ) {
- for ( iterator i = begin(); i != end(); ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor()->indexName() == name )
- return e;
- }
- return NULL;
+IndexCatalogEntry* IndexCatalogEntryContainer::find(const IndexDescriptor* desc) {
+ if (desc->_cachedEntry)
+ return desc->_cachedEntry;
+
+ for (iterator i = begin(); i != end(); ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor() == desc)
+ return e;
}
+ return NULL;
+}
- IndexCatalogEntry* IndexCatalogEntryContainer::release( const IndexDescriptor* desc ) {
- for ( std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
- i != _entries.mutableVector().end();
- ++i ) {
- IndexCatalogEntry* e = *i;
- if ( e->descriptor() != desc )
- continue;
- _entries.mutableVector().erase( i );
+IndexCatalogEntry* IndexCatalogEntryContainer::find(const string& name) {
+ for (iterator i = begin(); i != end(); ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor()->indexName() == name)
return e;
- }
- return NULL;
}
+ return NULL;
+}
+
+IndexCatalogEntry* IndexCatalogEntryContainer::release(const IndexDescriptor* desc) {
+ for (std::vector<IndexCatalogEntry*>::iterator i = _entries.mutableVector().begin();
+ i != _entries.mutableVector().end();
+ ++i) {
+ IndexCatalogEntry* e = *i;
+ if (e->descriptor() != desc)
+ continue;
+ _entries.mutableVector().erase(i);
+ return e;
+ }
+ return NULL;
+}
} // namespace mongo
diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h
index ad919b2bca5..d7ab07f37ec 100644
--- a/src/mongo/db/catalog/index_catalog_entry.h
+++ b/src/mongo/db/catalog/index_catalog_entry.h
@@ -38,130 +38,155 @@
namespace mongo {
- class CollectionCatalogEntry;
- class CollectionInfoCache;
- class HeadManager;
- class IndexAccessMethod;
- class IndexDescriptor;
- class MatchExpression;
- class OperationContext;
+class CollectionCatalogEntry;
+class CollectionInfoCache;
+class HeadManager;
+class IndexAccessMethod;
+class IndexDescriptor;
+class MatchExpression;
+class OperationContext;
- class IndexCatalogEntry {
- MONGO_DISALLOW_COPYING( IndexCatalogEntry );
- public:
- IndexCatalogEntry( StringData ns,
- CollectionCatalogEntry* collection, // not owned
- IndexDescriptor* descriptor, // ownership passes to me
- CollectionInfoCache* infoCache ); // not owned, optional
+class IndexCatalogEntry {
+ MONGO_DISALLOW_COPYING(IndexCatalogEntry);
- ~IndexCatalogEntry();
+public:
+ IndexCatalogEntry(StringData ns,
+ CollectionCatalogEntry* collection, // not owned
+ IndexDescriptor* descriptor, // ownership passes to me
+ CollectionInfoCache* infoCache); // not owned, optional
- const std::string& ns() const { return _ns; }
+ ~IndexCatalogEntry();
- void init( OperationContext* txn,
- IndexAccessMethod* accessMethod );
+ const std::string& ns() const {
+ return _ns;
+ }
- IndexDescriptor* descriptor() { return _descriptor; }
- const IndexDescriptor* descriptor() const { return _descriptor; }
+ void init(OperationContext* txn, IndexAccessMethod* accessMethod);
- IndexAccessMethod* accessMethod() { return _accessMethod; }
- const IndexAccessMethod* accessMethod() const { return _accessMethod; }
+ IndexDescriptor* descriptor() {
+ return _descriptor;
+ }
+ const IndexDescriptor* descriptor() const {
+ return _descriptor;
+ }
- const Ordering& ordering() const { return _ordering; }
+ IndexAccessMethod* accessMethod() {
+ return _accessMethod;
+ }
+ const IndexAccessMethod* accessMethod() const {
+ return _accessMethod;
+ }
- const MatchExpression* getFilterExpression() const { return _filterExpression.get(); }
+ const Ordering& ordering() const {
+ return _ordering;
+ }
- /// ---------------------
+ const MatchExpression* getFilterExpression() const {
+ return _filterExpression.get();
+ }
- const RecordId& head( OperationContext* txn ) const;
+ /// ---------------------
- void setHead( OperationContext* txn, RecordId newHead );
+ const RecordId& head(OperationContext* txn) const;
- void setIsReady( bool newIsReady );
+ void setHead(OperationContext* txn, RecordId newHead);
- HeadManager* headManager() const { return _headManager; }
+ void setIsReady(bool newIsReady);
- // --
+ HeadManager* headManager() const {
+ return _headManager;
+ }
- bool isMultikey() const;
+ // --
- void setMultikey( OperationContext* txn );
+ bool isMultikey() const;
- // if this ready is ready for queries
- bool isReady( OperationContext* txn ) const;
+ void setMultikey(OperationContext* txn);
- private:
+ // true if this index is ready for queries
+ bool isReady(OperationContext* txn) const;
- class SetMultikeyChange;
- class SetHeadChange;
+private:
+ class SetMultikeyChange;
+ class SetHeadChange;
- bool _catalogIsReady( OperationContext* txn ) const;
- RecordId _catalogHead( OperationContext* txn ) const;
- bool _catalogIsMultikey( OperationContext* txn ) const;
+ bool _catalogIsReady(OperationContext* txn) const;
+ RecordId _catalogHead(OperationContext* txn) const;
+ bool _catalogIsMultikey(OperationContext* txn) const;
- // -----
+ // -----
- std::string _ns;
+ std::string _ns;
- CollectionCatalogEntry* _collection; // not owned here
+ CollectionCatalogEntry* _collection; // not owned here
- IndexDescriptor* _descriptor; // owned here
+ IndexDescriptor* _descriptor; // owned here
- CollectionInfoCache* _infoCache; // not owned here
+ CollectionInfoCache* _infoCache; // not owned here
- IndexAccessMethod* _accessMethod; // owned here
+ IndexAccessMethod* _accessMethod; // owned here
- // Owned here.
- HeadManager* _headManager;
- std::unique_ptr<MatchExpression> _filterExpression;
+ // Owned here.
+ HeadManager* _headManager;
+ std::unique_ptr<MatchExpression> _filterExpression;
- // cached stuff
+ // cached stuff
- Ordering _ordering; // TODO: this might be b-tree specific
- bool _isReady; // cache of NamespaceDetails info
- RecordId _head; // cache of IndexDetails
- bool _isMultikey; // cache of NamespaceDetails info
- };
+ Ordering _ordering; // TODO: this might be b-tree specific
+ bool _isReady; // cache of NamespaceDetails info
+ RecordId _head; // cache of IndexDetails
+ bool _isMultikey; // cache of NamespaceDetails info
+};
- class IndexCatalogEntryContainer {
- public:
+class IndexCatalogEntryContainer {
+public:
+ typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
+ typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
- typedef std::vector<IndexCatalogEntry*>::const_iterator const_iterator;
- typedef std::vector<IndexCatalogEntry*>::const_iterator iterator;
+ const_iterator begin() const {
+ return _entries.vector().begin();
+ }
+ const_iterator end() const {
+ return _entries.vector().end();
+ }
- const_iterator begin() const { return _entries.vector().begin(); }
- const_iterator end() const { return _entries.vector().end(); }
+ iterator begin() {
+ return _entries.vector().begin();
+ }
+ iterator end() {
+ return _entries.vector().end();
+ }
- iterator begin() { return _entries.vector().begin(); }
- iterator end() { return _entries.vector().end(); }
+ // TODO: these have to be SUPER SUPER FAST
+ // maybe even some pointer trickery is in order
+ const IndexCatalogEntry* find(const IndexDescriptor* desc) const;
+ IndexCatalogEntry* find(const IndexDescriptor* desc);
- // TODO: these have to be SUPER SUPER FAST
- // maybe even some pointer trickery is in order
- const IndexCatalogEntry* find( const IndexDescriptor* desc ) const;
- IndexCatalogEntry* find( const IndexDescriptor* desc );
+ IndexCatalogEntry* find(const std::string& name);
- IndexCatalogEntry* find( const std::string& name );
+ unsigned size() const {
+ return _entries.size();
+ }
+ // -----------------
- unsigned size() const { return _entries.size(); }
- // -----------------
+ /**
+ * Removes from _entries and returns the matching entry or NULL if none matches.
+ */
+ IndexCatalogEntry* release(const IndexDescriptor* desc);
- /**
- * Removes from _entries and returns the matching entry or NULL if none matches.
- */
- IndexCatalogEntry* release( const IndexDescriptor* desc );
+ bool remove(const IndexDescriptor* desc) {
+ IndexCatalogEntry* entry = release(desc);
+ delete entry;
+ return entry;
+ }
- bool remove( const IndexDescriptor* desc ) {
- IndexCatalogEntry* entry = release(desc);
- delete entry;
- return entry;
- }
-
- // pass ownership to EntryContainer
- void add( IndexCatalogEntry* entry ) { _entries.mutableVector().push_back( entry ); }
-
- private:
- OwnedPointerVector<IndexCatalogEntry> _entries;
- };
+ // pass ownership to EntryContainer
+ void add(IndexCatalogEntry* entry) {
+ _entries.mutableVector().push_back(entry);
+ }
+private:
+ OwnedPointerVector<IndexCatalogEntry> _entries;
+};
}
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index b624c0151f0..c642fcb83a5 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -54,334 +54,318 @@
namespace mongo {
- using std::unique_ptr;
- using std::string;
- using std::endl;
-
- /**
- * On rollback sets MultiIndexBlock::_needToCleanup to true.
- */
- class MultiIndexBlock::SetNeedToCleanupOnRollback : public RecoveryUnit::Change {
- public:
- explicit SetNeedToCleanupOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
-
- virtual void commit() {}
- virtual void rollback() { _indexer->_needToCleanup = true; }
-
- private:
- MultiIndexBlock* const _indexer;
- };
-
- /**
- * On rollback in init(), cleans up _indexes so that ~MultiIndexBlock doesn't try to clean
- * up _indexes manually (since the changes were already rolled back).
- * Due to this, it is thus legal to call init() again after it fails.
- */
- class MultiIndexBlock::CleanupIndexesVectorOnRollback : public RecoveryUnit::Change {
- public:
- explicit CleanupIndexesVectorOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
-
- virtual void commit() {}
- virtual void rollback() { _indexer->_indexes.clear(); }
-
- private:
- MultiIndexBlock* const _indexer;
- };
-
- MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
- : _collection(collection),
- _txn(txn),
- _buildInBackground(false),
- _allowInterruption(false),
- _ignoreUnique(false),
- _needToCleanup(true) {
+using std::unique_ptr;
+using std::string;
+using std::endl;
+
+/**
+ * On rollback sets MultiIndexBlock::_needToCleanup to true.
+ */
+class MultiIndexBlock::SetNeedToCleanupOnRollback : public RecoveryUnit::Change {
+public:
+ explicit SetNeedToCleanupOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _indexer->_needToCleanup = true;
}
- MultiIndexBlock::~MultiIndexBlock() {
- if (!_needToCleanup || _indexes.empty())
- return;
- while (true) {
- try {
- WriteUnitOfWork wunit(_txn);
- // This cleans up all index builds.
- // Because that may need to write, it is done inside
- // of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
- for (size_t i = 0; i < _indexes.size(); i++) {
- _indexes[i].block->fail();
- }
- wunit.commit();
- return;
- }
- catch (const WriteConflictException& e) {
- continue;
- }
- catch (const std::exception& e) {
- error() << "Caught exception while cleaning up partially built indexes: "
- << e.what();
- }
- catch (...) {
- error() << "Caught unknown exception while cleaning up partially built indexes.";
- }
- fassertFailed(18644);
- }
+private:
+ MultiIndexBlock* const _indexer;
+};
+
+/**
+ * On rollback in init(), cleans up _indexes so that ~MultiIndexBlock doesn't try to clean
+ * up _indexes manually (since the changes were already rolled back).
+ * Due to this, it is thus legal to call init() again after it fails.
+ */
+class MultiIndexBlock::CleanupIndexesVectorOnRollback : public RecoveryUnit::Change {
+public:
+ explicit CleanupIndexesVectorOnRollback(MultiIndexBlock* indexer) : _indexer(indexer) {}
+
+ virtual void commit() {}
+ virtual void rollback() {
+ _indexer->_indexes.clear();
}
- void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
- for (size_t i = 0; i < specs->size(); i++) {
- Status status =
- _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
- if (status.code() == ErrorCodes::IndexAlreadyExists) {
- specs->erase(specs->begin() + i);
- i--;
+private:
+ MultiIndexBlock* const _indexer;
+};
+
+MultiIndexBlock::MultiIndexBlock(OperationContext* txn, Collection* collection)
+ : _collection(collection),
+ _txn(txn),
+ _buildInBackground(false),
+ _allowInterruption(false),
+ _ignoreUnique(false),
+ _needToCleanup(true) {}
+
+MultiIndexBlock::~MultiIndexBlock() {
+ if (!_needToCleanup || _indexes.empty())
+ return;
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(_txn);
+ // This cleans up all index builds.
+ // Because that may need to write, it is done inside
+ // of a WUOW. Nothing inside this block can fail, and it is made fatal if it does.
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ _indexes[i].block->fail();
}
- // intentionally ignoring other error codes
+ wunit.commit();
+ return;
+ } catch (const WriteConflictException& e) {
+ continue;
+ } catch (const std::exception& e) {
+ error() << "Caught exception while cleaning up partially built indexes: " << e.what();
+ } catch (...) {
+ error() << "Caught unknown exception while cleaning up partially built indexes.";
}
+ fassertFailed(18644);
}
+}
+
+void MultiIndexBlock::removeExistingIndexes(std::vector<BSONObj>* specs) const {
+ for (size_t i = 0; i < specs->size(); i++) {
+ Status status =
+ _collection->getIndexCatalog()->prepareSpecForCreate(_txn, (*specs)[i]).getStatus();
+ if (status.code() == ErrorCodes::IndexAlreadyExists) {
+ specs->erase(specs->begin() + i);
+ i--;
+ }
+ // intentionally ignoring other error codes
+ }
+}
- Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
- WriteUnitOfWork wunit(_txn);
-
- invariant(_indexes.empty());
- _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
+Status MultiIndexBlock::init(const std::vector<BSONObj>& indexSpecs) {
+ WriteUnitOfWork wunit(_txn);
- const string& ns = _collection->ns().ns();
+ invariant(_indexes.empty());
+ _txn->recoveryUnit()->registerChange(new CleanupIndexesVectorOnRollback(this));
- Status status = _collection->getIndexCatalog()->checkUnfinished();
- if ( !status.isOK() )
- return status;
+ const string& ns = _collection->ns().ns();
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- BSONObj info = indexSpecs[i];
+ Status status = _collection->getIndexCatalog()->checkUnfinished();
+ if (!status.isOK())
+ return status;
- string pluginName = IndexNames::findPluginName( info["key"].Obj() );
- if ( pluginName.size() ) {
- Status s =
- _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(_txn, pluginName);
- if ( !s.isOK() )
- return s;
- }
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
- // Any foreground indexes make all indexes be built in the foreground.
- _buildInBackground = (_buildInBackground && info["background"].trueValue());
+ string pluginName = IndexNames::findPluginName(info["key"].Obj());
+ if (pluginName.size()) {
+ Status s = _collection->getIndexCatalog()->_upgradeDatabaseMinorVersionIfNeeded(
+ _txn, pluginName);
+ if (!s.isOK())
+ return s;
}
- for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
- BSONObj info = indexSpecs[i];
- StatusWith<BSONObj> statusWithInfo =
- _collection->getIndexCatalog()->prepareSpecForCreate( _txn, info );
- Status status = statusWithInfo.getStatus();
- if ( !status.isOK() )
- return status;
- info = statusWithInfo.getValue();
-
- IndexToBuild index;
- index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
- status = index.block->init();
- if ( !status.isOK() )
- return status;
-
- index.real = index.block->getEntry()->accessMethod();
- status = index.real->initializeAsEmpty(_txn);
- if ( !status.isOK() )
- return status;
-
- if (!_buildInBackground) {
- // Bulk build process requires foreground building as it assumes nothing is changing
- // under it.
- index.bulk = index.real->initiateBulk();
- }
+ // Any foreground indexes make all indexes be built in the foreground.
+ _buildInBackground = (_buildInBackground && info["background"].trueValue());
+ }
+
+ for (size_t i = 0; i < indexSpecs.size(); i++) {
+ BSONObj info = indexSpecs[i];
+ StatusWith<BSONObj> statusWithInfo =
+ _collection->getIndexCatalog()->prepareSpecForCreate(_txn, info);
+ Status status = statusWithInfo.getStatus();
+ if (!status.isOK())
+ return status;
+ info = statusWithInfo.getValue();
- const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
+ IndexToBuild index;
+ index.block.reset(new IndexCatalog::IndexBuildBlock(_txn, _collection, info));
+ status = index.block->init();
+ if (!status.isOK())
+ return status;
- index.options.logIfError = false; // logging happens elsewhere if needed.
- index.options.dupsAllowed = !descriptor->unique()
- || _ignoreUnique
- || repl::getGlobalReplicationCoordinator()
- ->shouldIgnoreUniqueIndex(descriptor);
+ index.real = index.block->getEntry()->accessMethod();
+ status = index.real->initializeAsEmpty(_txn);
+ if (!status.isOK())
+ return status;
- log() << "build index on: " << ns << " properties: " << descriptor->toString();
- if (index.bulk)
- log() << "\t building index using bulk method";
+ if (!_buildInBackground) {
+ // Bulk build process requires foreground building as it assumes nothing is changing
+ // under it.
+ index.bulk = index.real->initiateBulk();
+ }
- index.filterExpression = index.block->getEntry()->getFilterExpression();
+ const IndexDescriptor* descriptor = index.block->getEntry()->descriptor();
- // TODO SERVER-14888 Suppress this in cases we don't want to audit.
- audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
+ index.options.logIfError = false; // logging happens elsewhere if needed.
+ index.options.dupsAllowed = !descriptor->unique() || _ignoreUnique ||
+ repl::getGlobalReplicationCoordinator()->shouldIgnoreUniqueIndex(descriptor);
- _indexes.push_back(std::move(index));
- }
+ log() << "build index on: " << ns << " properties: " << descriptor->toString();
+ if (index.bulk)
+ log() << "\t building index using bulk method";
- // this is so that operations examining the list of indexes know there are more keys to look
- // at when doing things like in place updates, etc...
- _collection->infoCache()->addedIndex(_txn);
+ index.filterExpression = index.block->getEntry()->getFilterExpression();
- if (_buildInBackground)
- _backgroundOperation.reset(new BackgroundOperation(ns));
+ // TODO SERVER-14888 Suppress this in cases we don't want to audit.
+ audit::logCreateIndex(_txn->getClient(), &info, descriptor->indexName(), ns);
- wunit.commit();
- return Status::OK();
+ _indexes.push_back(std::move(index));
}
- Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
- const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
- const auto numRecords = _collection->numRecords(_txn);
- stdx::unique_lock<Client> lk(*_txn->getClient());
- ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage,
- curopMessage,
- numRecords));
- lk.unlock();
-
- Timer t;
-
- unsigned long long n = 0;
-
- unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(_txn,
- _collection->ns().ns(),
- _collection));
- if (_buildInBackground) {
- invariant(_allowInterruption);
- exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
- }
- else {
- exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
- }
+ // this is so that operations examining the list of indexes know there are more keys to look
+ // at when doing things like in place updates, etc...
+ _collection->infoCache()->addedIndex(_txn);
- Snapshotted<BSONObj> objToIndex;
- RecordId loc;
- PlanExecutor::ExecState state;
- int retries = 0; // non-zero when retrying our last document.
- while (retries
- || (PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex,
- &loc)))) {
- try {
- if (_allowInterruption)
- _txn->checkForInterrupt();
-
- // Make sure we are working with the latest version of the document.
- if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId()
- && !_collection->findDoc(_txn, loc, &objToIndex)) {
- // doc was deleted so don't index it.
- retries = 0;
- continue;
- }
-
- // Done before insert so we can retry document if it WCEs.
- progress->setTotalWhileRunning( _collection->numRecords(_txn) );
-
- WriteUnitOfWork wunit(_txn);
- Status ret = insert(objToIndex.value(), loc);
- if (ret.isOK()) {
- wunit.commit();
- }
- else if (dupsOut && ret.code() == ErrorCodes::DuplicateKey) {
- // If dupsOut is non-null, we should only fail the specific insert that
- // led to a DuplicateKey rather than the whole index build.
- dupsOut->insert(loc);
- }
- else {
- // Fail the index build hard.
- return ret;
- }
-
- // Go to the next document
- progress->hit();
- n++;
- retries = 0;
- }
- catch (const WriteConflictException& wce) {
- CurOp::get(_txn)->debug().writeConflicts++;
- retries++; // logAndBackoff expects this to be 1 on first call.
- wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
-
- // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
- // around call to abandonSnapshot.
- exec->saveState();
- _txn->recoveryUnit()->abandonSnapshot();
- exec->restoreState(_txn); // Handles any WCEs internally.
- }
- }
+ if (_buildInBackground)
+ _backgroundOperation.reset(new BackgroundOperation(ns));
- if (state != PlanExecutor::IS_EOF) {
- // If the plan executor was killed, this means the DB/collection was dropped and so it
- // is not safe to cleanup the in-progress indexes.
- if (state == PlanExecutor::DEAD) {
- abortWithoutCleanup();
- }
+ wunit.commit();
+ return Status::OK();
+}
- uasserted(28550,
- "Unable to complete index build as the collection is no longer readable");
- }
-
- progress->finished();
+Status MultiIndexBlock::insertAllDocumentsInCollection(std::set<RecordId>* dupsOut) {
+ const char* curopMessage = _buildInBackground ? "Index Build (background)" : "Index Build";
+ const auto numRecords = _collection->numRecords(_txn);
+ stdx::unique_lock<Client> lk(*_txn->getClient());
+ ProgressMeterHolder progress(*_txn->setMessage_inlock(curopMessage, curopMessage, numRecords));
+ lk.unlock();
- Status ret = doneInserting(dupsOut);
- if (!ret.isOK())
- return ret;
+ Timer t;
- log() << "build index done. scanned " << n << " total records. "
- << t.seconds() << " secs" << endl;
+ unsigned long long n = 0;
- return Status::OK();
+ unique_ptr<PlanExecutor> exec(
+ InternalPlanner::collectionScan(_txn, _collection->ns().ns(), _collection));
+ if (_buildInBackground) {
+ invariant(_allowInterruption);
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ } else {
+ exec->setYieldPolicy(PlanExecutor::WRITE_CONFLICT_RETRY_ONLY);
}
- Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
-
- if ( _indexes[i].filterExpression &&
- !_indexes[i].filterExpression->matchesBSON(doc) ) {
+ Snapshotted<BSONObj> objToIndex;
+ RecordId loc;
+ PlanExecutor::ExecState state;
+ int retries = 0; // non-zero when retrying our last document.
+ while (retries ||
+ (PlanExecutor::ADVANCED == (state = exec->getNextSnapshotted(&objToIndex, &loc)))) {
+ try {
+ if (_allowInterruption)
+ _txn->checkForInterrupt();
+
+ // Make sure we are working with the latest version of the document.
+ if (objToIndex.snapshotId() != _txn->recoveryUnit()->getSnapshotId() &&
+ !_collection->findDoc(_txn, loc, &objToIndex)) {
+ // doc was deleted so don't index it.
+ retries = 0;
continue;
}
- int64_t unused;
- Status idxStatus(ErrorCodes::InternalError, "");
- if (_indexes[i].bulk) {
- idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
- }
- else {
- idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ // Done before insert so we can retry document if it WCEs.
+ progress->setTotalWhileRunning(_collection->numRecords(_txn));
+
+ WriteUnitOfWork wunit(_txn);
+ Status ret = insert(objToIndex.value(), loc);
+ if (ret.isOK()) {
+ wunit.commit();
+ } else if (dupsOut && ret.code() == ErrorCodes::DuplicateKey) {
+ // If dupsOut is non-null, we should only fail the specific insert that
+ // led to a DuplicateKey rather than the whole index build.
+ dupsOut->insert(loc);
+ } else {
+ // Fail the index build hard.
+ return ret;
}
- if ( !idxStatus.isOK() )
- return idxStatus;
+ // Go to the next document
+ progress->hit();
+ n++;
+ retries = 0;
+ } catch (const WriteConflictException& wce) {
+ CurOp::get(_txn)->debug().writeConflicts++;
+ retries++; // logAndBackoff expects this to be 1 on first call.
+ wce.logAndBackoff(retries, "index creation", _collection->ns().ns());
+
+ // Can't use WRITE_CONFLICT_RETRY_LOOP macros since we need to save/restore exec
+ // around call to abandonSnapshot.
+ exec->saveState();
+ _txn->recoveryUnit()->abandonSnapshot();
+ exec->restoreState(_txn); // Handles any WCEs internally.
}
- return Status::OK();
}
- Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
- if ( _indexes[i].bulk == NULL )
- continue;
- LOG(1) << "\t bulk commit starting for index: "
- << _indexes[i].block->getEntry()->descriptor()->indexName();
- Status status = _indexes[i].real->commitBulk( _txn,
- std::move(_indexes[i].bulk),
- _allowInterruption,
- _indexes[i].options.dupsAllowed,
- dupsOut );
- if ( !status.isOK() ) {
- return status;
- }
+ if (state != PlanExecutor::IS_EOF) {
+ // If the plan executor was killed, this means the DB/collection was dropped and so it
+ // is not safe to cleanup the in-progress indexes.
+ if (state == PlanExecutor::DEAD) {
+ abortWithoutCleanup();
}
- return Status::OK();
+ uasserted(28550, "Unable to complete index build as the collection is no longer readable");
}
- void MultiIndexBlock::abortWithoutCleanup() {
- _indexes.clear();
- _needToCleanup = false;
- }
+ progress->finished();
- void MultiIndexBlock::commit() {
- for ( size_t i = 0; i < _indexes.size(); i++ ) {
- _indexes[i].block->success();
+ Status ret = doneInserting(dupsOut);
+ if (!ret.isOK())
+ return ret;
+
+ log() << "build index done. scanned " << n << " total records. " << t.seconds() << " secs"
+ << endl;
+
+ return Status::OK();
+}
+
+Status MultiIndexBlock::insert(const BSONObj& doc, const RecordId& loc) {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ if (_indexes[i].filterExpression && !_indexes[i].filterExpression->matchesBSON(doc)) {
+ continue;
+ }
+
+ int64_t unused;
+ Status idxStatus(ErrorCodes::InternalError, "");
+ if (_indexes[i].bulk) {
+ idxStatus = _indexes[i].bulk->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ } else {
+ idxStatus = _indexes[i].real->insert(_txn, doc, loc, _indexes[i].options, &unused);
+ }
+
+ if (!idxStatus.isOK())
+ return idxStatus;
+ }
+ return Status::OK();
+}
+
+Status MultiIndexBlock::doneInserting(std::set<RecordId>* dupsOut) {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ if (_indexes[i].bulk == NULL)
+ continue;
+ LOG(1) << "\t bulk commit starting for index: "
+ << _indexes[i].block->getEntry()->descriptor()->indexName();
+ Status status = _indexes[i].real->commitBulk(_txn,
+ std::move(_indexes[i].bulk),
+ _allowInterruption,
+ _indexes[i].options.dupsAllowed,
+ dupsOut);
+ if (!status.isOK()) {
+ return status;
}
+ }
+
+ return Status::OK();
+}
- // this one is so operations examining the list of indexes know that the index is finished
- _collection->infoCache()->addedIndex(_txn);
+void MultiIndexBlock::abortWithoutCleanup() {
+ _indexes.clear();
+ _needToCleanup = false;
+}
- _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
- _needToCleanup = false;
+void MultiIndexBlock::commit() {
+ for (size_t i = 0; i < _indexes.size(); i++) {
+ _indexes[i].block->success();
}
-} // namespace mongo
+ // this one is so operations examining the list of indexes know that the index is finished
+ _collection->infoCache()->addedIndex(_txn);
+
+ _txn->recoveryUnit()->registerChange(new SetNeedToCleanupOnRollback(this));
+ _needToCleanup = false;
+}
+} // namespace mongo
diff --git a/src/mongo/db/catalog/index_create.h b/src/mongo/db/catalog/index_create.h
index f4be38710cd..d3dccb20b46 100644
--- a/src/mongo/db/catalog/index_create.h
+++ b/src/mongo/db/catalog/index_create.h
@@ -42,189 +42,197 @@
namespace mongo {
- class BackgroundOperation;
- class BSONObj;
- class Collection;
- class OperationContext;
+class BackgroundOperation;
+class BSONObj;
+class Collection;
+class OperationContext;
+
+/**
+ * Builds one or more indexes.
+ *
+ * If any method other than insert() returns a not-ok Status, this MultiIndexBlock should be
+ * considered failed and must be destroyed.
+ *
+ * If a MultiIndexBlock is destroyed before commit() or if commit() is rolled back, it will
+ * clean up all traces of the indexes being constructed. MultiIndexBlocks should not be
+ * destructed from inside of a WriteUnitOfWork as any cleanup needed should never be rolled back
+ * (as it is itself essentially a form of rollback, you don't want to "rollback the rollback").
+ */
+class MultiIndexBlock {
+ MONGO_DISALLOW_COPYING(MultiIndexBlock);
+
+public:
+ /**
+ * Neither pointer is owned.
+ */
+ MultiIndexBlock(OperationContext* txn, Collection* collection);
+ ~MultiIndexBlock();
+
+ /**
+ * By default we ignore the 'background' flag in specs when building an index. If this is
+ * called before init(), we will build the indexes in the background as long as *all* specs
+ * call for background indexing. If any spec calls for foreground indexing all indexes will
+ * be built in the foreground, as there is no concurrency benefit to building a subset of
+ * indexes in the background, but there is a performance benefit to building all in the
+ * foreground.
+ */
+ void allowBackgroundBuilding() {
+ _buildInBackground = true;
+ }
/**
- * Builds one or more indexes.
+ * Call this before init() to allow the index build to be interrupted.
+ * This only affects builds using the insertAllDocumentsInCollection helper.
+ */
+ void allowInterruption() {
+ _allowInterruption = true;
+ }
+
+ /**
+ * By default we enforce the 'unique' flag in specs when building an index, failing on violations.
+ * If this is called before init(), we will ignore unique violations. This has no effect if
+ * no specs are unique.
*
- * If any method other than insert() returns a not-ok Status, this MultiIndexBlock should be
- * considered failed and must be destroyed.
+ * If this is called, any dupsOut sets passed in will never be filled.
+ */
+ void ignoreUniqueConstraint() {
+ _ignoreUnique = true;
+ }
+
+ /**
+ * Removes pre-existing indexes from 'specs'. If this isn't done, init() may fail with
+ * IndexAlreadyExists.
+ */
+ void removeExistingIndexes(std::vector<BSONObj>* specs) const;
+
+ /**
+ * Prepares the index(es) for building.
*
- * If a MultiIndexBlock is destroyed before commit() or if commit() is rolled back, it will
- * clean up all traces of the indexes being constructed. MultiIndexBlocks should not be
- * destructed from inside of a WriteUnitOfWork as any cleanup needed should never be rolled back
- * (as it is itself essentially a form of rollback, you don't want to "rollback the rollback").
+ * Does not need to be called inside of a WriteUnitOfWork (but can be due to nesting).
+ *
+ * Requires holding an exclusive database lock.
*/
- class MultiIndexBlock {
- MONGO_DISALLOW_COPYING( MultiIndexBlock );
- public:
- /**
- * Neither pointer is owned.
- */
- MultiIndexBlock(OperationContext* txn, Collection* collection);
- ~MultiIndexBlock();
-
- /**
- * By default we ignore the 'background' flag in specs when building an index. If this is
- * called before init(), we will build the indexes in the background as long as *all* specs
- * call for background indexing. If any spec calls for foreground indexing all indexes will
- * be built in the foreground, as there is no concurrency benefit to building a subset of
- * indexes in the background, but there is a performance benefit to building all in the
- * foreground.
- */
- void allowBackgroundBuilding() { _buildInBackground = true; }
-
- /**
- * Call this before init() to allow the index build to be interrupted.
- * This only affects builds using the insertAllDocumentsInCollection helper.
- */
- void allowInterruption() { _allowInterruption = true; }
-
- /**
- * By default we enforce the 'unique' flag in specs when building an index by failing.
- * If this is called before init(), we will ignore unique violations. This has no effect if
- * no specs are unique.
- *
- * If this is called, any dupsOut sets passed in will never be filled.
- */
- void ignoreUniqueConstraint() { _ignoreUnique = true; }
-
- /**
- * Removes pre-existing indexes from 'specs'. If this isn't done, init() may fail with
- * IndexAlreadyExists.
- */
- void removeExistingIndexes(std::vector<BSONObj>* specs) const;
-
- /**
- * Prepares the index(es) for building.
- *
- * Does not need to be called inside of a WriteUnitOfWork (but can be due to nesting).
- *
- * Requires holding an exclusive database lock.
- */
- Status init(const std::vector<BSONObj>& specs);
- Status init(const BSONObj& spec) {
- return init(std::vector<BSONObj>(1, spec));
- }
+ Status init(const std::vector<BSONObj>& specs);
+ Status init(const BSONObj& spec) {
+ return init(std::vector<BSONObj>(1, spec));
+ }
+
+ /**
+ * Inserts all documents in the Collection into the indexes and logs with timing info.
+ *
+ * This is a simplified replacement for insert and doneInserting. Do not call this if you
+ * are calling either of them.
+ *
+ * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
+ * the set rather than failing the build. Documents added to this set are not indexed, so
+ * callers MUST either fail this index build or delete the documents from the collection.
+ *
+ * Can throw an exception if interrupted.
+ *
+ * Should not be called inside of a WriteUnitOfWork.
+ */
+ Status insertAllDocumentsInCollection(std::set<RecordId>* dupsOut = NULL);
- /**
- * Inserts all documents in the Collection into the indexes and logs with timing info.
- *
- * This is a simplified replacement for insert and doneInserting. Do not call this if you
- * are calling either of them.
- *
- * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
- * the set rather than failing the build. Documents added to this set are not indexed, so
- * callers MUST either fail this index build or delete the documents from the collection.
- *
- * Can throw an exception if interrupted.
- *
- * Should not be called inside of a WriteUnitOfWork.
- */
- Status insertAllDocumentsInCollection(std::set<RecordId>* dupsOut = NULL);
-
- /**
- * Call this after init() for each document in the collection.
- *
- * Do not call if you called insertAllDocumentsInCollection();
- *
- * Should be called inside of a WriteUnitOfWork.
- */
- Status insert(const BSONObj& wholeDocument, const RecordId& loc);
-
- /**
- * Call this after the last insert(). This gives the index builder a chance to do any
- * long-running operations in separate units of work from commit().
- *
- * Do not call if you called insertAllDocumentsInCollection();
- *
- * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
- * the set. Documents added to this set are not indexed, so callers MUST either fail this
- * index build or delete the documents from the collection.
- *
- * Should not be called inside of a WriteUnitOfWork.
- */
- Status doneInserting(std::set<RecordId>* dupsOut = NULL);
-
- /**
- * Marks the index ready for use. Should only be called as the last method after
- * doneInserting() or insertAllDocumentsInCollection() return success.
- *
- * Should be called inside of a WriteUnitOfWork. If the index building is to be logOp'd,
- * logOp() should be called from the same unit of work as commit().
- *
- * Requires holding an exclusive database lock.
- */
- void commit();
-
- /**
- * May be called at any time after construction but before a successful commit(). Suppresses
- * the default behavior on destruction of removing all traces of uncommitted index builds.
- *
- * The most common use of this is if the indexes were already dropped via some other
- * mechanism such as the whole collection being dropped. In that case, it would be invalid
- * to try to remove the indexes again. Also, replication uses this to ensure that indexes
- * that are being built on shutdown are resumed on startup.
- *
- * Do not use this unless you are really sure you need to.
- *
- * Does not matter whether it is called inside of a WriteUnitOfWork. Will not be rolled
- * back.
- */
- void abortWithoutCleanup();
-
- bool getBuildInBackground() const { return _buildInBackground; }
-
- private:
- class SetNeedToCleanupOnRollback;
- class CleanupIndexesVectorOnRollback;
-
- struct IndexToBuild {
-#if defined(_MSC_VER) && _MSC_VER < 1900 // MVSC++ <= 2013 can't generate default move operations
- IndexToBuild() = default;
- IndexToBuild(IndexToBuild&& other)
- : block(std::move(other.block))
- , real(std::move(other.real))
- , bulk(std::move(other.bulk))
- , options(std::move(other.options))
- , filterExpression(std::move(other.filterExpression))
- {}
-
- IndexToBuild& operator= (IndexToBuild&& other) {
- block = std::move(other.block);
- real = std::move(other.real);
- filterExpression = std::move(other.filterExpression);
- bulk = std::move(other.bulk);
- options = std::move(other.options);
- return *this;
- }
+ /**
+ * Call this after init() for each document in the collection.
+ *
+     * Do not call this if you called insertAllDocumentsInCollection().
+ *
+ * Should be called inside of a WriteUnitOfWork.
+ */
+ Status insert(const BSONObj& wholeDocument, const RecordId& loc);
+
+ /**
+ * Call this after the last insert(). This gives the index builder a chance to do any
+ * long-running operations in separate units of work from commit().
+ *
+     * Do not call this if you called insertAllDocumentsInCollection().
+ *
+ * If dupsOut is passed as non-NULL, violators of uniqueness constraints will be added to
+ * the set. Documents added to this set are not indexed, so callers MUST either fail this
+ * index build or delete the documents from the collection.
+ *
+ * Should not be called inside of a WriteUnitOfWork.
+ */
+ Status doneInserting(std::set<RecordId>* dupsOut = NULL);
+
+ /**
+ * Marks the index ready for use. Should only be called as the last method after
+ * doneInserting() or insertAllDocumentsInCollection() return success.
+ *
+ * Should be called inside of a WriteUnitOfWork. If the index building is to be logOp'd,
+ * logOp() should be called from the same unit of work as commit().
+ *
+ * Requires holding an exclusive database lock.
+ */
+ void commit();
+
+ /**
+ * May be called at any time after construction but before a successful commit(). Suppresses
+ * the default behavior on destruction of removing all traces of uncommitted index builds.
+ *
+ * The most common use of this is if the indexes were already dropped via some other
+ * mechanism such as the whole collection being dropped. In that case, it would be invalid
+ * to try to remove the indexes again. Also, replication uses this to ensure that indexes
+ * that are being built on shutdown are resumed on startup.
+ *
+ * Do not use this unless you are really sure you need to.
+ *
+ * Does not matter whether it is called inside of a WriteUnitOfWork. Will not be rolled
+ * back.
+ */
+ void abortWithoutCleanup();
+
+ bool getBuildInBackground() const {
+ return _buildInBackground;
+ }
+
+private:
+ class SetNeedToCleanupOnRollback;
+ class CleanupIndexesVectorOnRollback;
+
+ struct IndexToBuild {
+#if defined(_MSC_VER) && _MSC_VER < 1900 // MSVC++ <= 2013 can't generate default move operations
+ IndexToBuild() = default;
+ IndexToBuild(IndexToBuild&& other)
+ : block(std::move(other.block)),
+ real(std::move(other.real)),
+ bulk(std::move(other.bulk)),
+ options(std::move(other.options)),
+ filterExpression(std::move(other.filterExpression)) {}
+
+ IndexToBuild& operator=(IndexToBuild&& other) {
+ block = std::move(other.block);
+ real = std::move(other.real);
+ filterExpression = std::move(other.filterExpression);
+ bulk = std::move(other.bulk);
+ options = std::move(other.options);
+ return *this;
+ }
#endif
- std::unique_ptr<IndexCatalog::IndexBuildBlock> block;
+ std::unique_ptr<IndexCatalog::IndexBuildBlock> block;
- IndexAccessMethod* real = NULL; // owned elsewhere
- const MatchExpression* filterExpression; // might be NULL, owned elsewhere
- std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk;
+ IndexAccessMethod* real = NULL; // owned elsewhere
+ const MatchExpression* filterExpression; // might be NULL, owned elsewhere
+ std::unique_ptr<IndexAccessMethod::BulkBuilder> bulk;
- InsertDeleteOptions options;
- };
+ InsertDeleteOptions options;
+ };
- std::vector<IndexToBuild> _indexes;
+ std::vector<IndexToBuild> _indexes;
- std::unique_ptr<BackgroundOperation> _backgroundOperation;
+ std::unique_ptr<BackgroundOperation> _backgroundOperation;
- // Pointers not owned here and must outlive 'this'
- Collection* _collection;
- OperationContext* _txn;
+ // Pointers not owned here and must outlive 'this'
+ Collection* _collection;
+ OperationContext* _txn;
- bool _buildInBackground;
- bool _allowInterruption;
- bool _ignoreUnique;
+ bool _buildInBackground;
+ bool _allowInterruption;
+ bool _ignoreUnique;
- bool _needToCleanup;
- };
+ bool _needToCleanup;
+};
-} // namespace mongo
+} // namespace mongo
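
For the incremental path this header documents (insert() per document instead of insertAllDocumentsInCollection()), the unit-of-work placement is the easy thing to get wrong: each insert() belongs inside a WriteUnitOfWork, while doneInserting() must stay outside one. A sketch assuming a caller that already holds the required lock and has a collection cursor (uassertStatusOK is the usual status helper; everything else is the API above):

    MultiIndexBlock indexer(txn, collection);
    uassertStatusOK(indexer.init(specs));
    auto cursor = collection->getCursor(txn);
    while (auto record = cursor->next()) {
        WriteUnitOfWork wunit(txn);  // insert() runs inside a unit of work
        uassertStatusOK(indexer.insert(record->data.releaseToBson(), record->id));
        wunit.commit();
    }
    uassertStatusOK(indexer.doneInserting());  // long-running work, outside any WUOW
    WriteUnitOfWork wunit(txn);
    indexer.commit();  // marks the indexes ready for use
    wunit.commit();
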
diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp
index c42c2a1921f..d29c4d93d92 100644
--- a/src/mongo/db/catalog/index_key_validate.cpp
+++ b/src/mongo/db/catalog/index_key_validate.cpp
@@ -35,80 +35,78 @@
namespace mongo {
- using std::string;
+using std::string;
- Status validateKeyPattern(const BSONObj& key) {
- const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
+Status validateKeyPattern(const BSONObj& key) {
+ const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex;
- if ( key.objsize() > 2048 )
- return Status(code, "Index key pattern too large.");
+ if (key.objsize() > 2048)
+ return Status(code, "Index key pattern too large.");
- if ( key.isEmpty() )
- return Status(code, "Index keys cannot be empty.");
+ if (key.isEmpty())
+ return Status(code, "Index keys cannot be empty.");
- string pluginName = IndexNames::findPluginName( key );
- if ( pluginName.size() ) {
- if ( !IndexNames::isKnownName( pluginName ) )
- return Status(code,
- mongoutils::str::stream() << "Unknown index plugin '"
- << pluginName << '\'');
- }
+ string pluginName = IndexNames::findPluginName(key);
+ if (pluginName.size()) {
+ if (!IndexNames::isKnownName(pluginName))
+ return Status(
+ code, mongoutils::str::stream() << "Unknown index plugin '" << pluginName << '\'');
+ }
- BSONObjIterator it( key );
- while ( it.more() ) {
- BSONElement keyElement = it.next();
+ BSONObjIterator it(key);
+ while (it.more()) {
+ BSONElement keyElement = it.next();
- if( keyElement.type() == Object || keyElement.type() == Array )
- return Status(code, "Index keys cannot be Objects or Arrays.");
+ if (keyElement.type() == Object || keyElement.type() == Array)
+ return Status(code, "Index keys cannot be Objects or Arrays.");
- if ( keyElement.type() == String && pluginName != keyElement.str() ) {
- return Status(code, "Can't use more than one index plugin for a single index.");
- }
+ if (keyElement.type() == String && pluginName != keyElement.str()) {
+ return Status(code, "Can't use more than one index plugin for a single index.");
+ }
- // Ensure that the fields on which we are building the index are valid: a field must not
- // begin with a '$' unless it is part of a DBRef or text index, and a field path cannot
- // contain an empty field. If a field cannot be created or updated, it should not be
- // indexable.
+ // Ensure that the fields on which we are building the index are valid: a field must not
+ // begin with a '$' unless it is part of a DBRef or text index, and a field path cannot
+ // contain an empty field. If a field cannot be created or updated, it should not be
+ // indexable.
- FieldRef keyField( keyElement.fieldName() );
+ FieldRef keyField(keyElement.fieldName());
- const size_t numParts = keyField.numParts();
- if ( numParts == 0 ) {
- return Status(code, "Index keys cannot be an empty field.");
- }
+ const size_t numParts = keyField.numParts();
+ if (numParts == 0) {
+ return Status(code, "Index keys cannot be an empty field.");
+ }
- // "$**" is acceptable for a text index.
- if ( mongoutils::str::equals( keyElement.fieldName(), "$**" ) &&
- keyElement.valuestrsafe() == IndexNames::TEXT )
- continue;
+ // "$**" is acceptable for a text index.
+ if (mongoutils::str::equals(keyElement.fieldName(), "$**") &&
+ keyElement.valuestrsafe() == IndexNames::TEXT)
+ continue;
- for ( size_t i = 0; i != numParts; ++i ) {
- const StringData part = keyField.getPart(i);
+ for (size_t i = 0; i != numParts; ++i) {
+ const StringData part = keyField.getPart(i);
- // Check if the index key path contains an empty field.
- if ( part.empty() ) {
- return Status(code, "Index keys cannot contain an empty field.");
- }
+ // Check if the index key path contains an empty field.
+ if (part.empty()) {
+ return Status(code, "Index keys cannot contain an empty field.");
+ }
- if ( part[0] != '$' )
- continue;
+ if (part[0] != '$')
+ continue;
- // Check if the '$'-prefixed field is part of a DBRef: since we don't have the
- // necessary context to validate whether this is a proper DBRef, we allow index
- // creation on '$'-prefixed names that match those used in a DBRef.
- const bool mightBePartOfDbRef = (i != 0) &&
- (part == "$db" ||
- part == "$id" ||
- part == "$ref");
+ // Check if the '$'-prefixed field is part of a DBRef: since we don't have the
+ // necessary context to validate whether this is a proper DBRef, we allow index
+ // creation on '$'-prefixed names that match those used in a DBRef.
+ const bool mightBePartOfDbRef =
+ (i != 0) && (part == "$db" || part == "$id" || part == "$ref");
- if ( !mightBePartOfDbRef ) {
- return Status(code, "Index key contains an illegal field name: "
- "field name starts with '$'.");
- }
+ if (!mightBePartOfDbRef) {
+ return Status(code,
+ "Index key contains an illegal field name: "
+ "field name starts with '$'.");
}
}
-
- return Status::OK();
}
-} // namespace mongo
+
+ return Status::OK();
+}
+} // namespace mongo
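
Concretely, the rules above play out as follows; a test-style sketch assuming the standard BSON() builder macro and invariant():

    invariant(validateKeyPattern(BSON("a" << 1)).isOK());                // plain ascending key
    invariant(validateKeyPattern(BSON("a.b" << -1)).isOK());             // dotted path is fine
    invariant(validateKeyPattern(BSON("a.$id" << 1)).isOK());            // DBRef-style '$' field
    invariant(validateKeyPattern(BSON("$**" << "text")).isOK());         // text index wildcard
    invariant(!validateKeyPattern(BSONObj()).isOK());                    // empty key pattern
    invariant(!validateKeyPattern(BSON("a" << BSON("b" << 1))).isOK());  // Object as a value
    invariant(!validateKeyPattern(BSON("$bad" << 1)).isOK());            // leading '$' field name
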
diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h
index 4392e7e01a8..b03722ebc0c 100644
--- a/src/mongo/db/catalog/index_key_validate.h
+++ b/src/mongo/db/catalog/index_key_validate.h
@@ -31,10 +31,10 @@
#include "mongo/base/status.h"
namespace mongo {
- class BSONObj;
+class BSONObj;
- /**
- * Checks if the key is valid for building an index.
- */
- Status validateKeyPattern(const BSONObj& key);
-} // namespace mongo
+/**
+ * Checks if the key is valid for building an index.
+ */
+Status validateKeyPattern(const BSONObj& key);
+} // namespace mongo
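
On failure the Status carries ErrorCodes::CannotCreateIndex (the single error code used throughout the implementation above), so a caller only needs to propagate it; a hypothetical call site:

    Status status = validateKeyPattern(keyPattern);
    if (!status.isOK()) {
        // status.code() is ErrorCodes::CannotCreateIndex; status.reason() says why
        return status;
    }
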
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 9c73d968e37..a077a452fb8 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -51,208 +51,200 @@
namespace mongo {
namespace {
- static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
- WriteUnitOfWork wunit(txn);
- if (db->dropCollection(txn, collName).isOK()) {
- // ignoring failure case
- wunit.commit();
- }
+static void dropCollection(OperationContext* txn, Database* db, StringData collName) {
+ WriteUnitOfWork wunit(txn);
+ if (db->dropCollection(txn, collName).isOK()) {
+        // Intentionally ignore a failed drop; the unit of work is simply not committed.
+ wunit.commit();
+ }
+}
+} // namespace
+
+Status renameCollection(OperationContext* txn,
+ const NamespaceString& source,
+ const NamespaceString& target,
+ bool dropTarget,
+ bool stayTemp) {
+ DisableDocumentValidation validationDisabler(txn);
+
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+ // We stay in source context the whole time. This is mostly to set the CurOp namespace.
+ OldClientContext ctx(txn, source);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while renaming collection " << source.ns()
+ << " to " << target.ns());
}
-} // namespace
-
- Status renameCollection(OperationContext* txn,
- const NamespaceString& source,
- const NamespaceString& target,
- bool dropTarget,
- bool stayTemp) {
- DisableDocumentValidation validationDisabler(txn);
-
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
- // We stay in source context the whole time. This is mostly to set the CurOp namespace.
- OldClientContext ctx(txn, source);
-
- bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);
-
- if (userInitiatedWritesAndNotPrimary) {
- return Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while renaming collection " << source.ns()
- << " to " << target.ns());
- }
- Database* const sourceDB = dbHolder().get(txn, source.db());
- Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
- if (!sourceColl) {
- return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
- }
+ Database* const sourceDB = dbHolder().get(txn, source.db());
+ Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
+ if (!sourceColl) {
+ return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
+ }
- {
- // Ensure that collection name does not exceed maximum length.
- // Ensure that index names do not push the length over the max.
- // Iterator includes unfinished indexes.
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
- int longestIndexNameLength = 0;
- while (sourceIndIt.more()) {
- int thisLength = sourceIndIt.next()->indexName().length();
- if (thisLength > longestIndexNameLength)
- longestIndexNameLength = thisLength;
- }
+ {
+ // Ensure that collection name does not exceed maximum length.
+ // Ensure that index names do not push the length over the max.
+ // Iterator includes unfinished indexes.
+ IndexCatalog::IndexIterator sourceIndIt =
+ sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ int longestIndexNameLength = 0;
+ while (sourceIndIt.more()) {
+ int thisLength = sourceIndIt.next()->indexName().length();
+ if (thisLength > longestIndexNameLength)
+ longestIndexNameLength = thisLength;
+ }
- unsigned int longestAllowed =
- std::min(int(NamespaceString::MaxNsCollectionLen),
- int(NamespaceString::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
- if (target.size() > longestAllowed) {
- StringBuilder sb;
- sb << "collection name length of " << target.size()
- << " exceeds maximum length of " << longestAllowed
- << ", allowing for index names";
- return Status(ErrorCodes::InvalidLength, sb.str());
- }
+ unsigned int longestAllowed =
+ std::min(int(NamespaceString::MaxNsCollectionLen),
+ int(NamespaceString::MaxNsLen) - 2 /*strlen(".$")*/ - longestIndexNameLength);
+ if (target.size() > longestAllowed) {
+ StringBuilder sb;
+ sb << "collection name length of " << target.size() << " exceeds maximum length of "
+ << longestAllowed << ", allowing for index names";
+ return Status(ErrorCodes::InvalidLength, sb.str());
}
+ }
- BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
+ BackgroundOperation::assertNoBgOpInProgForNs(source.ns());
- Database* const targetDB = dbHolder().openDb(txn, target.db());
+ Database* const targetDB = dbHolder().openDb(txn, target.db());
- {
- WriteUnitOfWork wunit(txn);
+ {
+ WriteUnitOfWork wunit(txn);
- // Check if the target namespace exists and if dropTarget is true.
- // If target exists and dropTarget is not true, return false.
- if (targetDB->getCollection(target)) {
- if (!dropTarget) {
- printStackTrace();
- return Status(ErrorCodes::NamespaceExists, "target namespace exists");
- }
-
- Status s = targetDB->dropCollection(txn, target.ns());
- if (!s.isOK()) {
- return s;
- }
+        // Check whether the target namespace already exists.
+        // If it does and dropTarget is false, fail with NamespaceExists.
+ if (targetDB->getCollection(target)) {
+ if (!dropTarget) {
+ printStackTrace();
+ return Status(ErrorCodes::NamespaceExists, "target namespace exists");
}
- // If we are renaming in the same database, just
- // rename the namespace and we're done.
- if (sourceDB == targetDB) {
- Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
- if (!s.isOK()) {
- return s;
- }
-
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- dropTarget,
- stayTemp);
-
- wunit.commit();
- return Status::OK();
+ Status s = targetDB->dropCollection(txn, target.ns());
+ if (!s.isOK()) {
+ return s;
+ }
+ }
+
+ // If we are renaming in the same database, just
+ // rename the namespace and we're done.
+ if (sourceDB == targetDB) {
+ Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
+ if (!s.isOK()) {
+ return s;
}
+ getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+ txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+
wunit.commit();
+ return Status::OK();
}
- // If we get here, we are renaming across databases, so we must copy all the data and
- // indexes, then remove the source collection.
+ wunit.commit();
+ }
- // Create the target collection. It will be removed if we fail to copy the collection.
- // TODO use a temp collection and unset the temp flag on success.
- Collection* targetColl = nullptr;
- {
- CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
+ // If we get here, we are renaming across databases, so we must copy all the data and
+ // indexes, then remove the source collection.
- WriteUnitOfWork wunit(txn);
+ // Create the target collection. It will be removed if we fail to copy the collection.
+ // TODO use a temp collection and unset the temp flag on success.
+ Collection* targetColl = nullptr;
+ {
+ CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);
- // No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- targetColl = targetDB->createCollection(txn, target.ns(), options,
- false); // _id index build with others later.
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!targetColl) {
- return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
- }
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
+ // No logOp necessary because the entire renameCollection command is one logOp.
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ targetColl = targetDB->createCollection(txn,
+ target.ns(),
+ options,
+                                                 false);  // _id index built with the others later.
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!targetColl) {
+ return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
}
- // Dismissed on success
- ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
-
- MultiIndexBlock indexer(txn, targetColl);
- indexer.allowInterruption();
-
- // Copy the index descriptions from the source collection, adjusting the ns field.
- {
- std::vector<BSONObj> indexesToCopy;
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
- while (sourceIndIt.more()) {
- const BSONObj currIndex = sourceIndIt.next()->infoObj();
-
- // Process the source index.
- BSONObjBuilder newIndex;
- newIndex.append("ns", target);
- newIndex.appendElementsUnique(currIndex);
- indexesToCopy.push_back(newIndex.obj());
- }
- indexer.init(indexesToCopy);
- }
+ wunit.commit();
+ }
- {
- // Copy over all the data from source collection to target collection.
- auto cursor = sourceColl->getCursor(txn);
- while (auto record = cursor->next()) {
- txn->checkForInterrupt();
-
- const auto obj = record->data.releaseToBson();
-
- WriteUnitOfWork wunit(txn);
- // No logOp necessary because the entire renameCollection command is one logOp.
- bool shouldReplicateWrites = txn->writesAreReplicated();
- txn->setReplicatedWrites(false);
- Status status =
- targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
- txn->setReplicatedWrites(shouldReplicateWrites);
- if (!status.isOK())
- return status;
- wunit.commit();
- }
+ // Dismissed on success
+ ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
+
+ MultiIndexBlock indexer(txn, targetColl);
+ indexer.allowInterruption();
+
+ // Copy the index descriptions from the source collection, adjusting the ns field.
+ {
+ std::vector<BSONObj> indexesToCopy;
+ IndexCatalog::IndexIterator sourceIndIt =
+ sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+ while (sourceIndIt.more()) {
+ const BSONObj currIndex = sourceIndIt.next()->infoObj();
+
+ // Process the source index.
+ BSONObjBuilder newIndex;
+ newIndex.append("ns", target);
+ newIndex.appendElementsUnique(currIndex);
+ indexesToCopy.push_back(newIndex.obj());
}
+ indexer.init(indexesToCopy);
+ }
- Status status = indexer.doneInserting();
- if (!status.isOK())
- return status;
+ {
+ // Copy over all the data from source collection to target collection.
+ auto cursor = sourceColl->getCursor(txn);
+ while (auto record = cursor->next()) {
+ txn->checkForInterrupt();
- {
- // Getting here means we successfully built the target copy. We now remove the
- // source collection and finalize the rename.
- WriteUnitOfWork wunit(txn);
+ const auto obj = record->data.releaseToBson();
+ WriteUnitOfWork wunit(txn);
+ // No logOp necessary because the entire renameCollection command is one logOp.
bool shouldReplicateWrites = txn->writesAreReplicated();
txn->setReplicatedWrites(false);
- Status status = sourceDB->dropCollection(txn, source.ns());
+ Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
txn->setReplicatedWrites(shouldReplicateWrites);
if (!status.isOK())
return status;
+ wunit.commit();
+ }
+ }
- indexer.commit();
+ Status status = indexer.doneInserting();
+ if (!status.isOK())
+ return status;
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- dropTarget,
- stayTemp);
+ {
+ // Getting here means we successfully built the target copy. We now remove the
+ // source collection and finalize the rename.
+ WriteUnitOfWork wunit(txn);
- wunit.commit();
- }
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ Status status = sourceDB->dropCollection(txn, source.ns());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!status.isOK())
+ return status;
- targetCollectionDropper.Dismiss();
- return Status::OK();
+ indexer.commit();
+
+ getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+ txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);
+
+ wunit.commit();
}
-} // namespace mongo
+ targetCollectionDropper.Dismiss();
+ return Status::OK();
+}
+
+} // namespace mongo
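
The cross-database branch above leans on the MakeGuard/Dismiss idiom (mongo/util/scopeguard.h) so that every early return while copying drops the half-built target collection. The pattern in isolation, with doCopy() as a hypothetical stand-in for the index and data copy steps:

    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
    Status status = doCopy();           // any early return leaves the guard armed,
    if (!status.isOK())                 // so dropCollection() runs at scope exit
        return status;
    targetCollectionDropper.Dismiss();  // success: keep the target collection
    return Status::OK();
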
diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h
index 1ec0b754779..fb1aa7b5387 100644
--- a/src/mongo/db/catalog/rename_collection.h
+++ b/src/mongo/db/catalog/rename_collection.h
@@ -29,18 +29,18 @@
#include "mongo/base/status.h"
namespace mongo {
- class NamespaceString;
- class OperationContext;
+class NamespaceString;
+class OperationContext;
- /**
- * Renames the collection "source" to "target" and drops the existing collection named "target"
- * iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
- * temporariness.
- */
- Status renameCollection(OperationContext* txn,
- const NamespaceString& source,
- const NamespaceString& target,
- bool dropTarget,
- bool stayTemp);
+/**
+ * Renames the collection "source" to "target" and drops the existing collection named "target"
+ * iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
+ * temporariness.
+ */
+Status renameCollection(OperationContext* txn,
+ const NamespaceString& source,
+ const NamespaceString& target,
+ bool dropTarget,
+ bool stayTemp);
-} // namespace mongo
+} // namespace mongo
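
A hypothetical caller of the declaration above (the namespaces are placeholders):

    NamespaceString source("db1.coll");
    NamespaceString target("db2.coll");
    Status s = renameCollection(txn,
                                source,
                                target,
                                /*dropTarget=*/false,  // NamespaceExists if target already exists
                                /*stayTemp=*/false);   // do not preserve the 'temp' flag
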