summaryrefslogtreecommitdiff
path: root/src/mongo
diff options
context:
space:
mode:
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/SConscript7
-rw-r--r--src/mongo/db/catalog/apply_ops.cpp215
-rw-r--r--src/mongo/db/catalog/apply_ops.h45
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp261
-rw-r--r--src/mongo/db/catalog/capped_utils.h58
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp182
-rw-r--r--src/mongo/db/catalog/coll_mod.h45
-rw-r--r--src/mongo/db/catalog/collection.cpp101
-rw-r--r--src/mongo/db/catalog/collection.h34
-rw-r--r--src/mongo/db/catalog/database.cpp28
-rw-r--r--src/mongo/db/catalog/database.h4
-rw-r--r--src/mongo/db/catalog/drop_collection.cpp109
-rw-r--r--src/mongo/db/catalog/drop_collection.h43
-rw-r--r--src/mongo/db/catalog/drop_database.cpp111
-rw-r--r--src/mongo/db/catalog/drop_database.h38
-rw-r--r--src/mongo/db/catalog/drop_indexes.cpp194
-rw-r--r--src/mongo/db/catalog/drop_indexes.h46
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp292
-rw-r--r--src/mongo/db/catalog/rename_collection.h46
-rw-r--r--src/mongo/db/cloner.cpp36
-rw-r--r--src/mongo/db/commands.cpp6
-rw-r--r--src/mongo/db/commands.h4
-rw-r--r--src/mongo/db/commands/apply_ops.cpp161
-rw-r--r--src/mongo/db/commands/collection_to_capped.cpp172
-rw-r--r--src/mongo/db/commands/create_indexes.cpp9
-rw-r--r--src/mongo/db/commands/drop_indexes.cpp149
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp20
-rw-r--r--src/mongo/db/commands/mr.cpp19
-rw-r--r--src/mongo/db/commands/rename_collection.cpp220
-rw-r--r--src/mongo/db/commands/test_commands.cpp56
-rw-r--r--src/mongo/db/commands/write_commands/batch_executor.cpp11
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp3
-rw-r--r--src/mongo/db/db.cpp5
-rw-r--r--src/mongo/db/dbcommands.cpp335
-rw-r--r--src/mongo/db/dbhelpers.cpp9
-rw-r--r--src/mongo/db/exec/delete.cpp14
-rw-r--r--src/mongo/db/exec/update.cpp67
-rw-r--r--src/mongo/db/instance.cpp9
-rw-r--r--src/mongo/db/introspect.cpp4
-rw-r--r--src/mongo/db/namespace_string.h1
-rw-r--r--src/mongo/db/op_observer.cpp88
-rw-r--r--src/mongo/db/op_observer.h16
-rw-r--r--src/mongo/db/operation_context.h10
-rw-r--r--src/mongo/db/operation_context_impl.cpp10
-rw-r--r--src/mongo/db/operation_context_impl.h4
-rw-r--r--src/mongo/db/operation_context_noop.h6
-rw-r--r--src/mongo/db/ops/delete.cpp2
-rw-r--r--src/mongo/db/ops/delete.h1
-rw-r--r--src/mongo/db/ops/delete_request.h4
-rw-r--r--src/mongo/db/ops/parsed_update.cpp4
-rw-r--r--src/mongo/db/ops/update.cpp16
-rw-r--r--src/mongo/db/ops/update_request.h26
-rw-r--r--src/mongo/db/query/get_executor.cpp13
-rw-r--r--src/mongo/db/repl/master_slave.cpp1
-rw-r--r--src/mongo/db/repl/oplog.cpp19
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp1
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp2
-rw-r--r--src/mongo/db/repl/sync_tail.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp5
-rw-r--r--src/mongo/db/ttl.cpp3
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp13
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp5
-rw-r--r--src/mongo/dbtests/querytests.cpp2
-rw-r--r--src/mongo/dbtests/repltests.cpp12
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp29
-rw-r--r--src/mongo/s/d_migrate.cpp3
66 files changed, 2147 insertions, 1319 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index 5698fbceab4..6087caba743 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -723,12 +723,19 @@ env.Library('elapsed_tracker',
# mongod files - also files used in tools. present in dbtests, but not in mongos and not in client
# libs.
serverOnlyFiles = [ "db/background.cpp",
+ "db/catalog/apply_ops.cpp",
+ "db/catalog/coll_mod.cpp",
"db/catalog/collection.cpp",
"db/catalog/collection_compact.cpp",
"db/catalog/collection_info_cache.cpp",
+ "db/catalog/capped_utils.cpp",
"db/catalog/cursor_manager.cpp",
"db/catalog/database.cpp",
"db/catalog/database_holder.cpp",
+ "db/catalog/drop_collection.cpp",
+ "db/catalog/drop_database.cpp",
+ "db/catalog/drop_indexes.cpp",
+ "db/catalog/rename_collection.cpp",
"db/catalog/index_catalog.cpp",
"db/catalog/index_catalog_entry.cpp",
"db/catalog/index_create.cpp",
diff --git a/src/mongo/db/catalog/apply_ops.cpp b/src/mongo/db/catalog/apply_ops.cpp
new file mode 100644
index 00000000000..2ef6d46c9d0
--- /dev/null
+++ b/src/mongo/db/catalog/apply_ops.cpp
@@ -0,0 +1,215 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/apply_ops.h"
+
+#include "mongo/db/client.h"
+#include "mongo/db/commands/dbhash.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
+#include "mongo/db/matcher/matcher.h"
+#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/repl/oplog.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+ Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result) {
+ // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
+ // ns used so locking individually requires more analysis
+ ScopedTransaction scopedXact(txn, MODE_X);
+ Lock::GlobalWrite globalWriteLock(txn->lockState());
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while applying ops to database " << dbName);
+ }
+
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ BSONObj ops = applyOpCmd.firstElement().Obj();
+ // Preconditions check reads the database state, so needs to be done locked
+ if (applyOpCmd["preCondition"].type() == Array) {
+ BSONObjIterator i(applyOpCmd["preCondition"].Obj());
+ while (i.more()) {
+ BSONObj f = i.next().Obj();
+
+ DBDirectClient db(txn);
+ BSONObj realres = db.findOne(f["ns"].String() , f["q"].Obj());
+
+ // Apply-ops would never have a $where matcher, so use the default callback,
+ // which will throw an error if $where is found.
+ Matcher m(f["res"].Obj());
+ if (! m.matches(realres)) {
+ result->append("got" , realres);
+ result->append("whatFailed" , f);
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ return Status(ErrorCodes::BadValue, "pre-condition failed");
+ }
+ }
+ }
+
+ // apply
+ int num = 0;
+ int errors = 0;
+
+ BSONObjIterator i(ops);
+ BSONArrayBuilder ab;
+ const bool alwaysUpsert = applyOpCmd.hasField("alwaysUpsert") ?
+ applyOpCmd["alwaysUpsert"].trueValue() : true;
+
+ while (i.more()) {
+ BSONElement e = i.next();
+ const BSONObj& temp = e.Obj();
+
+ // Ignore 'n' operations.
+ const char *opType = temp["op"].valuestrsafe();
+ if (*opType == 'n') continue;
+
+ const std::string ns = temp["ns"].String();
+
+ // Run operations under a nested lock as a hack to prevent yielding.
+ //
+ // The list of operations is supposed to be applied atomically; yielding
+ // would break atomicity by allowing an interruption or a shutdown to occur
+ // after only some operations are applied. We are already locked globally
+        // at this point, so acquiring another global write lock creates a nested lock,
+ // and yields are disallowed for operations that hold a nested lock.
+ //
+ // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
+ // commit to happen with a subset of ops applied.
+ // TODO figure out what to do about this.
+ Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
+
+ // Ensures that yielding will not happen (see the comment above).
+ DEV {
+ Locker::LockSnapshot lockSnapshot;
+ invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
+ };
+
+ OldClientContext ctx(txn, ns);
+
+ Status status(ErrorCodes::InternalError, "");
+ while (true) {
+ try {
+ // We assume that in the WriteConflict retry case, either the op rolls back
+ // any changes it makes or is otherwise safe to rerun.
+ status =
+ repl::applyOperation_inlock(txn,
+ ctx.db(),
+ temp,
+ !txn->writesAreReplicated(),
+ alwaysUpsert);
+ break;
+ }
+ catch (const WriteConflictException& wce) {
+ LOG(2) << "WriteConflictException in applyOps command, retrying.";
+ txn->recoveryUnit()->commitAndRestart();
+ continue;
+ }
+ }
+
+ ab.append(status.isOK());
+ if (!status.isOK()) {
+ errors++;
+ }
+
+ num++;
+
+ WriteUnitOfWork wuow(txn);
+ logOpForDbHash(txn, ns.c_str());
+ wuow.commit();
+ }
+
+ result->append("applied" , num);
+ result->append("results" , ab.arr());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+
+ if (txn->writesAreReplicated()) {
+ // We want this applied atomically on slaves
+ // so we re-wrap without the pre-condition for speed
+
+ std::string tempNS = str::stream() << dbName << ".$cmd";
+
+ // TODO: possibly use mutable BSON to remove preCondition field
+ // once it is available
+ BSONObjIterator iter(applyOpCmd);
+ BSONObjBuilder cmdBuilder;
+
+ while (iter.more()) {
+ BSONElement elem(iter.next());
+ if (strcmp(elem.fieldName(), "preCondition") != 0) {
+ cmdBuilder.append(elem);
+ }
+ }
+
+ const BSONObj cmdRewritten = cmdBuilder.done();
+
+        // We currently always logOp the command regardless of whether the individual ops
+ // succeeded and rely on any failures to also happen on secondaries. This isn't
+ // perfect, but it's what the command has always done and is part of its "correct"
+ // behavior.
+ while (true) {
+ try {
+ WriteUnitOfWork wunit(txn);
+ getGlobalServiceContext()->getOpObserver()->onApplyOps(txn,
+ tempNS,
+ cmdRewritten);
+ wunit.commit();
+ break;
+ }
+ catch (const WriteConflictException& wce) {
+ LOG(2) <<
+ "WriteConflictException while logging applyOps command, retrying.";
+ txn->recoveryUnit()->commitAndRestart();
+ continue;
+ }
+ }
+ }
+
+ if (errors != 0) {
+ return Status(ErrorCodes::UnknownError, "");
+ }
+
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/apply_ops.h b/src/mongo/db/catalog/apply_ops.h
new file mode 100644
index 00000000000..13639deb586
--- /dev/null
+++ b/src/mongo/db/catalog/apply_ops.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+ class BSONObj;
+ class BSONObjBuilder;
+ class OperationContext;
+
+ /**
+ * Applies ops contained in "applyOpCmd" and populates fields in "result" to be returned to the
+ * user.
+ */
+ Status applyOps(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& applyOpCmd,
+ BSONObjBuilder* result);
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
new file mode 100644
index 00000000000..76be6e8d040
--- /dev/null
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -0,0 +1,261 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/capped_utils.h"
+
+#include "mongo/db/background.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/client.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index_builder.h"
+#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/query/internal_plans.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+
+namespace mongo {
+namespace {
+ std::vector<BSONObj> stopIndexBuildsEmptyCapped(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& ns) {
+ IndexCatalog::IndexKillCriteria criteria;
+ criteria.ns = ns;
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ }
+
+ std::vector<BSONObj> stopIndexBuildsConvertToCapped(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& ns) {
+ IndexCatalog::IndexKillCriteria criteria;
+ criteria.ns = ns;
+ Collection* coll = db->getCollection(ns);
+ if (coll) {
+ return IndexBuilder::killMatchingIndexBuilds(coll, criteria);
+ }
+ return std::vector<BSONObj>();
+ }
+
+} // namespace
+
+ Status emptyCapped(OperationContext* txn,
+ const NamespaceString& collectionName) {
+ ScopedTransaction scopedXact(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(
+ collectionName.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while truncating collection "
+ << collectionName.ns());
+ }
+
+ Database* db = autoDb.getDb();
+ massert(13429, "no such database", db);
+
+ Collection* collection = db->getCollection(collectionName);
+ massert(28584, "no such collection", collection);
+
+ std::vector<BSONObj> indexes = stopIndexBuildsEmptyCapped(txn, db, collectionName);
+
+ WriteUnitOfWork wuow(txn);
+
+ Status status = collection->truncate(txn);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ IndexBuilder::restoreIndexes(txn, indexes);
+
+ getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
+
+ wuow.commit();
+
+ return Status::OK();
+ }
+
+ Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp) {
+
+ std::string fromNs = db->name() + "." + shortFrom;
+ std::string toNs = db->name() + "." + shortTo;
+
+ Collection* fromCollection = db->getCollection(fromNs);
+ if (!fromCollection)
+ return Status(ErrorCodes::NamespaceNotFound,
+ str::stream() << "source collection " << fromNs << " does not exist");
+
+ if (db->getCollection(toNs))
+ return Status(ErrorCodes::NamespaceExists, "to collection already exists");
+
+ // create new collection
+ {
+ OldClientContext ctx(txn, toNs);
+ BSONObjBuilder spec;
+ spec.appendBool("capped", true);
+ spec.append("size", size);
+ if (temp)
+ spec.appendBool("temp", true);
+
+ WriteUnitOfWork wunit(txn);
+ Status status = userCreateNS(txn, ctx.db(), toNs, spec.done());
+ if (!status.isOK())
+ return status;
+ wunit.commit();
+ }
+
+ Collection* toCollection = db->getCollection(toNs);
+ invariant(toCollection); // we created above
+
+ // how much data to ignore because it won't fit anyway
+ // datasize and extentSize can't be compared exactly, so add some padding to 'size'
+
+ long long allocatedSpaceGuess =
+ std::max(static_cast<long long>(size * 2),
+ static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
+
+ long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
+
+ boost::scoped_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
+ txn,
+ fromNs,
+ fromCollection,
+ InternalPlanner::FORWARD));
+
+
+ while (true) {
+ BSONObj obj;
+ PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
+
+ switch(state) {
+ case PlanExecutor::IS_EOF:
+ return Status::OK();
+ case PlanExecutor::DEAD:
+ db->dropCollection(txn, toNs);
+ return Status(ErrorCodes::InternalError, "executor turned dead while iterating");
+ case PlanExecutor::FAILURE:
+ return Status(ErrorCodes::InternalError, "executor error while iterating");
+ case PlanExecutor::ADVANCED:
+ if (excessSize > 0) {
+ excessSize -= (4 * obj.objsize()); // 4x is for padding, power of 2, etc...
+ continue;
+ }
+
+ WriteUnitOfWork wunit(txn);
+ toCollection->insertDocument(txn, obj, true, txn->writesAreReplicated());
+ wunit.commit();
+ }
+ }
+
+ invariant(false); // unreachable
+ }
+
+ Status convertToCapped(OperationContext* txn,
+ const NamespaceString& collectionName,
+ double size) {
+
+ StringData dbname = collectionName.db();
+ StringData shortSource = collectionName.coll();
+
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, collectionName.db(), MODE_X);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while converting "
+ << collectionName.ns() << " to a capped collection");
+ }
+
+ Database* const db = autoDb.getDb();
+ if (!db) {
+ return Status(ErrorCodes::DatabaseNotFound,
+ str::stream() << "database " << dbname << " not found");
+ }
+
+ stopIndexBuildsConvertToCapped(txn, db, collectionName);
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname);
+
+ std::string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
+ std::string longTmpName = str::stream() << dbname << "." << shortTmpName;
+
+ WriteUnitOfWork wunit(txn);
+ if (db->getCollection(longTmpName)) {
+ Status status = db->dropCollection(txn, longTmpName);
+ if (!status.isOK())
+ return status;
+ }
+
+
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ Status status = cloneCollectionAsCapped(txn,
+ db,
+ shortSource.toString(),
+ shortTmpName,
+ size,
+ true);
+
+ if (!status.isOK()) {
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ return status;
+ }
+
+ verify(db->getCollection(longTmpName));
+
+ status = db->dropCollection(txn, collectionName.ns());
+ txn->setReplicatedWrites(shouldReplicateWrites);
+ if (!status.isOK())
+ return status;
+
+ status = db->renameCollection(txn, longTmpName, collectionName.ns(), false);
+ if (!status.isOK())
+ return status;
+
+ getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
+ txn,
+ NamespaceString(collectionName),
+ size);
+
+ wunit.commit();
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/capped_utils.h b/src/mongo/db/catalog/capped_utils.h
new file mode 100644
index 00000000000..05104230fcf
--- /dev/null
+++ b/src/mongo/db/catalog/capped_utils.h
@@ -0,0 +1,58 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+ class Database;
+ class NamespaceString;
+ class OperationContext;
+
+ /**
+ * Drops all documents contained in the capped collection, "collectionName".
+ */
+ Status emptyCapped(OperationContext* txn,
+ const NamespaceString& collectionName);
+
+ /**
+ * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
+ */
+ Status cloneCollectionAsCapped(OperationContext* txn,
+ Database* db,
+ const std::string& shortFrom,
+ const std::string& shortTo,
+ double size,
+ bool temp);
+
+ /**
+ * Converts the collection "collectionName" to a capped collection with a size of "size".
+ */
+ Status convertToCapped(OperationContext* txn,
+ const NamespaceString& collectionName,
+ double size);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
new file mode 100644
index 00000000000..765f2285792
--- /dev/null
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -0,0 +1,182 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/coll_mod.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/client.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+
+namespace mongo {
+ Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(ns) : NULL;
+
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !coll) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns does not exist");
+ }
+
+ OldClientContext ctx(txn, ns);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while setting collection options on "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
+
+ Status errorStatus = Status::OK();
+
+ BSONForEach(e, cmdObj) {
+ if (str::equals("collMod", e.fieldName())) {
+ // no-op
+ }
+ else if (str::startsWith(e.fieldName(), "$")) {
+ // no-op ignore top-level fields prefixed with $. They are for the command processor
+ }
+ else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
+ // no-op
+ }
+ else if (str::equals("index", e.fieldName())) {
+ BSONObj indexObj = e.Obj();
+ BSONObj keyPattern = indexObj.getObjectField("keyPattern");
+
+ if (keyPattern.isEmpty()){
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no keyPattern specified");
+ continue;
+ }
+
+ BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
+ if (newExpireSecs.eoo()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions, "no expireAfterSeconds field");
+ continue;
+ }
+ if (! newExpireSecs.isNumber()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ "expireAfterSeconds field must be a number");
+ continue;
+ }
+
+ const IndexDescriptor* idx = coll->getIndexCatalog()
+ ->findIndexByKeyPattern(txn, keyPattern);
+ if (idx == NULL) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "cannot find index " << keyPattern
+ << " for ns " << ns.toString());
+ continue;
+ }
+ BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
+ if (oldExpireSecs.eoo()){
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ "no expireAfterSeconds field to update");
+ continue;
+ }
+ if (! oldExpireSecs.isNumber()) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ "existing expireAfterSeconds field is not a number");
+ continue;
+ }
+
+ if (oldExpireSecs != newExpireSecs) {
+ result->appendAs(oldExpireSecs, "expireAfterSeconds_old");
+ // Change the value of "expireAfterSeconds" on disk.
+ coll->getCatalogEntry()->updateTTLSetting(txn,
+ idx->indexName(),
+ newExpireSecs.numberLong());
+ // Notify the index catalog that the definition of this index changed.
+ idx = coll->getIndexCatalog()->refreshEntry(txn, idx);
+ result->appendAs(newExpireSecs , "expireAfterSeconds_new");
+ }
+ }
+ else {
+ // As of SERVER-17312 we only support these two options. When SERVER-17320 is
+ // resolved this will need to be enhanced to handle other options.
+ typedef CollectionOptions CO;
+ const StringData name = e.fieldNameStringData();
+ const int flag = (name == "usePowerOf2Sizes") ? CO::Flag_UsePowerOf2Sizes :
+ (name == "noPadding") ? CO::Flag_NoPadding :
+ 0;
+ if (!flag) {
+ errorStatus = Status(ErrorCodes::InvalidOptions,
+ str::stream() << "unknown option to collMod: " << name);
+ continue;
+ }
+
+ CollectionCatalogEntry* cce = coll->getCatalogEntry();
+
+ const int oldFlags = cce->getCollectionOptions(txn).flags;
+ const bool oldSetting = oldFlags & flag;
+ const bool newSetting = e.trueValue();
+
+ result->appendBool(name.toString() + "_old", oldSetting);
+ result->appendBool(name.toString() + "_new", newSetting);
+
+ const int newFlags = newSetting
+ ? (oldFlags | flag) // set flag
+ : (oldFlags & ~flag); // clear flag
+
+ // NOTE we do this unconditionally to ensure that we note that the user has
+ // explicitly set flags, even if they are just setting the default.
+ cce->updateFlags(txn, newFlags);
+
+ const CollectionOptions newOptions = cce->getCollectionOptions(txn);
+ invariant(newOptions.flags == newFlags);
+ invariant(newOptions.flagsSet);
+ }
+ }
+
+ if (!errorStatus.isOK()) {
+ return errorStatus;
+ }
+
+ getGlobalServiceContext()->getOpObserver()->onCollMod(txn,
+ (dbName.toString() + ".$cmd").c_str(),
+ cmdObj);
+
+ wunit.commit();
+ return Status::OK();
+ }
+} // namespace mongo
diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h
new file mode 100644
index 00000000000..1f511f45145
--- /dev/null
+++ b/src/mongo/db/catalog/coll_mod.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+ class BSONObj;
+ class BSONObjBuilder;
+ class NamespaceString;
+ class OperationContext;
+
+ /**
+ * Performs the collection modification described in "cmdObj" on the collection "ns".
+ */
+ Status collMod(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder* result);
+} // namespace mongo
+
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 2940e9c7c9b..88f8cde49b3 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -38,15 +38,19 @@
#include "mongo/base/counter.h"
#include "mongo/base/owned_pointer_map.h"
-#include "mongo/db/clientcursor.h"
-#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/curop.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/clientcursor.h"
+#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/curop.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/op_observer.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/ops/update_driver.h"
+#include "mongo/db/ops/update_request.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/record_fetcher.h"
@@ -169,9 +173,9 @@ namespace mongo {
return true;
}
- StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
const DocWriter* doc,
- bool enforceQuota ) {
+ bool enforceQuota) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant( !_indexCatalog.haveAnyIndexes() ); // eventually can implement, just not done
@@ -181,12 +185,16 @@ namespace mongo {
if ( !loc.isOK() )
return loc;
+        // We cannot call into the OpObserver here because the document being written is
+        // not present. Fortunately, this is currently only used for adding oplog entries.
+
return StatusWith<RecordId>( loc );
}
- StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
const BSONObj& docToInsert,
- bool enforceQuota ) {
+ bool enforceQuota,
+ bool fromMigrate) {
const SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
@@ -200,13 +208,19 @@ namespace mongo {
StatusWith<RecordId> res = _insertDocument( txn, docToInsert, enforceQuota );
invariant( sid == txn->recoveryUnit()->getSnapshotId() );
+ if (res.isOK()) {
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn,
+ ns(),
+ docToInsert,
+ fromMigrate);
+ }
return res;
}
- StatusWith<RecordId> Collection::insertDocument( OperationContext* txn,
+ StatusWith<RecordId> Collection::insertDocument(OperationContext* txn,
const BSONObj& doc,
MultiIndexBlock* indexBlock,
- bool enforceQuota ) {
+ bool enforceQuota) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
StatusWith<RecordId> loc = _recordStore->insertRecord( txn,
@@ -221,6 +235,8 @@ namespace mongo {
if ( !status.isOK() )
return StatusWith<RecordId>( status );
+ getGlobalServiceContext()->getOpObserver()->onInsert(txn, ns(), doc);
+
return loc;
}
@@ -271,11 +287,11 @@ namespace mongo {
return Status::OK();
}
- void Collection::deleteDocument( OperationContext* txn,
- const RecordId& loc,
- bool cappedOK,
- bool noWarn,
- BSONObj* deletedId ) {
+ void Collection::deleteDocument(OperationContext* txn,
+ const RecordId& loc,
+ bool cappedOK,
+ bool noWarn,
+ BSONObj* deletedId) {
if ( isCapped() && !cappedOK ) {
log() << "failing remove on a capped ns " << _ns << endl;
uasserted( 10089, "cannot remove from a capped collection" );
@@ -284,9 +300,11 @@ namespace mongo {
Snapshotted<BSONObj> doc = docFor(txn, loc);
- if (deletedId) {
- BSONElement e = doc.value()["_id"];
- if (e.type()) {
+ BSONElement e = doc.value()["_id"];
+ BSONObj id;
+ if (e.type()) {
+ id = e.wrap();
+ if (deletedId) {
*deletedId = e.wrap();
}
}
@@ -299,6 +317,10 @@ namespace mongo {
_recordStore->deleteRecord(txn, loc);
_infoCache.notifyOfWriteOp();
+
+ if (!id.isEmpty()) {
+ getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns().ns(), id);
+ }
}
Counter64 moveCounter;
@@ -306,25 +328,26 @@ namespace mongo {
StatusWith<RecordId> Collection::updateDocument( OperationContext* txn,
const RecordId& oldLocation,
- const Snapshotted<BSONObj>& objOld,
- const BSONObj& objNew,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
bool enforceQuota,
bool indexesAffected,
- OpDebug* debug ) {
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
- invariant(objOld.snapshotId() == txn->recoveryUnit()->getSnapshotId());
+ invariant(oldDoc.snapshotId() == txn->recoveryUnit()->getSnapshotId());
SnapshotId sid = txn->recoveryUnit()->getSnapshotId();
- BSONElement oldId = objOld.value()["_id"];
- if ( !oldId.eoo() && ( oldId != objNew["_id"] ) )
+ BSONElement oldId = oldDoc.value()["_id"];
+ if ( !oldId.eoo() && ( oldId != newDoc["_id"] ) )
return StatusWith<RecordId>( ErrorCodes::InternalError,
"in Collection::updateDocument _id mismatch",
13596 );
// At the end of this step, we will have a map of UpdateTickets, one per index, which
- // represent the index updates needed to be done, based on the changes between objOld and
- // objNew.
+ // represent the index updates needed to be done, based on the changes between oldDoc and
+ // newDoc.
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
if ( indexesAffected ) {
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
@@ -341,8 +364,8 @@ namespace mongo {
UpdateTicket* updateTicket = new UpdateTicket();
updateTickets.mutableMap()[descriptor] = updateTicket;
Status ret = iam->validateUpdate(txn,
- objOld.value(),
- objNew,
+ oldDoc.value(),
+ newDoc,
oldLocation,
options,
updateTicket,
@@ -357,8 +380,8 @@ namespace mongo {
// object is removed from all indexes.
StatusWith<RecordId> newLocation = _recordStore->updateRecord( txn,
oldLocation,
- objNew.objdata(),
- objNew.objsize(),
+ newDoc.objdata(),
+ newDoc.objsize(),
_enforceQuota( enforceQuota ),
this );
@@ -381,10 +404,13 @@ namespace mongo {
debug->nmoved += 1;
}
- Status s = _indexCatalog.indexRecord(txn, objNew, newLocation.getValue());
+ Status s = _indexCatalog.indexRecord(txn, newDoc, newLocation.getValue());
if (!s.isOK())
return StatusWith<RecordId>(s);
invariant( sid == txn->recoveryUnit()->getSnapshotId() );
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
+
return newLocation;
}
@@ -410,6 +436,9 @@ namespace mongo {
}
invariant( sid == txn->recoveryUnit()->getSnapshotId() );
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
+
return newLocation;
}
@@ -435,7 +464,8 @@ namespace mongo {
const RecordId& loc,
const Snapshotted<RecordData>& oldRec,
const char* damageSource,
- const mutablebson::DamageVector& damages ) {
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args) {
dassert(txn->lockState()->isCollectionLockedForMode(ns().toString(), MODE_IX));
invariant(oldRec.snapshotId() == txn->recoveryUnit()->getSnapshotId());
@@ -443,7 +473,14 @@ namespace mongo {
// Broadcast the mutation so that query results stay correct.
_cursorManager.invalidateDocument(txn, loc, INVALIDATION_MUTATION);
- return _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
+ Status status =
+ _recordStore->updateWithDamages(txn, loc, oldRec.value(), damageSource, damages);
+
+ if (status.isOK()) {
+ args.ns = ns().ns();
+ getGlobalServiceContext()->getOpObserver()->onUpdate(txn, args);
+ }
+ return status;
}
bool Collection::_enforceQuota( bool userEnforeQuota ) const {
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index f1518065aaf..9f537cb5fdb 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -39,6 +39,7 @@
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/op_observer.h"
#include "mongo/db/record_id.h"
#include "mongo/db/storage/capped_callback.h"
#include "mongo/db/storage/record_store.h"
@@ -54,6 +55,9 @@ namespace mongo {
class MultiIndexBlock;
class OperationContext;
+ class UpdateDriver;
+ class UpdateRequest;
+
class RecordIterator;
class RecordFetcher;
@@ -167,7 +171,8 @@ namespace mongo {
*/
StatusWith<RecordId> insertDocument( OperationContext* txn,
const BSONObj& doc,
- bool enforceQuota );
+ bool enforceQuota,
+ bool fromMigrate = false);
StatusWith<RecordId> insertDocument( OperationContext* txn,
const DocWriter* doc,
@@ -196,22 +201,23 @@ namespace mongo {
* if not, it is moved
* @return the post update location of the doc (may or may not be the same as oldLocation)
*/
- StatusWith<RecordId> updateDocument( OperationContext* txn,
- const RecordId& oldLocation,
- const Snapshotted<BSONObj>& oldDoc,
- const BSONObj& newDoc,
- bool enforceQuota,
- bool indexesAffected,
- OpDebug* debug );
-
+ StatusWith<RecordId> updateDocument(OperationContext* txn,
+ const RecordId& oldLocation,
+ const Snapshotted<BSONObj>& oldDoc,
+ const BSONObj& newDoc,
+ bool enforceQuota,
+ bool indexesAffected,
+ OpDebug* debug,
+ oplogUpdateEntryArgs& args);
/**
* right now not allowed to modify indexes
*/
- Status updateDocumentWithDamages( OperationContext* txn,
- const RecordId& loc,
- const Snapshotted<RecordData>& oldRec,
- const char* damageSource,
- const mutablebson::DamageVector& damages );
+ Status updateDocumentWithDamages(OperationContext* txn,
+ const RecordId& loc,
+ const Snapshotted<RecordData>& oldRec,
+ const char* damageSource,
+ const mutablebson::DamageVector& damages,
+ oplogUpdateEntryArgs& args);
// -----------
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 814184ef694..46b58a16618 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -55,6 +55,7 @@
#include "mongo/db/introspect.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/top.h"
#include "mongo/db/storage_options.h"
@@ -272,8 +273,6 @@ namespace mongo {
continue;
}
- getGlobalServiceContext()->getOpObserver()->onDropCollection(
- txn, NamespaceString(ns));
wunit.commit();
}
catch (const WriteConflictException& exp) {
@@ -351,7 +350,7 @@ namespace mongo {
_dbEntry->appendExtraStats( opCtx, output, scale );
}
- Status Database::dropCollection( OperationContext* txn, StringData fullns ) {
+ Status Database::dropCollection(OperationContext* txn, StringData fullns) {
invariant(txn->lockState()->isDbLockedForMode(name(), MODE_X));
LOG(1) << "dropCollection: " << fullns << endl;
@@ -363,12 +362,12 @@ namespace mongo {
return Status::OK();
}
+ NamespaceString nss(fullns);
{
- NamespaceString s( fullns );
- verify( s.db() == _name );
+ verify(nss.db() == _name);
- if( s.isSystem() ) {
- if( s.coll() == "system.profile" ) {
+ if (nss.isSystem()) {
+ if (nss.isSystemDotProfile()) {
if ( _profile != 0 )
return Status( ErrorCodes::IllegalOperation,
"turn off profiling before dropping system.profile collection" );
@@ -417,6 +416,7 @@ namespace mongo {
}
}
+ getGlobalServiceContext()->getOpObserver()->onDropCollection(txn, nss);
return Status::OK();
}
@@ -486,7 +486,6 @@ namespace mongo {
Collection* Database::createCollection( OperationContext* txn,
StringData ns,
const CollectionOptions& options,
- bool allocateDefaultSpace,
bool createIdIndex ) {
massert( 17399, "collection already exists", getCollection( ns ) == NULL );
massertNamespaceNotIndex( ns, "createCollection" );
@@ -513,7 +512,7 @@ namespace mongo {
txn->recoveryUnit()->registerChange( new AddCollectionChange(this, ns) );
- Status status = _dbEntry->createCollection(txn, ns, options, allocateDefaultSpace);
+ Status status = _dbEntry->createCollection(txn, ns, options, true /*allocateDefaultSpace*/);
massertNoTraceStatusOK(status);
@@ -537,6 +536,8 @@ namespace mongo {
}
+ getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn, nss, options);
+
return collection;
}
@@ -592,7 +593,6 @@ namespace mongo {
Database* db,
StringData ns,
BSONObj options,
- bool logForReplication,
bool createDefaultIndexes ) {
invariant( db );
@@ -619,13 +619,7 @@ namespace mongo {
if ( !status.isOK() )
return status;
- invariant( db->createCollection( txn, ns, collectionOptions, true, createDefaultIndexes ) );
-
- if ( logForReplication ) {
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn,
- NamespaceString(ns),
- collectionOptions);
- }
+ invariant(db->createCollection(txn, ns, collectionOptions, createDefaultIndexes));
return Status::OK();
}
diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h
index 57fbece74d8..251946310fb 100644
--- a/src/mongo/db/catalog/database.h
+++ b/src/mongo/db/catalog/database.h
@@ -82,12 +82,11 @@ namespace mongo {
const DatabaseCatalogEntry* getDatabaseCatalogEntry() const;
- Status dropCollection( OperationContext* txn, StringData fullns );
+ Status dropCollection(OperationContext* txn, StringData fullns);
Collection* createCollection( OperationContext* txn,
StringData ns,
const CollectionOptions& options = CollectionOptions(),
- bool allocateSpace = true,
bool createDefaultIndexes = true );
/**
@@ -162,7 +161,6 @@ namespace mongo {
Database* db,
StringData ns,
BSONObj options,
- bool logForReplication,
bool createDefaultIndexes = true );
} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp
new file mode 100644
index 00000000000..bf58462449f
--- /dev/null
+++ b/src/mongo/db/catalog/drop_collection.cpp
@@ -0,0 +1,109 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/drop_collection.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index_builder.h"
+#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/server_options.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+ std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& collectionName) {
+ IndexCatalog::IndexKillCriteria criteria;
+ criteria.ns = collectionName;
+ return IndexBuilder::killMatchingIndexBuilds(db->getCollection(collectionName),
+ criteria);
+ }
+
+} // namespace
+ Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result) {
+ if (!serverGlobalParams.quiet) {
+ log() << "CMD: drop " << collectionName;
+ }
+
+ std::string dbname = collectionName.db().toString();
+
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+
+ AutoGetDb autoDb(txn, dbname, MODE_X);
+ Database* const db = autoDb.getDb();
+ Collection* coll = db ? db->getCollection(collectionName) : nullptr;
+
+ // If db/collection does not exist, short circuit and return.
+ if ( !db || !coll ) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
+ OldClientContext context(txn, collectionName);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping collection "
+ << collectionName.ns());
+ }
+
+ int numIndexes = coll->getIndexCatalog()->numIndexesTotal(txn);
+
+ stopIndexBuilds(txn, db, collectionName);
+
+ result.append("ns", collectionName);
+ result.append("nIndexesWas", numIndexes);
+ WriteUnitOfWork wunit(txn);
+ Status s = db->dropCollection(txn, collectionName.ns());
+
+ if ( !s.isOK() ) {
+ return s;
+ }
+
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", collectionName.ns());
+ return Status::OK();
+ }
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h
new file mode 100644
index 00000000000..a12f5e8419c
--- /dev/null
+++ b/src/mongo/db/catalog/drop_collection.h
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+ class BSONObjBuilder;
+ class NamespaceString;
+ class OperationContext;
+
+ /**
+ * Drops the collection "collectionName" and populates "result" with statistics about what
+ * was removed.
+ */
+ Status dropCollection(OperationContext* txn,
+ const NamespaceString& collectionName,
+ BSONObjBuilder& result);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp
new file mode 100644
index 00000000000..655305d216e
--- /dev/null
+++ b/src/mongo/db/catalog/drop_database.cpp
@@ -0,0 +1,111 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/drop_database.h"
+
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_catalog_entry.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index_builder.h"
+#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context_impl.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+ std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx, Database* db) {
+ invariant(db);
+ std::list<std::string> collections;
+ db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collections);
+
+ std::vector<BSONObj> allKilledIndexes;
+ for (std::list<std::string>::iterator it = collections.begin();
+ it != collections.end();
+ ++it) {
+ std::string ns = *it;
+
+ IndexCatalog::IndexKillCriteria criteria;
+ criteria.ns = ns;
+ std::vector<BSONObj> killedIndexes =
+ IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
+ allKilledIndexes.insert(allKilledIndexes.end(),
+ killedIndexes.begin(),
+ killedIndexes.end());
+ }
+ return allKilledIndexes;
+ }
+} // namespace
+
+ Status dropDatabase(OperationContext* txn, const std::string& dbName) {
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_X);
+ Lock::GlobalWrite lk(txn->lockState());
+ AutoGetDb autoDB(txn, dbName, MODE_X);
+ Database* const db = autoDB.getDb();
+ if (!db) {
+ // DB doesn't exist, so deem it a success.
+ return Status::OK();
+ }
+ OldClientContext context(txn, dbName);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping database " << dbName);
+ }
+
+ log() << "dropDatabase " << dbName << " starting";
+
+ stopIndexBuilds(txn, db);
+ mongo::dropDatabase(txn, db);
+
+ log() << "dropDatabase " << dbName << " finished";
+
+ WriteUnitOfWork wunit(txn);
+
+ getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbName + ".$cmd");
+
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbName);
+
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_database.h b/src/mongo/db/catalog/drop_database.h
new file mode 100644
index 00000000000..184d66d5bf2
--- /dev/null
+++ b/src/mongo/db/catalog/drop_database.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+ class OperationContext;
+
+ /**
+ * Drops the database "dbName".
+ */
+ Status dropDatabase(OperationContext* txn, const std::string& dbName);
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
new file mode 100644
index 00000000000..0ac1c2dae13
--- /dev/null
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -0,0 +1,194 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/drop_indexes.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_builder.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+ std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
+ const std::string& toDeleteNs,
+ const BSONObj& cmdObj) {
+ Collection* collection = db->getCollection(toDeleteNs);
+ IndexCatalog::IndexKillCriteria criteria;
+
+ // Get index name to drop
+ BSONElement toDrop = cmdObj.getField("index");
+
+ if (toDrop.type() == String) {
+ // Kill all in-progress indexes
+ if (strcmp("*", toDrop.valuestr()) == 0) {
+ criteria.ns = toDeleteNs;
+ return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
+ }
+ // Kill an in-progress index by name
+ else {
+ criteria.name = toDrop.valuestr();
+ return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
+ }
+ }
+ // Kill an in-progress index build by index key
+ else if (toDrop.type() == Object) {
+ criteria.key = toDrop.Obj();
+ return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
+ }
+
+ return std::vector<BSONObj>();
+ }
+
+ Status wrappedRun(OperationContext* txn,
+ const StringData& dbname,
+ const std::string& toDeleteNs,
+ Database* const db,
+ const BSONObj& jsobj,
+ BSONObjBuilder* anObjBuilder) {
+ if (!serverGlobalParams.quiet) {
+ LOG(0) << "CMD: dropIndexes " << toDeleteNs;
+ }
+ Collection* collection = db ? db->getCollection(toDeleteNs) : nullptr;
+
+ // If db/collection does not exist, short circuit and return.
+ if (!db || !collection) {
+ return Status(ErrorCodes::NamespaceNotFound, "ns not found");
+ }
+
+ OldClientContext ctx(txn, toDeleteNs);
+ stopIndexBuilds(txn, db, toDeleteNs, jsobj);
+
+ IndexCatalog* indexCatalog = collection->getIndexCatalog();
+ anObjBuilder->appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn));
+
+
+ BSONElement f = jsobj.getField("index");
+ if (f.type() == String) {
+
+ std::string indexToDelete = f.valuestr();
+
+ if (indexToDelete == "*") {
+ Status s = indexCatalog->dropAllIndexes(txn, false);
+ if (!s.isOK()) {
+ return s;
+ }
+ anObjBuilder->append("msg", "non-_id indexes dropped for collection");
+ return Status::OK();
+ }
+
+ IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName(txn,
+ indexToDelete);
+ if (desc == NULL) {
+ return Status(ErrorCodes::IndexNotFound,
+ str::stream() << "index not found with name ["
+ << indexToDelete << "]");
+ }
+
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
+
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
+ }
+
+ return Status::OK();
+ }
+
+ if (f.type() == Object) {
+ IndexDescriptor* desc =
+ collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
+ if (desc == NULL) {
+ return Status(ErrorCodes::InvalidOptions,
+ str::stream() << "can't find index with key: "
+ << f.embeddedObject().toString());
+ }
+
+ if (desc->isIdIndex()) {
+ return Status(ErrorCodes::InvalidOptions, "cannot drop _id index");
+ }
+
+ Status s = indexCatalog->dropIndex(txn, desc);
+ if (!s.isOK()) {
+ return s;
+ }
+
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::IndexNotFound, "invalid index name spec");
+ }
+} // namespace
+
+ Status dropIndexes(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& idxDescriptor,
+ BSONObjBuilder* result) {
+ StringData dbName = ns.db();
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ ScopedTransaction transaction(txn, MODE_IX);
+ AutoGetDb autoDb(txn, dbName, MODE_X);
+
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbName);
+
+ if (userInitiatedWritesAndNotPrimary) {
+ return Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while dropping indexes in "
+ << ns.toString());
+ }
+
+ WriteUnitOfWork wunit(txn);
+ Status status = wrappedRun(txn, dbName, ns, autoDb.getDb(), idxDescriptor, result);
+ if (!status.isOK()) {
+ return status;
+ }
+ getGlobalServiceContext()->getOpObserver()->onDropIndex(txn,
+ dbName.toString() + ".$cmd",
+ idxDescriptor);
+ wunit.commit();
+ } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbName);
+ return Status::OK();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h
new file mode 100644
index 00000000000..ba07687098e
--- /dev/null
+++ b/src/mongo/db/catalog/drop_indexes.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+    class BSONObj;
+    class BSONObjBuilder;
+    class NamespaceString;
+    class OperationContext;
+
+    /**
+     * Drops the index from collection "ns" that matches the "idxDescriptor" and populates
+     * "result" with some statistics about the dropped index.
+     */
+    Status dropIndexes(OperationContext* txn,
+                       const NamespaceString& ns,
+                       const BSONObj& idxDescriptor,
+                       BSONObjBuilder* result);
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
new file mode 100644
index 00000000000..8d21fea5d8c
--- /dev/null
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -0,0 +1,292 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/catalog/rename_collection.h"
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/catalog/index_catalog.h"
+#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/client.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_builder.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
+#include "mongo/util/scopeguard.h"
+
+namespace mongo {
+namespace {
+    // Best-effort drop of "collName" in "db"; used as a ScopeGuard callback to clean up
+    // a partially-copied rename target. The unit of work is committed only when the drop
+    // succeeds; a failed drop is deliberately ignored (the WUOW destructor rolls back).
+    // Note: "static" is redundant inside an anonymous namespace, which already gives
+    // internal linkage, so it is omitted.
+    void dropCollection(OperationContext* txn, Database* db, StringData collName) {
+        WriteUnitOfWork wunit(txn);
+        if (db->dropCollection(txn, collName).isOK()) {
+            // ignoring failure case
+            wunit.commit();
+        }
+    }
+
+    // renameCollection's helper: kills index builds in progress on "source" and returns
+    // their specs with the "ns" field rewritten to "target", so the builds can be
+    // restored against the renamed namespace (or restarted if the rename fails).
+    std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+                                         Database* db,
+                                         const NamespaceString& source,
+                                         const NamespaceString& target) {
+
+        IndexCatalog::IndexKillCriteria criteria;
+        criteria.ns = source;
+        std::vector<BSONObj> prelim =
+            IndexBuilder::killMatchingIndexBuilds(db->getCollection(source), criteria);
+
+        std::vector<BSONObj> indexes;
+        indexes.reserve(prelim.size());
+
+        for (const BSONObj& spec : prelim) {
+            // Rewrite the "ns" field so the spec refers to the rename target.
+            BSONObj stripped = spec.removeField("ns");
+            BSONObjBuilder builder;
+            builder.appendElements(stripped);
+            builder.append("ns", target);
+            indexes.push_back(builder.obj());
+        }
+
+        return indexes;
+    }
+} // namespace
+
+    Status renameCollection(OperationContext* txn,
+                            const NamespaceString& source,
+                            const NamespaceString& target,
+                            bool dropTarget,
+                            bool stayTemp) {
+        // The whole rename must appear atomic, so hold the global exclusive lock for
+        // its entire duration.
+        ScopedTransaction transaction(txn, MODE_X);
+        Lock::GlobalWrite globalWriteLock(txn->lockState());
+        // We stay in source context the whole time. This is mostly to set the CurOp namespace.
+        OldClientContext ctx(txn, source);
+
+        // Only user-initiated writes require this node to be primary.
+        bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+            !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(source.db());
+
+        if (userInitiatedWritesAndNotPrimary) {
+            return Status(ErrorCodes::NotMaster, str::stream()
+                          << "Not primary while renaming collection " << source.ns()
+                          << " to " << target.ns());
+        }
+
+        Database* const sourceDB = dbHolder().get(txn, source.db());
+        Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
+        if (!sourceColl) {
+            return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
+        }
+
+        {
+            // Ensure that collection name does not exceed maximum length.
+            // Ensure that index names do not push the length over the max.
+            // Iterator includes unfinished indexes.
+            IndexCatalog::IndexIterator sourceIndIt =
+                sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+            int longestIndexNameLength = 0;
+            while (sourceIndIt.more()) {
+                int thisLength = sourceIndIt.next()->indexName().length();
+                if (thisLength > longestIndexNameLength)
+                    longestIndexNameLength = thisLength;
+            }
+
+            unsigned int longestAllowed =
+                std::min(int(NamespaceString::MaxNsCollectionLen),
+                         int(NamespaceString::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
+            if (target.size() > longestAllowed) {
+                StringBuilder sb;
+                sb << "collection name length of " << target.size()
+                   << " exceeds maximum length of " << longestAllowed
+                   << ", allowing for index names";
+                return Status(ErrorCodes::InvalidLength, sb.str());
+            }
+        }
+
+        // Kill in-progress index builds on the source; they are restored by the guard
+        // on any failure path below.
+        const std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, sourceDB, source, target);
+        // Dismissed on success
+        ScopeGuard indexBuildRestorer = MakeGuard(IndexBuilder::restoreIndexes, txn, indexesInProg);
+
+        Database* const targetDB = dbHolder().openDb(txn, target.db());
+
+        {
+            WriteUnitOfWork wunit(txn);
+
+            // Check if the target namespace exists and if dropTarget is true.
+            // If target exists and dropTarget is not true, return false.
+            if (targetDB->getCollection(target)) {
+                if (!dropTarget) {
+                    return Status(ErrorCodes::NamespaceExists, "target namespace exists");
+                }
+
+                Status s = targetDB->dropCollection(txn, target.ns());
+                if (!s.isOK()) {
+                    return s;
+                }
+            }
+
+            // If we are renaming in the same database, just
+            // rename the namespace and we're done.
+            if (sourceDB == targetDB) {
+                Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
+                if (!s.isOK()) {
+                    return s;
+                }
+
+                getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+                        txn,
+                        NamespaceString(source),
+                        NamespaceString(target),
+                        dropTarget,
+                        stayTemp);
+
+                wunit.commit();
+                indexBuildRestorer.Dismiss();
+                return Status::OK();
+            }
+
+            wunit.commit();
+        }
+
+        // If we get here, we are renaming across databases, so we must copy all the data and
+        // indexes, then remove the source collection.
+
+        // Create the target collection. It will be removed if we fail to copy the collection.
+        // TODO use a temp collection and unset the temp flag on success.
+        Collection* targetColl = nullptr;
+        {
+            CollectionOptions options;
+            options.setNoIdIndex();
+
+            if (sourceColl->isCapped()) {
+                const CollectionOptions sourceOpts =
+                    sourceColl->getCatalogEntry()->getCollectionOptions(txn);
+
+                options.capped = true;
+                options.cappedSize = sourceOpts.cappedSize;
+                options.cappedMaxDocs = sourceOpts.cappedMaxDocs;
+            }
+
+            WriteUnitOfWork wunit(txn);
+
+            // No logOp necessary because the entire renameCollection command is one logOp.
+            bool shouldReplicateWrites = txn->writesAreReplicated();
+            txn->setReplicatedWrites(false);
+            targetColl = targetDB->createCollection(txn, target.ns(), options);
+            txn->setReplicatedWrites(shouldReplicateWrites);
+            if (!targetColl) {
+                return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
+            }
+
+            wunit.commit();
+        }
+
+        // Dismissed on success
+        ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());
+
+        MultiIndexBlock indexer(txn, targetColl);
+        indexer.allowInterruption();
+
+        // Copy the index descriptions from the source collection, adjusting the ns field.
+        {
+            std::vector<BSONObj> indexesToCopy;
+            IndexCatalog::IndexIterator sourceIndIt =
+                sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
+            while (sourceIndIt.more()) {
+                const BSONObj currIndex = sourceIndIt.next()->infoObj();
+
+                // Process the source index.
+                BSONObjBuilder newIndex;
+                newIndex.append("ns", target);
+                newIndex.appendElementsUnique(currIndex);
+                indexesToCopy.push_back(newIndex.obj());
+            }
+
+            // Propagate a failed index-build start instead of silently ignoring it;
+            // targetCollectionDropper cleans up the target on this error path.
+            Status status = indexer.init(indexesToCopy);
+            if (!status.isOK()) {
+                return status;
+            }
+        }
+
+        {
+            // Copy over all the data from source collection to target collection.
+            boost::scoped_ptr<RecordIterator> sourceIt(sourceColl->getIterator(txn));
+            while (!sourceIt->isEOF()) {
+                txn->checkForInterrupt();
+
+                const Snapshotted<BSONObj> obj = sourceColl->docFor(txn, sourceIt->getNext());
+
+                WriteUnitOfWork wunit(txn);
+                // No logOp necessary because the entire renameCollection command is one logOp.
+                bool shouldReplicateWrites = txn->writesAreReplicated();
+                txn->setReplicatedWrites(false);
+                Status status =
+                    targetColl->insertDocument(txn, obj.value(), &indexer, true).getStatus();
+                txn->setReplicatedWrites(shouldReplicateWrites);
+                if (!status.isOK())
+                    return status;
+                wunit.commit();
+            }
+        }
+
+        Status status = indexer.doneInserting();
+        if (!status.isOK())
+            return status;
+
+        {
+            // Getting here means we successfully built the target copy. We now remove the
+            // source collection and finalize the rename.
+            WriteUnitOfWork wunit(txn);
+
+            bool shouldReplicateWrites = txn->writesAreReplicated();
+            txn->setReplicatedWrites(false);
+            Status status = sourceDB->dropCollection(txn, source.ns());
+            txn->setReplicatedWrites(shouldReplicateWrites);
+            if (!status.isOK())
+                return status;
+
+            indexer.commit();
+
+            getGlobalServiceContext()->getOpObserver()->onRenameCollection(
+                    txn,
+                    NamespaceString(source),
+                    NamespaceString(target),
+                    dropTarget,
+                    stayTemp);
+
+            wunit.commit();
+        }
+
+        // Full success: stand down both cleanup guards.
+        indexBuildRestorer.Dismiss();
+        targetCollectionDropper.Dismiss();
+        return Status::OK();
+    }
+
+} // namespace mongo
diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h
new file mode 100644
index 00000000000..1ec0b754779
--- /dev/null
+++ b/src/mongo/db/catalog/rename_collection.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/base/status.h"
+
+namespace mongo {
+    class NamespaceString;
+    class OperationContext;
+
+    /**
+     * Renames the collection "source" to "target" and drops the existing collection named "target"
+     * iff "dropTarget" is true. "stayTemp" indicates whether a collection should maintain its
+     * temporariness.
+     */
+    Status renameCollection(OperationContext* txn,
+                            const NamespaceString& source,
+                            const NamespaceString& target,
+                            bool dropTarget,
+                            bool stayTemp);
+
+} // namespace mongo
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index fa69594e2fa..c8dae4e827d 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -148,15 +148,10 @@ namespace mongo {
<< to_collection.ns() << "]",
!createdCollection );
WriteUnitOfWork wunit(txn);
- collection = db->createCollection(txn, to_collection.ns());
+ invariant(logForRepl == txn->writesAreReplicated());
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
verify(collection);
- if (logForRepl) {
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn,
- to_collection,
- CollectionOptions());
- }
wunit.commit();
}
@@ -233,8 +228,6 @@ namespace mongo {
<< ' ' << loc.getStatus() << " obj:" << js;
}
uassertStatusOK( loc.getStatus() );
- if (logForRepl)
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, to_collection.ns(), js);
wunit.commit();
@@ -338,13 +331,9 @@ namespace mongo {
Collection* collection = db->getCollection( to_collection );
if ( !collection ) {
WriteUnitOfWork wunit(txn);
- collection = db->createCollection( txn, to_collection.ns() );
+ invariant(logForRepl == txn->writesAreReplicated());
+ collection = db->createCollection(txn, to_collection.ns(), CollectionOptions());
invariant(collection);
- if (logForRepl) {
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(txn,
- to_collection,
- CollectionOptions());
- }
wunit.commit();
}
@@ -372,7 +361,7 @@ namespace mongo {
const char* createIndexNs = targetSystemIndexesCollectionName.c_str();
for (vector<BSONObj>::const_iterator it = indexesToBuild.begin();
it != indexesToBuild.end(); ++it) {
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, createIndexNs, *it);
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(txn, createIndexNs, *it);
}
}
wunit.commit();
@@ -408,7 +397,8 @@ namespace mongo {
BSONObj col = collList.front();
if (col["options"].isABSONObj()) {
WriteUnitOfWork wunit(txn);
- Status status = userCreateNS(txn, db, ns, col["options"].Obj(), logForRepl, 0);
+ invariant(logForRepl == txn->writesAreReplicated());
+ Status status = userCreateNS(txn, db, ns, col["options"].Obj(), 0);
if ( !status.isOK() ) {
errmsg = status.toString();
return false;
@@ -589,12 +579,8 @@ namespace mongo {
// we defer building id index for performance - building it in batch is much
// faster
- Status createStatus = userCreateNS(txn,
- db,
- to_name.ns(),
- options,
- opts.logForRepl,
- false);
+ invariant(opts.logForRepl == txn->writesAreReplicated());
+ Status createStatus = userCreateNS(txn, db, to_name.ns(), options, false);
if ( !createStatus.isOK() ) {
errmsg = str::stream() << "failed to create collection \""
<< to_name.ns() << "\": "
@@ -650,10 +636,6 @@ namespace mongo {
BSONObj id;
c->deleteDocument(txn, *it, true, true, opts.logForRepl ? &id : NULL);
- if (opts.logForRepl)
- getGlobalServiceContext()->getOpObserver()->onDelete(txn,
- c->ns().ns(),
- id);
wunit.commit();
}
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 45a84e3452e..35dae6c2fb6 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -227,12 +227,6 @@ namespace mongo {
help << "no help defined";
}
- std::vector<BSONObj> Command::stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- return std::vector<BSONObj>();
- }
-
Command* Command::findCommand( StringData name ) {
CommandMap::const_iterator i = _commands->find( name );
if ( i == _commands->end() )
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
index 37b2750684e..c3da3ad9578 100644
--- a/src/mongo/db/commands.h
+++ b/src/mongo/db/commands.h
@@ -243,10 +243,6 @@ namespace mutablebson {
ServerStatusMetricField<Counter64> _commandsFailedMetric;
public:
- // Stops all index builds required to run this command and returns index builds killed.
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj);
static const CommandMap* commandsByBestName() { return _commandsByBestName; }
static const CommandMap* webCommands() { return _webCommands; }
diff --git a/src/mongo/db/commands/apply_ops.cpp b/src/mongo/db/commands/apply_ops.cpp
index 6551a5588fe..5342d8ac1c8 100644
--- a/src/mongo/db/commands/apply_ops.cpp
+++ b/src/mongo/db/commands/apply_ops.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/resource_pattern.h"
+#include "mongo/db/catalog/apply_ops.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/dbhash.h"
@@ -74,7 +75,14 @@ namespace mongo {
// applyOps can do pretty much anything, so require all privileges.
RoleGraph::generateUniversalPrivileges(out);
}
- virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
if ( cmdObj.firstElement().type() != Array ) {
errmsg = "ops has to be an array";
@@ -94,156 +102,7 @@ namespace mongo {
}
}
- // SERVER-4328 todo : is global ok or does this take a long time? i believe multiple
- // ns used so locking individually requires more analysis
- ScopedTransaction scopedXact(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
-
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while applying ops to database " << dbname));
- }
-
- // Preconditions check reads the database state, so needs to be done locked
- if ( cmdObj["preCondition"].type() == Array ) {
- BSONObjIterator i( cmdObj["preCondition"].Obj() );
- while ( i.more() ) {
- BSONObj f = i.next().Obj();
-
- DBDirectClient db( txn );
- BSONObj realres = db.findOne( f["ns"].String() , f["q"].Obj() );
-
- // Apply-ops would never have a $where matcher, so use the default callback,
- // which will throw an error if $where is found.
- Matcher m(f["res"].Obj());
- if ( ! m.matches( realres ) ) {
- result.append( "got" , realres );
- result.append( "whatFailed" , f );
- errmsg = "pre-condition failed";
- return false;
- }
- }
- }
-
- // apply
- int num = 0;
- int errors = 0;
-
- BSONObjIterator i( ops );
- BSONArrayBuilder ab;
- const bool alwaysUpsert = cmdObj.hasField("alwaysUpsert") ?
- cmdObj["alwaysUpsert"].trueValue() : true;
-
- while ( i.more() ) {
- BSONElement e = i.next();
- const BSONObj& temp = e.Obj();
-
- // Ignore 'n' operations.
- const char *opType = temp["op"].valuestrsafe();
- if (*opType == 'n') continue;
-
- const string ns = temp["ns"].String();
-
- // Run operations under a nested lock as a hack to prevent yielding.
- //
- // The list of operations is supposed to be applied atomically; yielding
- // would break atomicity by allowing an interruption or a shutdown to occur
- // after only some operations are applied. We are already locked globally
- // at this point, so taking a DBLock on the namespace creates a nested lock,
- // and yields are disallowed for operations that hold a nested lock.
- //
- // We do not have a wrapping WriteUnitOfWork so it is possible for a journal
- // commit to happen with a subset of ops applied.
- // TODO figure out what to do about this.
- Lock::GlobalWrite globalWriteLockDisallowTempRelease(txn->lockState());
-
- // Ensures that yielding will not happen (see the comment above).
- DEV {
- Locker::LockSnapshot lockSnapshot;
- invariant(!txn->lockState()->saveLockStateAndUnlock(&lockSnapshot));
- };
-
- OldClientContext ctx(txn, ns);
-
- Status status(ErrorCodes::InternalError, "");
- while (true) {
- try {
- // We assume that in the WriteConflict retry case, either the op rolls back
- // any changes it makes or is otherwise safe to rerun.
- status =
- repl::applyOperation_inlock(txn, ctx.db(), temp, false, alwaysUpsert);
- break;
- }
- catch (const WriteConflictException& wce) {
- LOG(2) << "WriteConflictException in applyOps command, retrying.";
- txn->recoveryUnit()->commitAndRestart();
- continue;
- }
- }
-
- ab.append(status.isOK());
- if (!status.isOK()) {
- errors++;
- }
-
- num++;
-
- WriteUnitOfWork wuow(txn);
- logOpForDbHash(txn, ns.c_str());
- wuow.commit();
- }
-
- result.append( "applied" , num );
- result.append( "results" , ab.arr() );
-
- if ( ! fromRepl ) {
- // We want this applied atomically on slaves
- // so we re-wrap without the pre-condition for speed
-
- string tempNS = str::stream() << dbname << ".$cmd";
-
- // TODO: possibly use mutable BSON to remove preCondition field
- // once it is available
- BSONObjIterator iter(cmdObj);
- BSONObjBuilder cmdBuilder;
-
- while (iter.more()) {
- BSONElement elem(iter.next());
- if (strcmp(elem.fieldName(), "preCondition") != 0) {
- cmdBuilder.append(elem);
- }
- }
-
- const BSONObj cmdRewritten = cmdBuilder.done();
-
- // We currently always logOp the command regardless of whether the individial ops
- // succeeded and rely on any failures to also happen on secondaries. This isn't
- // perfect, but it's what the command has always done and is part of its "correct"
- // behavior.
- while (true) {
- try {
- WriteUnitOfWork wunit(txn);
- getGlobalServiceContext()->getOpObserver()->onApplyOps(txn,
- tempNS,
- cmdRewritten);
- wunit.commit();
- break;
- }
- catch (const WriteConflictException& wce) {
- LOG(2) <<
- "WriteConflictException while logging applyOps command, retrying.";
- txn->recoveryUnit()->commitAndRestart();
- continue;
- }
- }
- }
-
- if (errors != 0) {
- return false;
- }
-
- return true;
+ return appendCommandStatus(result, applyOps(txn, dbname, cmdObj, &result));
}
private:
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 8384c71ec3f..5538e4b70e0 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -33,6 +33,7 @@
#include <boost/scoped_ptr.hpp>
#include "mongo/db/background.h"
+#include "mongo/db/catalog/capped_utils.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
@@ -50,94 +51,6 @@ namespace mongo {
using std::string;
using std::stringstream;
-namespace {
-
- Status cloneCollectionAsCapped( OperationContext* txn,
- Database* db,
- const string& shortFrom,
- const string& shortTo,
- double size,
- bool temp,
- bool logForReplication ) {
-
- string fromNs = db->name() + "." + shortFrom;
- string toNs = db->name() + "." + shortTo;
-
- Collection* fromCollection = db->getCollection( fromNs );
- if ( !fromCollection )
- return Status( ErrorCodes::NamespaceNotFound,
- str::stream() << "source collection " << fromNs << " does not exist" );
-
- if ( db->getCollection( toNs ) )
- return Status( ErrorCodes::NamespaceExists, "to collection already exists" );
-
- // create new collection
- {
- OldClientContext ctx(txn, toNs );
- BSONObjBuilder spec;
- spec.appendBool( "capped", true );
- spec.append( "size", size );
- if ( temp )
- spec.appendBool( "temp", true );
-
- WriteUnitOfWork wunit(txn);
- Status status = userCreateNS( txn, ctx.db(), toNs, spec.done(), logForReplication );
- if ( !status.isOK() )
- return status;
- wunit.commit();
- }
-
- Collection* toCollection = db->getCollection( toNs );
- invariant( toCollection ); // we created above
-
- // how much data to ignore because it won't fit anyway
- // datasize and extentSize can't be compared exactly, so add some padding to 'size'
-
- long long allocatedSpaceGuess =
- std::max( static_cast<long long>(size * 2),
- static_cast<long long>(toCollection->getRecordStore()->storageSize(txn) * 2));
-
- long long excessSize = fromCollection->dataSize(txn) - allocatedSpaceGuess;
-
- scoped_ptr<PlanExecutor> exec( InternalPlanner::collectionScan(txn,
- fromNs,
- fromCollection,
- InternalPlanner::FORWARD ) );
-
-
- while ( true ) {
- BSONObj obj;
- PlanExecutor::ExecState state = exec->getNext(&obj, NULL);
-
- switch( state ) {
- case PlanExecutor::IS_EOF:
- return Status::OK();
- case PlanExecutor::DEAD:
- db->dropCollection( txn, toNs );
- return Status( ErrorCodes::InternalError, "executor turned dead while iterating" );
- case PlanExecutor::FAILURE:
- return Status( ErrorCodes::InternalError, "executor error while iterating" );
- case PlanExecutor::ADVANCED:
- if ( excessSize > 0 ) {
- excessSize -= ( 4 * obj.objsize() ); // 4x is for padding, power of 2, etc...
- continue;
- }
-
- WriteUnitOfWork wunit(txn);
- toCollection->insertDocument( txn, obj, true );
- if ( logForReplication ) {
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, toNs, obj);
- }
- wunit.commit();
- }
- }
-
- invariant( false ); // unreachable
- }
-
-} // namespace
-
- /* convertToCapped seems to use this */
class CmdCloneCollectionAsCapped : public Command {
public:
CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
@@ -164,7 +77,14 @@ namespace {
NamespaceString(dbname, collection)),
targetActions));
}
- bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
string from = jsobj.getStringField( "cloneCollectionAsCapped" );
string to = jsobj.getStringField( "toCollection" );
double size = jsobj.getField( "size" ).number();
@@ -193,7 +113,7 @@ namespace {
<< " not found"));
}
- Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp, true);
+ Status status = cloneCollectionAsCapped(txn, db, from, to, size, temp);
return appendCommandStatus( result, status );
}
} cmdCloneCollectionAsCapped;
@@ -219,11 +139,9 @@ namespace {
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- const std::string ns = parseNsCollectionRequired(db->name(), cmdObj);
-
+ std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& ns) {
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = ns;
Collection* coll = db->getCollection(ns);
@@ -241,72 +159,20 @@ namespace {
BSONObjBuilder& result,
bool fromRepl ) {
- const std::string ns = parseNsCollectionRequired(dbname, jsobj);
-
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
-
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while converting " << ns << " to a capped collection"));
- }
-
- Database* const db = autoDb.getDb();
- if (!db) {
- return appendCommandStatus(result,
- Status(ErrorCodes::DatabaseNotFound,
- str::stream() << "database " << dbname
- << " not found"));
- }
-
- stopIndexBuilds(txn, db, jsobj);
- BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
-
string shortSource = jsobj.getStringField( "convertToCapped" );
- string longSource = dbname + "." + shortSource;
double size = jsobj.getField( "size" ).number();
- if ( shortSource.empty() || size == 0 ) {
+ if (shortSource.empty() || size == 0) {
errmsg = "invalid command spec";
return false;
}
- string shortTmpName = str::stream() << "tmp.convertToCapped." << shortSource;
- string longTmpName = str::stream() << dbname << "." << shortTmpName;
-
- if ( db->getCollection( longTmpName ) ) {
- Status status = db->dropCollection( txn, longTmpName );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
- }
-
- Status status = cloneCollectionAsCapped( txn, db, shortSource, shortTmpName, size, true, false );
-
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- verify( db->getCollection( longTmpName ) );
-
- WriteUnitOfWork wunit(txn);
- status = db->dropCollection( txn, longSource );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- status = db->renameCollection( txn, longTmpName, longSource, false );
- if ( !status.isOK() )
- return appendCommandStatus( result, status );
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onConvertToCapped(
- txn,
- NamespaceString(longSource),
- size);
- }
-
- wunit.commit();
- return true;
+ return appendCommandStatus(result,
+ convertToCapped(txn,
+ NamespaceString(dbname, shortSource),
+ size));
}
+
} cmdConvertToCapped;
}
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index 39db8afa3fe..32d1300c539 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -86,6 +86,7 @@ namespace mongo {
virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options,
string& errmsg, BSONObjBuilder& result,
bool fromRepl = false ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
// --- parse
@@ -159,14 +160,8 @@ namespace mongo {
if ( !collection ) {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
WriteUnitOfWork wunit(txn);
- collection = db->createCollection( txn, ns.ns() );
+ collection = db->createCollection(txn, ns.ns(), CollectionOptions());
invariant( collection );
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn,
- ns,
- CollectionOptions());
- }
wunit.commit();
} MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "createIndexes", ns.ns());
}
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index d427e3a720f..8b1a898be1e 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/drop_indexes.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/catalog/index_key_validate.h"
@@ -81,152 +82,16 @@ namespace mongo {
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- const std::string toDeleteNs = parseNsCollectionRequired(db->name(), cmdObj);
- Collection* collection = db->getCollection(toDeleteNs);
- IndexCatalog::IndexKillCriteria criteria;
-
- // Get index name to drop
- BSONElement toDrop = cmdObj.getField("index");
-
- if (toDrop.type() == String) {
- // Kill all in-progress indexes
- if (strcmp("*", toDrop.valuestr()) == 0) {
- criteria.ns = toDeleteNs;
- return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
- }
- // Kill an in-progress index by name
- else {
- criteria.name = toDrop.valuestr();
- return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
- }
- }
- // Kill an in-progress index build by index key
- else if (toDrop.type() == Object) {
- criteria.key = toDrop.Obj();
- return IndexBuilder::killMatchingIndexBuilds(collection, criteria);
- }
-
- return std::vector<BSONObj>();
- }
-
CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg,
BSONObjBuilder& result, bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
const std::string ns = parseNsCollectionRequired(dbname, jsobj);
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
-
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while dropping indexes in " << ns));
- }
-
- WriteUnitOfWork wunit(txn);
- bool ok = wrappedRun(txn, dbname, ns, autoDb.getDb(), jsobj, errmsg, result);
- if (!ok) {
- return false;
- }
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onDropIndex(txn,
- dbname + ".$cmd",
- jsobj);
- }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropIndexes", dbname);
- return true;
- }
-
- bool wrappedRun(OperationContext* txn,
- const string& dbname,
- const std::string& toDeleteNs,
- Database* const db,
- BSONObj& jsobj,
- string& errmsg,
- BSONObjBuilder& anObjBuilder) {
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
- }
- Collection* collection = db ? db->getCollection(toDeleteNs) : NULL;
-
- // If db/collection does not exist, short circuit and return.
- if ( !db || !collection ) {
- errmsg = "ns not found";
- return false;
- }
-
- OldClientContext ctx(txn, toDeleteNs);
- stopIndexBuilds(txn, db, jsobj);
-
- IndexCatalog* indexCatalog = collection->getIndexCatalog();
- anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn) );
-
-
- BSONElement f = jsobj.getField("index");
- if ( f.type() == String ) {
-
- string indexToDelete = f.valuestr();
-
- if ( indexToDelete == "*" ) {
- Status s = indexCatalog->dropAllIndexes(txn, false);
- if ( !s.isOK() ) {
- appendCommandStatus( anObjBuilder, s );
- return false;
- }
- anObjBuilder.append("msg", "non-_id indexes dropped for collection");
- return true;
- }
-
- IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( txn,
- indexToDelete );
- if ( desc == NULL ) {
- errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
- return false;
- }
-
- if ( desc->isIdIndex() ) {
- errmsg = "cannot drop _id index";
- return false;
- }
-
- Status s = indexCatalog->dropIndex(txn, desc);
- if ( !s.isOK() ) {
- appendCommandStatus( anObjBuilder, s );
- return false;
- }
-
- return true;
- }
-
- if ( f.type() == Object ) {
- IndexDescriptor* desc =
- collection->getIndexCatalog()->findIndexByKeyPattern( txn, f.embeddedObject() );
- if ( desc == NULL ) {
- errmsg = "can't find index with key:";
- errmsg += f.embeddedObject().toString();
- return false;
- }
-
- if ( desc->isIdIndex() ) {
- errmsg = "cannot drop _id index";
- return false;
- }
-
- Status s = indexCatalog->dropIndex(txn, desc);
- if ( !s.isOK() ) {
- appendCommandStatus( anObjBuilder, s );
- return false;
- }
-
- return true;
- }
-
- errmsg = "invalid index name spec";
- return false;
+ return appendCommandStatus(result,
+ dropIndexes(txn,
+ NamespaceString(ns),
+ jsobj,
+ &result));
}
} cmdDropIndexes;
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 34c00a80183..22ebb5c3ce3 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -86,6 +86,7 @@ namespace mongo {
string& errmsg,
BSONObjBuilder& result,
bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
@@ -163,9 +164,7 @@ namespace mongo {
else {
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
WriteUnitOfWork wuow(txn);
- uassertStatusOK( userCreateNS( txn, db,
- ns, BSONObj(),
- !fromRepl ) );
+ uassertStatusOK(userCreateNS(txn, db, ns, BSONObj()));
wuow.commit();
} MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "findAndModify", ns);
}
@@ -388,8 +387,12 @@ namespace mongo {
if ( remove ) {
_appendHelper(result, doc, found, fields, whereCallback);
if ( found ) {
- deleteObjects(txn, ctx.db(), ns, queryModified, PlanExecutor::YIELD_MANUAL,
- true, true);
+ deleteObjects(txn,
+ ctx.db(),
+ ns,
+ queryModified,
+ PlanExecutor::YIELD_MANUAL,
+ true);
// Committing the WUOW can close the current snapshot. Until this happens, the
// snapshot id should not have changed.
@@ -446,12 +449,6 @@ namespace mongo {
uassertStatusOK(collection->insertDocument(txn, newDoc, enforceQuota)
.getStatus());
- // This is the last thing we do before the WriteUnitOfWork commits (except
- // for some BSON manipulation).
- getGlobalServiceContext()->getOpObserver()->onInsert(txn,
- collection->ns().ns(),
- newDoc);
-
// Must commit the write and logOp() before doing anything that could throw.
wuow.commit();
@@ -483,7 +480,6 @@ namespace mongo {
request.setQuery(queryModified);
request.setUpdates(update);
request.setUpsert(upsert);
- request.setUpdateOpLog();
request.setStoreResultDoc(returnNew);
request.setYieldPolicy(PlanExecutor::YIELD_MANUAL);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index ad506e9aee2..b107808ab86 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -436,16 +436,6 @@ namespace mongo {
options.temp = true;
tempColl = tempCtx.db()->createCollection(_txn, _config.tempNamespace, options);
- // Log the createCollection operation.
- BSONObjBuilder b;
- b.append( "create", nsToCollectionSubstring( _config.tempNamespace ));
- b.appendElements( options.toBSON() );
- string logNs = nsToDatabase( _config.tempNamespace ) + ".$cmd";
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- _txn,
- NamespaceString(_config.tempNamespace),
- options);
-
for ( vector<BSONObj>::iterator it = indexesToInsert.begin();
it != indexesToInsert.end(); ++it ) {
Status status =
@@ -458,7 +448,7 @@ namespace mongo {
}
// Log the createIndex operation.
string logNs = nsToDatabase( _config.tempNamespace ) + ".system.indexes";
- getGlobalServiceContext()->getOpObserver()->onInsert(_txn, logNs, *it);
+ getGlobalServiceContext()->getOpObserver()->onCreateIndex(_txn, logNs, *it);
}
wuow.commit();
}
@@ -695,7 +685,6 @@ namespace mongo {
BSONObj bo = b.obj();
uassertStatusOK( coll->insertDocument( _txn, bo, true ).getStatus() );
- getGlobalServiceContext()->getOpObserver()->onInsert(_txn, ns, bo);
wuow.commit();
}
@@ -708,7 +697,10 @@ namespace mongo {
OldClientWriteContext ctx(_txn, _config.incLong );
WriteUnitOfWork wuow(_txn);
Collection* coll = getCollectionOrUassert(ctx.db(), _config.incLong);
- uassertStatusOK( coll->insertDocument( _txn, o, true ).getStatus() );
+ bool shouldReplicateWrites = _txn->writesAreReplicated();
+ _txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, _txn, shouldReplicateWrites);
+ uassertStatusOK(coll->insertDocument(_txn, o, true, false).getStatus());
wuow.commit();
}
@@ -1274,6 +1266,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname , BSONObj& cmd, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
Timer t;
if (txn->getClient()->isInDirectClient()) {
diff --git a/src/mongo/db/commands/rename_collection.cpp b/src/mongo/db/commands/rename_collection.cpp
index 162208b73cf..2cc098bf621 100644
--- a/src/mongo/db/commands/rename_collection.cpp
+++ b/src/mongo/db/commands/rename_collection.cpp
@@ -31,6 +31,7 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_create.h"
@@ -74,11 +75,10 @@ namespace mongo {
help << " example: { renameCollection: foo.a, to: bar.b }";
}
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- string source = cmdObj.getStringField( name.c_str() );
- string target = cmdObj.getStringField( "to" );
+ std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
+ Database* db,
+ const NamespaceString& source,
+ const NamespaceString& target) {
IndexCatalog::IndexKillCriteria criteria;
criteria.ns = source;
@@ -114,20 +114,10 @@ namespace mongo {
string& errmsg,
BSONObjBuilder& result,
bool fromRepl) {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite globalWriteLock(txn->lockState());
+ invariant(!fromRepl == txn->writesAreReplicated());
string source = cmdObj.getStringField( name.c_str() );
string target = cmdObj.getStringField( "to" );
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while renaming collection " << source << " to " << target));
- }
-
- // We stay in source context the whole time. This is mostly to set the CurOp namespace.
- OldClientContext ctx(txn, source);
-
if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
errmsg = "invalid collection name: " + target;
return false;
@@ -175,197 +165,13 @@ namespace mongo {
return false;
}
- Database* const sourceDB = dbHolder().get(txn, nsToDatabase(source));
- Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source)
- : NULL;
- if (!sourceColl) {
- errmsg = "source namespace does not exist";
- return false;
- }
-
- {
- // Ensure that collection name does not exceed maximum length.
- // Ensure that index names do not push the length over the max.
- // Iterator includes unfinished indexes.
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
- int longestIndexNameLength = 0;
- while ( sourceIndIt.more() ) {
- int thisLength = sourceIndIt.next()->indexName().length();
- if ( thisLength > longestIndexNameLength )
- longestIndexNameLength = thisLength;
- }
-
- unsigned int longestAllowed =
- min(int(NamespaceString::MaxNsCollectionLen),
- int(NamespaceString::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
- if (target.size() > longestAllowed) {
- StringBuilder sb;
- sb << "collection name length of " << target.size()
- << " exceeds maximum length of " << longestAllowed
- << ", allowing for index names";
- errmsg = sb.str();
- return false;
- }
- }
-
- const std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, sourceDB, cmdObj);
- // Dismissed on success
- ScopeGuard indexBuildRestorer = MakeGuard(IndexBuilder::restoreIndexes,
- txn,
- indexesInProg);
-
- Database* const targetDB = dbHolder().openDb(txn, nsToDatabase(target));
-
- {
- WriteUnitOfWork wunit(txn);
-
- // Check if the target namespace exists and if dropTarget is true.
- // If target exists and dropTarget is not true, return false.
- if (targetDB->getCollection(target)) {
- if (!cmdObj["dropTarget"].trueValue()) {
- errmsg = "target namespace exists";
- return false;
- }
-
- Status s = targetDB->dropCollection(txn, target);
- if ( !s.isOK() ) {
- errmsg = s.toString();
- return false;
- }
- }
-
- // If we are renaming in the same database, just
- // rename the namespace and we're done.
- if (sourceDB == targetDB) {
- Status s = targetDB->renameCollection(txn,
- source,
- target,
- cmdObj["stayTemp"].trueValue() );
- if (!s.isOK()) {
- return appendCommandStatus(result, s);
- }
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- cmdObj["dropTarget"].trueValue(),
- cmdObj["stayTemp"].trueValue());
- }
-
- wunit.commit();
- indexBuildRestorer.Dismiss();
- return true;
- }
-
- wunit.commit();
- }
-
- // If we get here, we are renaming across databases, so we must copy all the data and
- // indexes, then remove the source collection.
-
- // Create the target collection. It will be removed if we fail to copy the collection.
- // TODO use a temp collection and unset the temp flag on success.
- Collection* targetColl = NULL;
- {
- CollectionOptions options;
- options.setNoIdIndex();
-
- if (sourceColl->isCapped()) {
- const CollectionOptions sourceOpts =
- sourceColl->getCatalogEntry()->getCollectionOptions(txn);
-
- options.capped = true;
- options.cappedSize = sourceOpts.cappedSize;
- options.cappedMaxDocs = sourceOpts.cappedMaxDocs;
- }
-
- WriteUnitOfWork wunit(txn);
-
- // No logOp necessary because the entire renameCollection command is one logOp.
- targetColl = targetDB->createCollection(txn, target, options);
- if (!targetColl) {
- errmsg = "Failed to create target collection.";
- return false;
- }
-
- wunit.commit();
- }
-
- // Dismissed on success
- ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target);
-
- MultiIndexBlock indexer(txn, targetColl);
- indexer.allowInterruption();
-
- // Copy the index descriptions from the source collection, adjusting the ns field.
- {
- std::vector<BSONObj> indexesToCopy;
- IndexCatalog::IndexIterator sourceIndIt =
- sourceColl->getIndexCatalog()->getIndexIterator( txn, true );
- while (sourceIndIt.more()) {
- const BSONObj currIndex = sourceIndIt.next()->infoObj();
-
- // Process the source index.
- BSONObjBuilder newIndex;
- newIndex.append("ns", target);
- newIndex.appendElementsUnique(currIndex);
- indexesToCopy.push_back(newIndex.obj());
- }
- indexer.init(indexesToCopy);
- }
-
- {
- // Copy over all the data from source collection to target collection.
- boost::scoped_ptr<RecordIterator> sourceIt(sourceColl->getIterator(txn));
- while (!sourceIt->isEOF()) {
- txn->checkForInterrupt();
-
- const Snapshotted<BSONObj> obj = sourceColl->docFor(txn, sourceIt->getNext());
-
- WriteUnitOfWork wunit(txn);
- // No logOp necessary because the entire renameCollection command is one logOp.
- Status status =
- targetColl->insertDocument(txn, obj.value(), &indexer, true).getStatus();
- if (!status.isOK())
- return appendCommandStatus(result, status);
- wunit.commit();
- }
- }
-
- Status status = indexer.doneInserting();
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- {
- // Getting here means we successfully built the target copy. We now remove the
- // source collection and finalize the rename.
- WriteUnitOfWork wunit(txn);
-
- Status status = sourceDB->dropCollection(txn, source);
- if (!status.isOK())
- return appendCommandStatus(result, status);
-
- indexer.commit();
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onRenameCollection(
- txn,
- NamespaceString(source),
- NamespaceString(target),
- cmdObj["dropTarget"].trueValue(),
- cmdObj["stayTemp"].trueValue());
- }
-
- wunit.commit();
- }
-
- indexBuildRestorer.Dismiss();
- targetCollectionDropper.Dismiss();
- return true;
+ return appendCommandStatus(result,
+ renameCollection(txn,
+ NamespaceString(source),
+ NamespaceString(target),
+ cmdObj["dropTarget"].trueValue(),
+ cmdObj["stayTemp"].trueValue()));
}
} cmdrenamecollection;
-}
+} // namespace mongo
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index eb6408e6d5b..14a636e4b5a 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -34,7 +34,7 @@
#include "mongo/base/init.h"
#include "mongo/base/initializer_context.h"
-#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/capped_utils.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
@@ -79,6 +79,7 @@ namespace mongo {
Database* db = ctx.db();
WriteUnitOfWork wunit(txn);
+ txn->setReplicatedWrites(false);
Collection* collection = db->getCollection( ns );
if ( !collection ) {
collection = db->createCollection( txn, ns );
@@ -195,53 +196,18 @@ namespace mongo {
const BSONObj& cmdObj,
std::vector<Privilege>* out) {}
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- const std::string ns = parseNsCollectionRequired(db->name(), cmdObj);
-
- IndexCatalog::IndexKillCriteria criteria;
- criteria.ns = ns;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
- }
-
- virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
const std::string ns = parseNsCollectionRequired(dbname, cmdObj);
- ScopedTransaction scopedXact(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
-
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while truncating collection " << ns));
- }
-
- Database* db = autoDb.getDb();
- massert(13429, "no such database", db);
-
- Collection* collection = db->getCollection(ns);
- massert(28584, "no such collection", collection);
-
- std::vector<BSONObj> indexes = stopIndexBuilds(txn, db, cmdObj);
-
- WriteUnitOfWork wuow(txn);
-
- Status status = collection->truncate(txn);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- IndexBuilder::restoreIndexes(txn, indexes);
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onEmptyCapped(txn, collection->ns());
- }
-
- wuow.commit();
-
- return true;
+ return appendCommandStatus(result, emptyCapped(txn, NamespaceString(ns)));
}
+
};
// ----------------------------
diff --git a/src/mongo/db/commands/write_commands/batch_executor.cpp b/src/mongo/db/commands/write_commands/batch_executor.cpp
index fceb7bd042c..010f44deb04 100644
--- a/src/mongo/db/commands/write_commands/batch_executor.cpp
+++ b/src/mongo/db/commands/write_commands/batch_executor.cpp
@@ -1077,10 +1077,6 @@ namespace mongo {
request->getTargetingNS())));
return false;
}
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn,
- NamespaceString(request->getTargetingNS()),
- CollectionOptions());
wunit.commit();
}
return true;
@@ -1204,7 +1200,6 @@ namespace mongo {
result->setError(toWriteError(status.getStatus()));
}
else {
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, insertNS, docToInsert);
result->getStats().n = 1;
wunit.commit();
}
@@ -1263,7 +1258,6 @@ namespace mongo {
request.setUpdates(updateItem.getUpdate()->getUpdateExpr());
request.setMulti(isMulti);
request.setUpsert(updateItem.getUpdate()->getUpsert());
- request.setUpdateOpLog(true);
UpdateLifecycleImpl updateLifecycle(true, request.getNamespaceString());
request.setLifecycle(&updateLifecycle);
@@ -1297,9 +1291,7 @@ namespace mongo {
}
else {
WriteUnitOfWork wuow(txn);
- uassertStatusOK( userCreateNS( txn, db,
- nsString.ns(), BSONObj(),
- !request.isFromReplication() ) );
+ uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
wuow.commit();
}
} MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "update", nsString.ns());
@@ -1435,7 +1427,6 @@ namespace mongo {
DeleteRequest request(nss);
request.setQuery( removeItem.getDelete()->getQuery() );
request.setMulti( removeItem.getDelete()->getLimit() != 1 );
- request.setUpdateOpLog(true);
request.setGod( false );
// Deletes running through the write commands path can yield.
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index e7e85a6a78a..3b6cdba6a64 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -120,6 +120,7 @@ namespace mongo {
string& errMsg,
BSONObjBuilder& result,
bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
// Can't be run on secondaries (logTheOp() == false, slaveOk() == false).
dassert( !fromRepl );
@@ -207,7 +208,6 @@ namespace mongo {
updateRequest.setUpdates( batchItem.getUpdate()->getUpdateExpr() );
updateRequest.setMulti( batchItem.getUpdate()->getMulti() );
updateRequest.setUpsert( batchItem.getUpdate()->getUpsert() );
- updateRequest.setUpdateOpLog( true );
UpdateLifecycleImpl updateLifecycle( true, updateRequest.getNamespaceString() );
updateRequest.setLifecycle( &updateLifecycle );
updateRequest.setExplain();
@@ -255,7 +255,6 @@ namespace mongo {
DeleteRequest deleteRequest( nsString );
deleteRequest.setQuery( batchItem.getDelete()->getQuery() );
deleteRequest.setMulti( batchItem.getDelete()->getLimit() != 1 );
- deleteRequest.setUpdateOpLog(true);
deleteRequest.setGod( false );
deleteRequest.setExplain();
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 3adf1b9a5e9..1abffa3aebf 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -238,7 +238,10 @@ namespace mongo {
WriteUnitOfWork wunit(&txn);
if (!collection) {
BSONObj options = BSON("capped" << true << "size" << 10 * 1024 * 1024);
- uassertStatusOK(userCreateNS(&txn, db, ns, options, true));
+ bool shouldReplicateWrites = txn.writesAreReplicated();
+ txn.setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, &txn, shouldReplicateWrites);
+ uassertStatusOK(userCreateNS(&txn, db, ns, options));
collection = db->getCollection(ns);
}
invariant(collection);
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 8d9c44b3d95..5b3cf3d46b9 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -48,7 +48,11 @@
#include "mongo/db/auth/user_management_commands_parser.h"
#include "mongo/db/auth/user_name.h"
#include "mongo/db/background.h"
+#include "mongo/db/clientcursor.h"
+#include "mongo/db/catalog/coll_mod.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
+#include "mongo/db/catalog/drop_collection.h"
+#include "mongo/db/catalog/drop_database.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands.h"
@@ -69,6 +73,7 @@
#include "mongo/db/lasterror.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context_impl.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/internal_plans.h"
@@ -87,6 +92,7 @@
#include "mongo/util/log.h"
#include "mongo/util/md5.hpp"
#include "mongo/util/print.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -109,6 +115,7 @@ namespace mongo {
}
bool CmdShutdown::run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
long long timeoutSecs = 0;
@@ -148,88 +155,46 @@ namespace mongo {
virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- invariant(db);
- std::list<std::string> collections;
- db->getDatabaseCatalogEntry()->getCollectionNamespaces(&collections);
-
- std::vector<BSONObj> allKilledIndexes;
- for (std::list<std::string>::iterator it = collections.begin();
- it != collections.end();
- ++it) {
- std::string ns = *it;
-
- IndexCatalog::IndexKillCriteria criteria;
- criteria.ns = ns;
- std::vector<BSONObj> killedIndexes =
- IndexBuilder::killMatchingIndexBuilds(db->getCollection(ns), criteria);
- allKilledIndexes.insert(allKilledIndexes.end(),
- killedIndexes.begin(),
- killedIndexes.end());
- }
- return allKilledIndexes;
- }
-
CmdDropDatabase() : Command("dropDatabase") {}
- bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
// disallow dropping the config database
if (serverGlobalParams.configsvr && (dbname == "config")) {
- errmsg = "Cannot drop 'config' database if mongod started with --configsvr";
- return false;
+ return appendCommandStatus(result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'config' database if mongod started "
+ "with --configsvr"));
}
if ((repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
repl::ReplicationCoordinator::modeNone) &&
(dbname == "local")) {
- errmsg = "Cannot drop 'local' database while replication is active";
- return false;
+ return appendCommandStatus(result,
+ Status(ErrorCodes::IllegalOperation,
+ "Cannot drop 'local' database while replication "
+ "is active"));
}
BSONElement e = cmdObj.firstElement();
int p = (int) e.number();
if ( p != 1 ) {
- errmsg = "have to pass 1 as db parameter";
- return false;
+ return appendCommandStatus(result,
+ Status(ErrorCodes::IllegalOperation,
+ "have to pass 1 as db parameter"));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_X);
- Lock::GlobalWrite lk(txn->lockState());
- AutoGetDb autoDB(txn, dbname, MODE_X);
- Database* const db = autoDB.getDb();
- if (!db) {
- // DB doesn't exist, so deem it a success.
- return true;
- }
- OldClientContext context(txn, dbname);
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while dropping database " << dbname));
- }
-
- log() << "dropDatabase " << dbname << " starting" << endl;
-
- stopIndexBuilds(txn, db, cmdObj);
- dropDatabase(txn, db);
-
- log() << "dropDatabase " << dbname << " finished";
-
- WriteUnitOfWork wunit(txn);
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onDropDatabase(txn, dbname + ".$cmd");
- }
-
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "dropDatabase", dbname);
-
- result.append( "dropped" , dbname );
-
- return true;
+ Status status = dropDatabase(txn, dbname);
+ if (status.isOK()) {
+ result.append( "dropped" , dbname );
+ }
+ return appendCommandStatus(result, status);
}
+
} cmdDropDatabase;
class CmdRepairDatabase : public Command {
@@ -281,6 +246,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
BSONElement e = cmdObj.firstElement();
if ( e.numberInt() != 1 ) {
errmsg = "bad option";
@@ -301,6 +267,9 @@ namespace mongo {
bool backupOriginalFiles = e.isBoolean() && e.boolean();
StorageEngine* engine = getGlobalServiceContext()->getGlobalStorageEngine();
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
Status status = repairDatabase(txn, engine, dbname, preserveClonedFilesOnFailure,
backupOriginalFiles );
@@ -367,6 +336,7 @@ namespace mongo {
string& errmsg,
BSONObjBuilder& result,
bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
// Needs to be locked exclusively, because creates the system.profile collection
// in the local database.
@@ -467,24 +437,17 @@ namespace mongo {
virtual bool isWriteCommandForConfigServer() const { return true; }
- virtual std::vector<BSONObj> stopIndexBuilds(OperationContext* opCtx,
- Database* db,
- const BSONObj& cmdObj) {
- const std::string nsToDrop = parseNsCollectionRequired(db->name(), cmdObj);
-
- IndexCatalog::IndexKillCriteria criteria;
- criteria.ns = nsToDrop;
- return IndexBuilder::killMatchingIndexBuilds(db->getCollection(nsToDrop), criteria);
- }
-
- virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
const std::string nsToDrop = parseNsCollectionRequired(dbname, cmdObj);
- if (!serverGlobalParams.quiet) {
- LOG(0) << "CMD: drop " << nsToDrop << endl;
- }
-
- if ( nsToDrop.find( '$' ) != string::npos ) {
+ if (nsToDrop.find('$') != string::npos) {
errmsg = "can't drop collection with reserved $ character in name";
return false;
}
@@ -496,48 +459,11 @@ namespace mongo {
return false;
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
-
- AutoGetDb autoDb(txn, dbname, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection( nsToDrop ) : NULL;
-
- // If db/collection does not exist, short circuit and return.
- if ( !db || !coll ) {
- errmsg = "ns not found";
- return false;
- }
- OldClientContext context(txn, nsToDrop);
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while dropping collection " << nsToDrop));
- }
-
- int numIndexes = coll->getIndexCatalog()->numIndexesTotal( txn );
-
- stopIndexBuilds(txn, db, cmdObj);
-
- result.append( "ns", nsToDrop );
- result.append( "nIndexesWas", numIndexes );
-
- WriteUnitOfWork wunit(txn);
- Status s = db->dropCollection( txn, nsToDrop );
-
- if ( !s.isOK() ) {
- return appendCommandStatus( result, s );
- }
-
- if ( !fromRepl ) {
- getGlobalServiceContext()->getOpObserver()->onDropCollection(
- txn,
- NamespaceString(nsToDrop));
- }
- wunit.commit();
- } MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "drop", nsToDrop);
- return true;
+ result.append("ns", nsToDrop);
+ return appendCommandStatus(result,
+ dropCollection(txn, NamespaceString(nsToDrop), result));
}
+
} cmdDrop;
/* create collection */
@@ -578,7 +504,14 @@ namespace mongo {
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
- virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
BSONObjIterator it(cmdObj);
// Extract ns from first cmdObj element.
@@ -619,7 +552,7 @@ namespace mongo {
WriteUnitOfWork wunit(txn);
// Create collection.
- status = userCreateNS(txn, ctx.db(), ns.c_str(), options, !fromRepl);
+ status = userCreateNS(txn, ctx.db(), ns.c_str(), options);
if ( !status.isOK() ) {
return appendCommandStatus( result, status );
}
@@ -662,6 +595,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
const std::string ns = parseNs(dbname, jsobj);
md5digest d;
@@ -815,6 +749,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
Timer timer;
string ns = jsobj.firstElement().String();
@@ -939,6 +874,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
int scale = 1;
if ( jsobj["scale"].isNumber() ) {
scale = jsobj["scale"].numberInt();
@@ -1044,148 +980,22 @@ namespace mongo {
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
- const std::string ns = parseNsCollectionRequired(dbname, jsobj);
-
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetDb autoDb(txn, dbname, MODE_X);
- Database* const db = autoDb.getDb();
- Collection* coll = db ? db->getCollection(ns) : NULL;
-
- // If db/collection does not exist, short circuit and return.
- if ( !db || !coll ) {
- errmsg = "ns does not exist";
- return false;
- }
-
- OldClientContext ctx(txn, ns);
- if (!fromRepl &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
- return appendCommandStatus(result, Status(ErrorCodes::NotMaster, str::stream()
- << "Not primary while setting collection options on " << ns));
- }
-
- WriteUnitOfWork wunit(txn);
-
- bool ok = true;
-
- BSONForEach( e, jsobj ) {
- if ( str::equals( "collMod", e.fieldName() ) ) {
- // no-op
- }
- else if ( str::startsWith( e.fieldName(), "$" ) ) {
- // no-op: ignore top-level fields prefixed with $. They are for the command processor.
- }
- else if ( LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData() ) {
- // no-op
- }
- else if ( str::equals( "index", e.fieldName() ) ) {
- BSONObj indexObj = e.Obj();
- BSONObj keyPattern = indexObj.getObjectField( "keyPattern" );
-
- if ( keyPattern.isEmpty() ){
- errmsg = "no keyPattern specified";
- ok = false;
- continue;
- }
-
- BSONElement newExpireSecs = indexObj["expireAfterSeconds"];
- if ( newExpireSecs.eoo() ) {
- errmsg = "no expireAfterSeconds field";
- ok = false;
- continue;
- }
- if ( ! newExpireSecs.isNumber() ) {
- errmsg = "expireAfterSeconds field must be a number";
- ok = false;
- continue;
- }
-
- const IndexDescriptor* idx = coll->getIndexCatalog()
- ->findIndexByKeyPattern( txn, keyPattern );
- if ( idx == NULL ) {
- errmsg = str::stream() << "cannot find index " << keyPattern
- << " for ns " << ns;
- ok = false;
- continue;
- }
- BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
- if( oldExpireSecs.eoo() ){
- errmsg = "no expireAfterSeconds field to update";
- ok = false;
- continue;
- }
- if( ! oldExpireSecs.isNumber() ) {
- errmsg = "existing expireAfterSeconds field is not a number";
- ok = false;
- continue;
- }
-
- if ( oldExpireSecs != newExpireSecs ) {
- result.appendAs( oldExpireSecs, "expireAfterSeconds_old" );
- // Change the value of "expireAfterSeconds" on disk.
- coll->getCatalogEntry()->updateTTLSetting( txn,
- idx->indexName(),
- newExpireSecs.numberLong() );
- // Notify the index catalog that the definition of this index changed.
- idx = coll->getIndexCatalog()->refreshEntry( txn, idx );
- result.appendAs( newExpireSecs , "expireAfterSeconds_new" );
- }
- }
- else {
- // As of SERVER-17312 we only support these two options. When SERVER-17320 is
- // resolved this will need to be enhanced to handle other options.
- typedef CollectionOptions CO;
- const StringData name = e.fieldNameStringData();
- const int flag = (name == "usePowerOf2Sizes") ? CO::Flag_UsePowerOf2Sizes :
- (name == "noPadding") ? CO::Flag_NoPadding :
- 0;
- if (!flag) {
- errmsg = str::stream() << "unknown option to collMod: " << name;
- ok = false;
- continue;
- }
-
- CollectionCatalogEntry* cce = coll->getCatalogEntry();
-
- const int oldFlags = cce->getCollectionOptions(txn).flags;
- const bool oldSetting = oldFlags & flag;
- const bool newSetting = e.trueValue();
-
- result.appendBool( name.toString() + "_old", oldSetting );
- result.appendBool( name.toString() + "_new", newSetting );
-
- const int newFlags = newSetting
- ? (oldFlags | flag) // set flag
- : (oldFlags & ~flag); // clear flag
-
- // NOTE we do this unconditionally to ensure that we note that the user has
- // explicitly set flags, even if they are just setting the default.
- cce->updateFlags(txn, newFlags);
-
- const CollectionOptions newOptions = cce->getCollectionOptions(txn);
- invariant(newOptions.flags == newFlags);
- invariant(newOptions.flagsSet);
- }
- }
-
- if (!ok) {
- return false;
- }
-
- if (!fromRepl) {
- getGlobalServiceContext()->getOpObserver()->onCollMod(txn,
- (dbname + ".$cmd").c_str(),
- jsobj);
- }
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& jsobj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result,
+ bool fromRepl) {
+ invariant(!fromRepl == txn->writesAreReplicated());
- wunit.commit();
- return true;
+ const std::string ns = parseNsCollectionRequired(dbname, jsobj);
+ return appendCommandStatus(result,
+ collMod(txn, NamespaceString(ns), jsobj, &result));
}
} collectionModCommand;
-
class DBStats : public Command {
public:
DBStats() : Command( "dbStats", false, "dbstats" ) {
@@ -1210,6 +1020,7 @@ namespace mongo {
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ invariant(!fromRepl == txn->writesAreReplicated());
int scale = 1;
if ( jsobj["scale"].isNumber() ) {
scale = jsobj["scale"].numberInt();
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index f9fb4f0d33a..22ac295e4a0 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -48,7 +48,6 @@
#include "mongo/db/json.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/op_observer.h"
-#include "mongo/db/operation_context_impl.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
@@ -66,6 +65,7 @@
#include "mongo/s/d_state.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -247,7 +247,6 @@ namespace mongo {
request.setQuery(id);
request.setUpdates(o);
request.setUpsert();
- request.setUpdateOpLog();
request.setFromMigration(fromMigrate);
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
@@ -264,7 +263,6 @@ namespace mongo {
request.setUpdates(obj);
request.setUpsert();
- request.setUpdateOpLog();
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
@@ -449,8 +447,6 @@ namespace mongo {
BSONObj deletedId;
collection->deleteDocument( txn, rloc, false, false, &deletedId );
- // The above throws on failure, and so is not logged
- getGlobalServiceContext()->getOpObserver()->onDelete(txn, ns, deletedId, fromMigrate);
wuow.commit();
numDeleted++;
}
@@ -582,6 +578,9 @@ namespace mongo {
void Helpers::emptyCollection(OperationContext* txn, const char *ns) {
OldClientContext context(txn, ns);
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
deleteObjects(txn, context.db(), ns, BSONObj(), PlanExecutor::YIELD_MANUAL, false);
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index b6bf7b03361..d519cb2ec47 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -158,20 +158,6 @@ namespace mongo {
_collection->deleteDocument(_txn, rloc, deleteCappedOK, deleteNoWarn,
_params.shouldCallLogOp ? &deletedDoc : NULL);
- if (_params.shouldCallLogOp) {
- if (deletedDoc.isEmpty()) {
- log() << "Deleted object without id in collection " << _collection->ns()
- << ", not logging.";
- }
- else {
- getGlobalServiceContext()->getOpObserver()->onDelete(
- _txn,
- _collection->ns().ns(),
- deletedDoc,
- _params.fromMigrate);
- }
- }
-
wunit.commit();
}
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index d9f8e291662..835d2eb7b33 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -530,7 +530,7 @@ namespace mongo {
// Verify that no immutable fields were changed and data is valid for storage.
- if (!(request->isFromReplication() || request->isFromMigration())) {
+ if (!(!_txn->writesAreReplicated() || request->isFromMigration())) {
const std::vector<FieldRef*>* immutableFields = NULL;
if (lifecycle)
immutableFields = lifecycle->getImmutableFields();
@@ -554,14 +554,22 @@ namespace mongo {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
+ newObj = oldObj.value();
const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
- _collection->updateDocumentWithDamages(_txn, loc,
- Snapshotted<RecordData>(oldObj.snapshotId(),
- oldRec),
- source, _damages);
+ BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
+ oplogUpdateEntryArgs args;
+ args.update = logObj;
+ args.criteria = idQuery;
+ args.fromMigrate = request->isFromMigration();
+ _collection->updateDocumentWithDamages(
+ _txn,
+ loc,
+ Snapshotted<RecordData>(oldObj.snapshotId(), oldRec),
+ source,
+ _damages,
+ args);
}
- newObj = oldObj.value();
_specificStats.fastmod = true;
newLoc = loc;
}
@@ -577,27 +585,25 @@ namespace mongo {
// Don't actually do the write if this is an explain.
if (!request->isExplain()) {
invariant(_collection);
+ BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
+ oplogUpdateEntryArgs args;
+ args.update = logObj;
+ args.criteria = idQuery;
+ args.fromMigrate = request->isFromMigration();
StatusWith<RecordId> res = _collection->updateDocument(
- _txn,
- loc, oldObj, newObj,
- true, driver->modsAffectIndices(),
- _params.opDebug);
+ _txn,
+ loc,
+ oldObj,
+ newObj,
+ true,
+ driver->modsAffectIndices(),
+ _params.opDebug,
+ args);
uassertStatusOK(res.getStatus());
newLoc = res.getValue();
}
}
- // Call logOp if requested, and we're not an explain.
- if (request->shouldCallLogOp() && !logObj.isEmpty() && !request->isExplain()) {
- BSONObj idQuery = driver->makeOplogEntryQuery(newObj, request->isMulti());
- getGlobalServiceContext()->getOpObserver()->onUpdate(
- _txn,
- request->getNamespaceString().ns().c_str(),
- logObj,
- idQuery,
- request->isFromMigration());
- }
-
invariant(oldObj.snapshotId() == _txn->recoveryUnit()->getSnapshotId());
wunit.commit();
@@ -710,7 +716,7 @@ namespace mongo {
_specificStats.inserted = true;
const UpdateRequest* request = _params.request;
- bool isInternalRequest = request->isFromReplication() || request->isFromMigration();
+ bool isInternalRequest = !_txn->writesAreReplicated() || request->isFromMigration();
// Reset the document we will be writing to.
_doc.reset();
@@ -738,15 +744,10 @@ namespace mongo {
WriteUnitOfWork wunit(_txn);
invariant(_collection);
StatusWith<RecordId> newLoc = _collection->insertDocument(_txn,
- newObj,
- !request->isGod()/*enforceQuota*/);
+ newObj,
+ !request->isGod()/*enforceQuota*/,
+ request->isFromMigration());
uassertStatusOK(newLoc.getStatus());
- if (request->shouldCallLogOp()) {
- getGlobalServiceContext()->getOpObserver()->onInsert(_txn,
- request->getNamespaceString().ns(),
- newObj,
- request->isFromMigration());
- }
// Technically, we should save/restore state here, but since we are going to return EOF
// immediately after, it would just be wasted work.
@@ -948,8 +949,10 @@ namespace mongo {
const NamespaceString& nsString(request.getNamespaceString());
// We may have stepped down during the yield.
- if (request.shouldCallLogOp() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db())) {
+ bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Demoted from primary while performing update on "
<< nsString.ns());
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 0595cf83ede..63d3ae04ac0 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -642,7 +642,6 @@ namespace {
request.setMulti(multi);
request.setQuery(query);
request.setUpdates(toupdate);
- request.setUpdateOpLog(); // TODO: This is wasteful if repl is not active.
UpdateLifecycleImpl updateLifecycle(broadcast, nsString);
request.setLifecycle(&updateLifecycle);
@@ -718,7 +717,7 @@ namespace {
}
else {
WriteUnitOfWork wuow(txn);
- uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj(), true));
+ uassertStatusOK(userCreateNS(txn, db, nsString.ns(), BSONObj()));
wuow.commit();
}
@@ -762,7 +761,6 @@ namespace {
DeleteRequest request(nsString);
request.setQuery(pattern);
request.setMulti(!justOne);
- request.setUpdateOpLog(true);
request.setYieldPolicy(PlanExecutor::YIELD_AUTO);
@@ -952,15 +950,10 @@ namespace {
if ( !collection ) {
collection = ctx.db()->createCollection( txn, ns );
verify( collection );
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn,
- NamespaceString(ns),
- CollectionOptions());
}
StatusWith<RecordId> status = collection->insertDocument( txn, js, true );
uassertStatusOK( status.getStatus() );
- getGlobalServiceContext()->getOpObserver()->onInsert(txn, std::string(ns), js);
wunit.commit();
break;
}
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index b7ed58abce3..93b7fa405fd 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -41,6 +41,7 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/jsobj.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -184,6 +185,9 @@ namespace {
collectionOptions.cappedSize = 1024 * 1024;
WriteUnitOfWork wunit(txn);
+ bool shouldReplicateWrites = txn->writesAreReplicated();
+ txn->setReplicatedWrites(false);
+ ON_BLOCK_EXIT(&OperationContext::setReplicatedWrites, txn, shouldReplicateWrites);
invariant(db->createCollection(txn, dbProfilingNS, collectionOptions));
wunit.commit();
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 386ddb6ad67..75f7ce89cd0 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -103,6 +103,7 @@ namespace mongo {
bool isSystem() const { return coll().startsWith( "system." ); }
bool isSystemDotIndexes() const { return coll() == "system.indexes"; }
+ bool isSystemDotProfile() const { return coll() == "system.profile"; }
bool isConfigDB() const { return db() == "config"; }
bool isCommand() const { return coll() == "$cmd"; }
bool isOplog() const { return oplog( _ns ); }
diff --git a/src/mongo/db/op_observer.cpp b/src/mongo/db/op_observer.cpp
index eab851bcc0c..9d655fabdf7 100644
--- a/src/mongo/db/op_observer.cpp
+++ b/src/mongo/db/op_observer.cpp
@@ -47,9 +47,7 @@ namespace mongo {
const std::string& ns,
BSONObj indexDoc,
bool fromMigrate) {
- if (repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
- repl::_logOp(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
- }
+ repl::_logOp(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
getGlobalAuthorizationManager()->logOp(txn, "i", ns.c_str(), indexDoc, nullptr);
logOpForSharding(txn, "i", ns.c_str(), indexDoc, nullptr, fromMigrate);
@@ -57,34 +55,31 @@ namespace mongo {
}
void OpObserver::onInsert(OperationContext* txn,
- const std::string& ns,
+ const NamespaceString& ns,
BSONObj doc,
bool fromMigrate) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "i", ns.c_str(), doc, nullptr, fromMigrate);
- }
+ repl::_logOp(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
- getGlobalAuthorizationManager()->logOp(txn, "i", ns.c_str(), doc, nullptr);
- logOpForSharding(txn, "i", ns.c_str(), doc, nullptr, fromMigrate);
- logOpForDbHash(txn, ns.c_str());
- if ( strstr( ns.c_str(), ".system.js" ) ) {
+ getGlobalAuthorizationManager()->logOp(txn, "i", ns.ns().c_str(), doc, nullptr);
+ logOpForSharding(txn, "i", ns.ns().c_str(), doc, nullptr, fromMigrate);
+ logOpForDbHash(txn, ns.ns().c_str());
+ if (strstr(ns.ns().c_str(), ".system.js")) {
Scope::storedFuncMod(txn);
}
}
void OpObserver::onUpdate(OperationContext* txn,
- const std::string& ns,
- const BSONObj& update,
- BSONObj& criteria,
- bool fromMigrate) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "u", ns.c_str(), update, &criteria, fromMigrate);
- }
-
- getGlobalAuthorizationManager()->logOp(txn, "u", ns.c_str(), update, &criteria);
- logOpForSharding(txn, "u", ns.c_str(), update, &criteria, fromMigrate);
- logOpForDbHash(txn, ns.c_str());
- if ( strstr( ns.c_str(), ".system.js" ) ) {
+ oplogUpdateEntryArgs args) {
+ repl::_logOp(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
+
+ getGlobalAuthorizationManager()->logOp(txn,
+ "u",
+ args.ns.c_str(),
+ args.update,
+ &args.criteria);
+ logOpForSharding(txn, "u", args.ns.c_str(), args.update, &args.criteria, args.fromMigrate);
+ logOpForDbHash(txn, args.ns.c_str());
+ if (strstr(args.ns.c_str(), ".system.js")) {
Scope::storedFuncMod(txn);
}
}
@@ -93,23 +88,18 @@ namespace mongo {
const std::string& ns,
const BSONObj& idDoc,
bool fromMigrate) {
-
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
- }
+ repl::_logOp(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
getGlobalAuthorizationManager()->logOp(txn, "d", ns.c_str(), idDoc, nullptr);
logOpForSharding(txn, "d", ns.c_str(), idDoc, nullptr, fromMigrate);
logOpForDbHash(txn, ns.c_str());
- if ( strstr( ns.c_str(), ".system.js" ) ) {
+ if (strstr(ns.c_str(), ".system.js")) {
Scope::storedFuncMod(txn);
}
}
void OpObserver::onOpMessage(OperationContext* txn, const BSONObj& msgObj) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "n", "", msgObj, nullptr, false);
- }
+ repl::_logOp(txn, "n", "", msgObj, nullptr, false);
}
void OpObserver::onCreateCollection(OperationContext* txn,
@@ -121,9 +111,7 @@ namespace mongo {
b.appendElements(options.toBSON());
BSONObj cmdObj = b.obj();
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -132,9 +120,7 @@ namespace mongo {
void OpObserver::onCollMod(OperationContext* txn,
const std::string& dbName,
const BSONObj& collModCmd) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), collModCmd, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), collModCmd, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), collModCmd, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -144,9 +130,7 @@ namespace mongo {
const std::string& dbName) {
BSONObj cmdObj = BSON("dropDatabase" << 1);
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -157,9 +141,7 @@ namespace mongo {
std::string dbName = collectionName.db().toString() + ".$cmd";
BSONObj cmdObj = BSON("drop" << collectionName.coll().toString());
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -168,9 +150,7 @@ namespace mongo {
void OpObserver::onDropIndex(OperationContext* txn,
const std::string& dbName,
const BSONObj& idxDescriptor) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), idxDescriptor, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -187,9 +167,7 @@ namespace mongo {
"stayTemp" << stayTemp <<
"dropTarget" << dropTarget);
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -198,9 +176,7 @@ namespace mongo {
void OpObserver::onApplyOps(OperationContext* txn,
const std::string& dbName,
const BSONObj& applyOpCmd) {
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), applyOpCmd, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -212,9 +188,7 @@ namespace mongo {
std::string dbName = collectionName.db().toString() + ".$cmd";
BSONObj cmdObj = BSON("convertToCapped" << collectionName.coll() << "size" << size);
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
@@ -224,9 +198,7 @@ namespace mongo {
std::string dbName = collectionName.db().toString() + ".$cmd";
BSONObj cmdObj = BSON("emptycapped" << collectionName.coll());
- if ( repl::getGlobalReplicationCoordinator()->isReplEnabled() ) {
- repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
- }
+ repl::_logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
getGlobalAuthorizationManager()->logOp(txn, "c", dbName.c_str(), cmdObj, nullptr);
logOpForDbHash(txn, dbName.c_str());
diff --git a/src/mongo/db/op_observer.h b/src/mongo/db/op_observer.h
index ebc296acf13..4769c0733a4 100644
--- a/src/mongo/db/op_observer.h
+++ b/src/mongo/db/op_observer.h
@@ -31,13 +31,20 @@
#include <string>
#include "mongo/base/disallow_copying.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
- class BSONObj;
struct CollectionOptions;
class NamespaceString;
class OperationContext;
+ struct oplogUpdateEntryArgs {
+ std::string ns;
+ BSONObj update;
+ BSONObj criteria;
+ bool fromMigrate;
+ };
+
class OpObserver {
MONGO_DISALLOW_COPYING(OpObserver);
@@ -49,14 +56,11 @@ namespace mongo {
BSONObj indexDoc,
bool fromMigrate = false);
void onInsert(OperationContext* txn,
- const std::string& ns,
+ const NamespaceString& ns,
BSONObj doc,
bool fromMigrate = false);
void onUpdate(OperationContext* txn,
- const std::string& ns,
- const BSONObj& update,
- BSONObj& criteria,
- bool fromMigrate);
+ oplogUpdateEntryArgs args);
void onDelete(OperationContext* txn,
const std::string& ns,
const BSONObj& idDoc,
diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h
index d921fcfaa5c..046b9780a14 100644
--- a/src/mongo/db/operation_context.h
+++ b/src/mongo/db/operation_context.h
@@ -142,6 +142,16 @@ namespace mongo {
_writeConcern = writeConcern;
}
+ /**
+ * Set whether or not operations should generate oplog entries.
+ */
+ virtual void setReplicatedWrites(bool writesAreReplicated = true) = 0;
+
+ /**
+ * Returns true if operations should generate oplog entries.
+ */
+ virtual bool writesAreReplicated() const = 0;
+
protected:
OperationContext() { }
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index 9bb5cc6af60..ca9cfa3a61d 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -75,7 +75,8 @@ namespace {
OperationContextImpl::OperationContextImpl()
: _client(currentClient.get()),
- _locker(clientOperationInfoDecoration(_client).getLocker()) {
+ _locker(clientOperationInfoDecoration(_client).getLocker()),
+ _writesAreReplicated(true) {
invariant(_locker);
@@ -218,4 +219,11 @@ namespace {
NamespaceString(ns).db());
}
+ void OperationContextImpl::setReplicatedWrites(bool writesAreReplicated) {
+ _writesAreReplicated = writesAreReplicated;
+ }
+
+ bool OperationContextImpl::writesAreReplicated() const {
+ return _writesAreReplicated;
+ }
} // namespace mongo
diff --git a/src/mongo/db/operation_context_impl.h b/src/mongo/db/operation_context_impl.h
index 3828f979594..735e59db6e4 100644
--- a/src/mongo/db/operation_context_impl.h
+++ b/src/mongo/db/operation_context_impl.h
@@ -66,10 +66,14 @@ namespace mongo {
virtual bool isPrimaryFor( StringData ns );
+ virtual void setReplicatedWrites(bool writesAreReplicated = true);
+ virtual bool writesAreReplicated() const;
+
private:
std::auto_ptr<RecoveryUnit> _recovery;
Client* const _client; // cached, not owned
Locker* const _locker; // cached, not owned
+ bool _writesAreReplicated;
};
} // namespace mongo
diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h
index 0a24af2fdd6..0b2650531ce 100644
--- a/src/mongo/db/operation_context_noop.h
+++ b/src/mongo/db/operation_context_noop.h
@@ -103,6 +103,12 @@ namespace mongo {
return 0;
}
+ void setReplicatedWrites(bool writesAreReplicated = true) {}
+
+ bool writesAreReplicated() const {
+ return false;
+ }
+
private:
std::auto_ptr<RecoveryUnit> _recoveryUnit;
boost::scoped_ptr<Locker> _locker;
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
index ed53c86e1e8..f7782d14edf 100644
--- a/src/mongo/db/ops/delete.cpp
+++ b/src/mongo/db/ops/delete.cpp
@@ -49,14 +49,12 @@ namespace mongo {
BSONObj pattern,
PlanExecutor::YieldPolicy policy,
bool justOne,
- bool logop,
bool god,
bool fromMigrate) {
NamespaceString nsString(ns);
DeleteRequest request(nsString);
request.setQuery(pattern);
request.setMulti(!justOne);
- request.setUpdateOpLog(logop);
request.setGod(god);
request.setFromMigrate(fromMigrate);
request.setYieldPolicy(policy);
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
index 7616bb9bcec..70957d988d0 100644
--- a/src/mongo/db/ops/delete.h
+++ b/src/mongo/db/ops/delete.h
@@ -46,7 +46,6 @@ namespace mongo {
BSONObj pattern,
PlanExecutor::YieldPolicy policy,
bool justOne,
- bool logop = false,
bool god = false,
bool fromMigrate = false);
diff --git a/src/mongo/db/ops/delete_request.h b/src/mongo/db/ops/delete_request.h
index 3b3d97328ef..2d2a3f46a0b 100644
--- a/src/mongo/db/ops/delete_request.h
+++ b/src/mongo/db/ops/delete_request.h
@@ -42,7 +42,6 @@ namespace mongo {
explicit DeleteRequest(const NamespaceString& nsString) :
_nsString(nsString),
_multi(false),
- _logop(false),
_god(false),
_fromMigrate(false),
_isExplain(false),
@@ -50,7 +49,6 @@ namespace mongo {
void setQuery(const BSONObj& query) { _query = query; }
void setMulti(bool multi = true) { _multi = multi; }
- void setUpdateOpLog(bool logop = true) { _logop = logop; }
void setGod(bool god = true) { _god = god; }
void setFromMigrate(bool fromMigrate = true) { _fromMigrate = fromMigrate; }
void setExplain(bool isExplain = true) { _isExplain = isExplain; }
@@ -59,7 +57,6 @@ namespace mongo {
const NamespaceString& getNamespaceString() const { return _nsString; }
const BSONObj& getQuery() const { return _query; }
bool isMulti() const { return _multi; }
- bool shouldCallLogOp() const { return _logop; }
bool isGod() const { return _god; }
bool isFromMigrate() const { return _fromMigrate; }
bool isExplain() const { return _isExplain; }
@@ -71,7 +68,6 @@ namespace mongo {
const NamespaceString& _nsString;
BSONObj _query;
bool _multi;
- bool _logop;
bool _god;
bool _fromMigrate;
bool _isExplain;
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index d6a26fdaed2..9e084b2dfe5 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -94,12 +94,12 @@ namespace mongo {
// Only user updates should be checked. Any system or replication stuff should pass through.
// Config db docs shouldn't get checked for valid field names since the shard key can have
// a dot (".") in it.
- const bool shouldValidate = !(_request->isFromReplication() ||
+ const bool shouldValidate = !(!_txn->writesAreReplicated() ||
ns.isConfigDB() ||
_request->isFromMigration());
_driver.setLogOp(true);
- _driver.setModOptions(ModifierInterface::Options(_request->isFromReplication(),
+ _driver.setModOptions(ModifierInterface::Options(!_txn->writesAreReplicated(),
shouldValidate));
return _driver.parse(_request->getUpdates(), _request->isMulti());
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index 2c828d164c3..ffdc244e42c 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -41,7 +41,6 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/exec/update.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/update_driver.h"
@@ -79,24 +78,19 @@ namespace mongo {
ScopedTransaction transaction(txn, MODE_IX);
Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);
- if (!request.isFromReplication() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(
- nsString.db())) {
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::NotMaster, str::stream()
<< "Not primary while creating collection " << nsString.ns()
<< " during upsert"));
}
WriteUnitOfWork wuow(txn);
- collection = db->createCollection(txn, nsString.ns());
+ collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
invariant(collection);
- if (!request.isFromReplication()) {
- getGlobalServiceContext()->getOpObserver()->onCreateCollection(
- txn,
- NamespaceString(nsString),
- CollectionOptions());
- }
wuow.commit();
}
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index 99649663ece..9f69fdc3142 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -48,9 +48,7 @@ namespace mongo {
, _god(false)
, _upsert(false)
, _multi(false)
- , _callLogOp(false)
, _fromMigration(false)
- , _fromReplication(false)
, _lifecycle(NULL)
, _isExplain(false)
, _storeResultDoc(false)
@@ -103,14 +101,6 @@ namespace mongo {
return _multi;
}
- inline void setUpdateOpLog(bool value = true) {
- _callLogOp = value;
- }
-
- bool shouldCallLogOp() const {
- return _callLogOp;
- }
-
inline void setFromMigration(bool value = true) {
_fromMigration = value;
}
@@ -119,14 +109,6 @@ namespace mongo {
return _fromMigration;
}
- inline void setFromReplication(bool value = true) {
- _fromReplication = value;
- }
-
- bool isFromReplication() const {
- return _fromReplication;
- }
-
inline void setLifecycle(UpdateLifecycle* value) {
_lifecycle = value;
}
@@ -166,9 +148,7 @@ namespace mongo {
<< " god: " << _god
<< " upsert: " << _upsert
<< " multi: " << _multi
- << " callLogOp: " << _callLogOp
<< " fromMigration: " << _fromMigration
- << " fromReplications: " << _fromReplication
<< " isExplain: " << _isExplain;
}
private:
@@ -193,15 +173,9 @@ namespace mongo {
// True if this update is allowed to affect more than one document.
bool _multi;
- // True if the effects of the update should be written to the oplog.
- bool _callLogOp;
-
// True if this update is on behalf of a chunk migration.
bool _fromMigration;
- // True if this update is being applied during the application for the oplog.
- bool _fromReplication;
-
// The lifecycle data, and events used during the update request.
UpdateLifecycle* _lifecycle;
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 202be486a68..f7bfb4569e1 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -659,15 +659,16 @@ namespace {
str::stream() << "cannot remove from a capped collection: " << nss.ns());
}
- if (request->shouldCallLogOp() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nss.db())) {
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nss.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while removing from " << nss.ns());
}
DeleteStageParams deleteStageParams;
deleteStageParams.isMulti = request->isMulti();
- deleteStageParams.shouldCallLogOp = request->shouldCallLogOp();
deleteStageParams.fromMigrate = request->isFromMigrate();
deleteStageParams.isExplain = request->isExplain();
@@ -785,8 +786,10 @@ namespace {
// writes on a secondary. If this is an update to a secondary from the replication system,
// however, then we make an exception and let the write proceed. In this case,
// shouldCallLogOp() will be false.
- if (request->shouldCallLogOp() &&
- !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db())) {
+ bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
+ !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());
+
+ if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while performing update on "
<< nsString.ns());
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index 2165b91da3d..a27588382d3 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -676,6 +676,7 @@ namespace repl {
if ( !only.empty() && only != clientName )
return;
+ txn->setReplicatedWrites(false);
const ReplSettings& replSettings = getGlobalReplicationCoordinator()->getSettings();
if (replSettings.pretouch &&
!alreadyLocked/*doesn't make sense if in write lock already*/) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 7c90e58c8a5..b4bfd2376d1 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -236,6 +236,20 @@ namespace {
return;
}
+ if (NamespaceString(ns).isSystemDotProfile()) {
+ return;
+ }
+
+ if (!getGlobalReplicationCoordinator()->isReplEnabled()) {
+ return;
+ }
+
+ if (!txn->writesAreReplicated()) {
+ return;
+ }
+
+ fassert(28626, txn->recoveryUnit());
+
Lock::DBLock lk(txn->lockState(), "local", MODE_IX);
ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
@@ -522,7 +536,6 @@ namespace {
request.setQuery(b.done());
request.setUpdates(o);
request.setUpsert();
- request.setFromReplication();
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
@@ -546,7 +559,6 @@ namespace {
request.setQuery(updateCriteria);
request.setUpdates(o);
request.setUpsert(upsert);
- request.setFromReplication();
UpdateLifecycleImpl updateLifecycle(true, requestNs);
request.setLifecycle(&updateLifecycle);
@@ -599,8 +611,9 @@ namespace {
"Failed to apply delete due to missing _id: " << op.toString(),
o.hasField("_id"));
- if ( opType[1] == 0 )
+ if (opType[1] == 0) {
deleteObjects(txn, db, ns, o, PlanExecutor::YIELD_MANUAL, /*justOne*/ valueB);
+ }
else
verify( opType[1] == 'b' ); // "db" advertisement
}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 1380ae190c9..69cef872dee 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -328,6 +328,7 @@ namespace {
BackgroundSync* bgsync(BackgroundSync::get());
OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
ReplicationCoordinator* replCoord(getGlobalReplicationCoordinator());
truncateAndResetOplog(&txn, replCoord, bgsync);
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 0d1f4f79929..82b1da7a5d5 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -638,7 +638,6 @@ namespace {
pattern,
PlanExecutor::YIELD_MANUAL,
true, // justone
- false, // logop
true); // god
}
// did we just empty the collection? if so let's check if it even
@@ -825,6 +824,7 @@ namespace {
log() << "beginning rollback" << rsLog;
+ txn->setReplicatedWrites(false);
unsigned s = _syncRollback(txn, oplogreader, replCoord);
if (s)
sleepsecs(s);
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index 40f70579704..8d4866d8213 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -660,6 +660,7 @@ namespace {
initializeWriterThread();
OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
// allow us to get through the magic barrier
txn.lockState()->setIsBatchWriter(true);
@@ -692,6 +693,7 @@ namespace {
initializeWriterThread();
OperationContextImpl txn;
+ txn.setReplicatedWrites(false);
// allow us to get through the magic barrier
txn.lockState()->setIsBatchWriter(true);
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index 36f2b36b671..e8d05a3d352 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -377,7 +377,7 @@ namespace mongo {
Collection* tempCollection = NULL;
{
WriteUnitOfWork wunit(txn);
- tempCollection = tempDatabase->createCollection(txn, ns, options, true, false);
+ tempCollection = tempDatabase->createCollection(txn, ns, options, false);
wunit.commit();
}
@@ -399,8 +399,9 @@ namespace mongo {
}
Status status = indexer.init( indexes );
- if ( !status.isOK() )
+ if (!status.isOK()) {
return status;
+ }
}
scoped_ptr<RecordIterator> iterator(originalCollection->getIterator(txn));
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index c2ba257e213..d516d9c20ee 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -270,8 +270,7 @@ namespace mongo {
ns,
query,
PlanExecutor::YIELD_AUTO,
- false,
- true);
+ false);
break;
}
catch (const WriteConflictException& dle) {
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 1973ba6b6d7..a4cdf32b9ea 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -124,9 +124,16 @@ namespace QueryStageCount {
void update(const RecordId& oldLoc, const BSONObj& newDoc) {
WriteUnitOfWork wunit(&_txn);
BSONObj oldDoc = _coll->getRecordStore()->dataFor( &_txn, oldLoc ).releaseToBson();
- _coll->updateDocument(&_txn, oldLoc,
- Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(), oldDoc),
- newDoc, false, true, NULL);
+ oplogUpdateEntryArgs args;
+ _coll->updateDocument(&_txn,
+ oldLoc,
+ Snapshotted<BSONObj>(_txn.recoveryUnit()->getSnapshotId(),
+ oldDoc),
+ newDoc,
+ false,
+ true,
+ NULL,
+ args);
wunit.commit();
}
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index b2a484f7add..e398b696339 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -334,9 +334,10 @@ namespace QueryStageSortTests {
// This allows us to check that we don't return the new copy of a doc by asserting
// foo < limit().
BSONObj newDoc = BSON("_id" << updatedId << "foo" << limit() + 10);
+ oplogUpdateEntryArgs args;
{
WriteUnitOfWork wuow(&_txn);
- coll->updateDocument(&_txn, *it, oldDoc, newDoc, false, false, NULL);
+ coll->updateDocument(&_txn, *it, oldDoc, newDoc, false, false, NULL, args);
wuow.commit();
}
exec->restoreState(&_txn);
@@ -354,7 +355,7 @@ namespace QueryStageSortTests {
oldDoc = coll->docFor(&_txn, *it);
{
WriteUnitOfWork wuow(&_txn);
- coll->updateDocument(&_txn, *it++, oldDoc, newDoc, false, false, NULL);
+ coll->updateDocument(&_txn, *it++, oldDoc, newDoc, false, false, NULL, args);
wuow.commit();
}
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 5b9b84be5ba..bab0f1e22fe 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -193,7 +193,7 @@ namespace QueryTests {
_collection = NULL;
db->dropCollection( &_txn, ns() );
}
- _collection = db->createCollection( &_txn, ns(), CollectionOptions(), true, false );
+ _collection = db->createCollection( &_txn, ns(), CollectionOptions(), false );
wunit.commit();
}
ASSERT( _collection );
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index d768302d02d..cf6d2576323 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -203,7 +203,9 @@ namespace ReplTests {
if ( 0 ) {
mongo::unittest::log() << "op: " << *i << endl;
}
+ _txn.setReplicatedWrites(false);
a.applyOperation( &_txn, ctx.db(), *i );
+ _txn.setReplicatedWrites(true);
}
}
}
@@ -247,7 +249,9 @@ namespace ReplTests {
}
delete it;
for( vector< RecordId >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
+ _txn.setReplicatedWrites(false);
coll->deleteDocument( &_txn, *i, true );
+ _txn.setReplicatedWrites(true);
}
wunit.commit();
}
@@ -263,7 +267,9 @@ namespace ReplTests {
}
if ( o.hasField( "_id" ) ) {
+ _txn.setReplicatedWrites(false);
coll->insertDocument( &_txn, o, true );
+ _txn.setReplicatedWrites(true);
wunit.commit();
return;
}
@@ -273,7 +279,9 @@ namespace ReplTests {
id.init();
b.appendOID( "_id", &id );
b.appendElements( o );
+ _txn.setReplicatedWrites(false);
coll->insertDocument( &_txn, b.obj(), true );
+ _txn.setReplicatedWrites(true);
wunit.commit();
}
static BSONObj wid( const char *json ) {
@@ -290,9 +298,9 @@ namespace ReplTests {
class LogBasic : public Base {
public:
void run() {
- ASSERT_EQUALS( 1, opCount() );
- _client.insert( ns(), fromjson( "{\"a\":\"b\"}" ) );
ASSERT_EQUALS( 2, opCount() );
+ _client.insert( ns(), fromjson( "{\"a\":\"b\"}" ) );
+ ASSERT_EQUALS( 3, opCount() );
}
};
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 8abfe02ff43..f742fb68b00 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -71,7 +71,7 @@ namespace {
{
WriteUnitOfWork uow( txn );
ASSERT( !collectionExists( &ctx, nss.ns() ) );
- ASSERT_OK( userCreateNS( txn, ctx.db(), nss.ns(), BSONObj(), false, false ) );
+ ASSERT_OK( userCreateNS( txn, ctx.db(), nss.ns(), BSONObj(), false ) );
ASSERT( collectionExists( &ctx, nss.ns() ) );
uow.commit();
}
@@ -161,7 +161,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, ns ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), false, defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), defaultIndexes ) );
ASSERT( collectionExists( &ctx, ns ) );
if ( !rollback ) {
uow.commit();
@@ -191,7 +191,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, ns ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), false, defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), ns, BSONObj(), defaultIndexes ) );
uow.commit();
}
ASSERT( collectionExists( &ctx, ns ) );
@@ -235,8 +235,7 @@ namespace {
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, source ) );
ASSERT( !collectionExists( &ctx, target ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes ) );
uow.commit();
}
ASSERT( collectionExists( &ctx, source ) );
@@ -286,10 +285,8 @@ namespace {
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, source ) );
ASSERT( !collectionExists( &ctx, target ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), false,
- defaultIndexes ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), target.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), source.ns(), BSONObj(), defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), target.ns(), BSONObj(), defaultIndexes ) );
insertRecord( &txn, source, sourceDoc );
insertRecord( &txn, target, targetDoc );
@@ -346,8 +343,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, nss ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes ) );
insertRecord( &txn, nss, oldDoc );
uow.commit();
}
@@ -360,8 +356,7 @@ namespace {
WriteUnitOfWork uow( &txn );
ASSERT_OK( ctx.db()->dropCollection( &txn, nss.ns() ) );
ASSERT( !collectionExists( &ctx, nss ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes ) );
ASSERT( collectionExists( &ctx, nss ) );
insertRecord( &txn, nss, newDoc );
assertOnlyRecord( &txn, nss, newDoc );
@@ -397,8 +392,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes ) );
ASSERT( collectionExists( &ctx, nss ) );
insertRecord( &txn, nss, doc );
assertOnlyRecord( &txn, nss, doc );
@@ -432,8 +426,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false,
- defaultIndexes ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), defaultIndexes ) );
ASSERT( collectionExists( &ctx, nss ) );
insertRecord( &txn, nss, doc );
assertOnlyRecord( &txn, nss, doc );
@@ -689,7 +682,7 @@ namespace {
{
WriteUnitOfWork uow( &txn );
ASSERT( !collectionExists( &ctx, nss.ns() ) );
- ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false, false ) );
+ ASSERT_OK( userCreateNS( &txn, ctx.db(), nss.ns(), BSONObj(), false ) );
ASSERT( collectionExists( &ctx, nss.ns() ) );
Collection* coll = ctx.db()->getCollection( ns );
IndexCatalog* catalog = coll->getIndexCatalog();
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
index 354b080d2b8..ceb81d8e088 100644
--- a/src/mongo/s/d_migrate.cpp
+++ b/src/mongo/s/d_migrate.cpp
@@ -1984,7 +1984,7 @@ namespace mongo {
}
WriteUnitOfWork wuow(txn);
- Status status = userCreateNS( txn, db, ns, options, true, false );
+ Status status = userCreateNS(txn, db, ns, options, false);
if ( !status.isOK() ) {
warning() << "failed to create collection [" << ns << "] "
<< " with options " << options << ": " << status;
@@ -2435,7 +2435,6 @@ namespace mongo {
id,
PlanExecutor::YIELD_MANUAL,
true /* justOne */,
- true /* logOp */,
false /* god */,
true /* fromMigrate */);