author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-08-21 10:59:45 -0400
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2015-08-21 16:10:25 -0400
commit     31716d2ae526d82d7d36464f6c9fae8b9f38542f (patch)
tree       0e95fcac7ae47450819d51113c86addefaba90ef /src
parent     0c695aa1e879af482dc3aea4768dbda223ff4592 (diff)
SERVER-19855 Do not perform shard version checking where not necessary
The code in RangeDeleter and in the sharding migration and split paths uses AutoGetCollectionForRead, which has the side effect of checking the shard version based on the operation context. This causes problems when shard version information is transmitted as part of that context, in particular during migration. This change removes these checks and replaces them with plain lock acquisition.
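
At a typical call site, the fix amounts to swapping the read helper for the new AutoGetCollection class introduced in db_raii.h below. A minimal before/after sketch using names from this diff (the surrounding function and the ns variable are illustrative, not part of the commit):

    // Before: AutoGetCollectionForRead takes the DB and collection locks in
    // MODE_IS, but also calls ensureShardVersionOKOrThrow(), which can throw
    // when the operation context carries shard version information (as it
    // does during migration).
    //   AutoGetCollectionForRead ctx(txn, ns);
    //   Collection* collection = ctx.getCollection();

    // After: AutoGetCollection only acquires the locks in the requested mode
    // and resolves the Collection pointer; no shard version check is performed.
    AutoGetCollection autoColl(txn, NamespaceString(ns), MODE_IS);
    Collection* const collection = autoColl.getCollection();
    if (!collection) {
        return;  // collection does not exist; each call site handles this case
    }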
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/db_raii.cpp                     | 45
-rw-r--r--  src/mongo/db/db_raii.h                       | 42
-rw-r--r--  src/mongo/db/namespace_string.h              |  6
-rw-r--r--  src/mongo/db/range_deleter_db_env.cpp        | 79
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp  | 36
-rw-r--r--  src/mongo/db/s/migration_source_manager.h    |  5
-rw-r--r--  src/mongo/s/d_split.cpp                      | 74
7 files changed, 156 insertions, 131 deletions
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index c7634052710..a987062b11b 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/repl/replication_coordinator_global.h"
@@ -43,6 +44,13 @@ namespace mongo {
AutoGetDb::AutoGetDb(OperationContext* txn, StringData ns, LockMode mode)
: _dbLock(txn->lockState(), ns, mode), _db(dbHolder().get(txn, ns)) {}
+AutoGetCollection::AutoGetCollection(OperationContext* txn,
+ const NamespaceString& nss,
+ LockMode mode)
+ : _autoDb(txn, nss.db(), mode),
+ _collLock(txn->lockState(), nss.ns(), mode),
+ _coll(_autoDb.getDb() ? _autoDb.getDb()->getCollection(nss) : nullptr) {}
+
AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode)
: _transaction(txn, MODE_IX),
_dbLock(txn->lockState(), ns, mode),
@@ -60,48 +68,31 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockM
}
AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn, const std::string& ns)
- : _txn(txn),
- _transaction(txn, MODE_IS),
- _db(_txn, nsToDatabaseSubstring(ns), MODE_IS),
- _collLock(_txn->lockState(), ns, MODE_IS),
- _coll(NULL) {
- _init(ns, nsToCollectionSubstring(ns));
-}
+ : AutoGetCollectionForRead(txn, NamespaceString(ns)) {}
AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
const NamespaceString& nss)
- : _txn(txn),
- _transaction(txn, MODE_IS),
- _db(_txn, nss.db(), MODE_IS),
- _collLock(_txn->lockState(), nss.toString(), MODE_IS),
- _coll(NULL) {
- _init(nss.toString(), nss.coll());
-}
-
-void AutoGetCollectionForRead::_init(const std::string& ns, StringData coll) {
- massert(28535, "need a non-empty collection name", !coll.empty());
-
+ : _txn(txn), _transaction(txn, MODE_IS), _autoColl(txn, nss, MODE_IS) {
// We have both the DB and collection locked, which is the prerequisite to do a stable shard
- // version check.
- ensureShardVersionOKOrThrow(_txn, ns);
+ // version check
+ ensureShardVersionOKOrThrow(_txn, nss.ns());
auto curOp = CurOp::get(_txn);
stdx::lock_guard<Client> lk(*_txn->getClient());
+
// TODO: OldClientContext legacy, needs to be removed
curOp->ensureStarted();
- curOp->setNS_inlock(ns);
+ curOp->setNS_inlock(nss.ns());
// At this point, we are locked in shared mode for the database by the DB lock in the
// constructor, so it is safe to load the DB pointer.
- if (_db.getDb()) {
+ if (_autoColl.getDb()) {
// TODO: OldClientContext legacy, needs to be removed
- curOp->enter_inlock(ns.c_str(), _db.getDb()->getProfilingLevel());
-
- _coll = _db.getDb()->getCollection(ns);
+ curOp->enter_inlock(nss.ns().c_str(), _autoColl.getDb()->getProfilingLevel());
}
- if (_coll) {
- if (auto minSnapshot = _coll->getMinimumVisibleSnapshot()) {
+ if (getCollection()) {
+ if (auto minSnapshot = getCollection()->getMinimumVisibleSnapshot()) {
if (auto mySnapshot = _txn->recoveryUnit()->getMajorityCommittedSnapshot()) {
while (mySnapshot < minSnapshot) {
// Wait until a snapshot is available.
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index b37112cb267..b6522caad2b 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -45,7 +45,7 @@ class Collection;
* RAII-style class, which acquires a lock on the specified database in the requested mode and
* obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
*
- * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * It is guaranteed that the lock will be released when this object goes out of scope, therefore
* the database reference returned by this class should not be retained.
*/
class AutoGetDb {
@@ -64,6 +64,33 @@ private:
};
/**
+ * RAII-style class, which acquires locks on the specified database and collection in the
+ * requested mode and obtains references to both.
+ *
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
+ * the database and the collection references returned by this class should not be retained.
+ */
+class AutoGetCollection {
+ MONGO_DISALLOW_COPYING(AutoGetCollection);
+
+public:
+ AutoGetCollection(OperationContext* txn, const NamespaceString& nss, LockMode mode);
+
+ Database* getDb() const {
+ return _autoDb.getDb();
+ }
+
+ Collection* getCollection() const {
+ return _coll;
+ }
+
+private:
+ const AutoGetDb _autoDb;
+ const Lock::CollectionLock _collLock;
+ Collection* const _coll;
+};
+
+/**
* RAII-style class, which acquires a lock on the specified database in the requested mode and
* obtains a reference to the database, creating it if it does not exist. Used as a shortcut for
* calls to dbHolder().openDb(), taking care of locking details. The requested mode must be
@@ -100,7 +127,9 @@ private:
/**
* RAII-style class, which would acquire the appropriate hierarchy of locks for obtaining
- * a particular collection and would retrieve a reference to the collection.
+ * a particular collection and would retrieve a reference to the collection. In addition, this
+ * utility validates the shard version for the specified namespace and sets the current operation's
+ * namespace while this object is alive.
*
* It is guaranteed that locks will be released when this object goes out of scope, therefore
* database and collection references returned by this class should not be retained.
@@ -114,11 +143,11 @@ public:
~AutoGetCollectionForRead();
Database* getDb() const {
- return _db.getDb();
+ return _autoColl.getDb();
}
Collection* getCollection() const {
- return _coll;
+ return _autoColl.getCollection();
}
private:
@@ -127,10 +156,7 @@ private:
const Timer _timer;
OperationContext* const _txn;
const ScopedTransaction _transaction;
- const AutoGetDb _db;
- const Lock::CollectionLock _collLock;
-
- Collection* _coll;
+ const AutoGetCollection _autoColl;
};
/**
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 28037eb24b4..9078fa2f7e4 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -269,6 +269,10 @@ private:
size_t _dotIndex;
};
+template <typename StreamType>
+StreamType& operator<<(StreamType& stream, const NamespaceString& value) {
+ return stream << value.ns();
+}
// "database.a.b.c" -> "database"
inline StringData nsToDatabaseSubstring(StringData ns) {
@@ -350,7 +354,7 @@ struct NamespaceDBEquals {
return nsDBEquals(a, b);
}
};
-}
+} // namespace mongo
#include "mongo/db/namespace_string-inl.h"
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index b9971f54b03..35279d0e58b 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -46,7 +46,6 @@
namespace mongo {
-using std::endl;
using std::string;
/**
@@ -75,44 +74,43 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
*deletedDocs = 0;
ShardForceVersionOkModeBlock forceVersion(txn->getClient());
- {
- Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
- Helpers::RemoveSaver* removeSaverPtr = NULL;
- if (serverGlobalParams.moveParanoia && !taskDetails.options.removeSaverReason.empty()) {
- removeSaverPtr = &removeSaver;
- }
- // log the opId so the user can use it to cancel the delete using killOp.
- unsigned int opId = txn->getOpID();
- log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
- << exclusiveUpper << ", with opId: " << opId << endl;
-
- try {
- *deletedDocs =
- Helpers::removeRange(txn,
- KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
- false, /*maxInclusive*/
- writeConcern,
- removeSaverPtr,
- fromMigrate,
- onlyRemoveOrphans);
-
- if (*deletedDocs < 0) {
- *errMsg = "collection or index dropped before data could be cleaned";
- warning() << *errMsg << endl;
-
- return false;
- }
-
- log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
- << inclusiveLower << " -> " << exclusiveUpper << endl;
- } catch (const DBException& ex) {
- *errMsg = str::stream() << "Error encountered while deleting range: "
- << "ns" << ns << " from " << inclusiveLower << " -> "
- << exclusiveUpper << ", cause by:" << causedBy(ex);
+ Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
+ Helpers::RemoveSaver* removeSaverPtr = NULL;
+ if (serverGlobalParams.moveParanoia && !taskDetails.options.removeSaverReason.empty()) {
+ removeSaverPtr = &removeSaver;
+ }
+
+ // log the opId so the user can use it to cancel the delete using killOp.
+ unsigned int opId = txn->getOpID();
+ log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", with opId: " << opId;
+
+ try {
+ *deletedDocs =
+ Helpers::removeRange(txn,
+ KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
+ false, /*maxInclusive*/
+ writeConcern,
+ removeSaverPtr,
+ fromMigrate,
+ onlyRemoveOrphans);
+
+ if (*deletedDocs < 0) {
+ *errMsg = "collection or index dropped before data could be cleaned";
+ warning() << *errMsg;
return false;
}
+
+ log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
+ << inclusiveLower << " -> " << exclusiveUpper;
+ } catch (const DBException& ex) {
+ *errMsg = str::stream() << "Error encountered while deleting range: "
+ << "ns" << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", cause by:" << causedBy(ex);
+
+ return false;
}
return true;
@@ -121,12 +119,11 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
StringData ns,
std::set<CursorId>* openCursors) {
- AutoGetCollectionForRead ctx(txn, ns.toString());
- Collection* collection = ctx.getCollection();
- if (!collection) {
+ AutoGetCollection autoColl(txn, NamespaceString(ns), MODE_IS);
+ if (!autoColl.getCollection())
return;
- }
- collection->getCursorManager()->getCursorIds(openCursors);
-}
+ autoColl.getCollection()->getCursorManager()->getCursorIds(openCursors);
}
+
+} // namespace mongo
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index d2e61cc402a..34a978ae083 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -180,7 +180,7 @@ bool MigrationSourceManager::start(OperationContext* txn,
return false;
}
- _ns = ns;
+ _nss = NamespaceString(ns);
_min = min;
_max = max;
_shardKeyPattern = shardKeyPattern;
@@ -242,7 +242,7 @@ void MigrationSourceManager::logOp(OperationContext* txn,
if (!_active)
return;
- if (_ns != ns)
+ if (_nss != ns)
return;
// no need to log if this is not an insertion, an update, or an actual deletion
@@ -270,8 +270,8 @@ void MigrationSourceManager::logOp(OperationContext* txn,
if (op == 'u') {
BSONObj fullDoc;
- OldClientContext ctx(txn, _ns, false);
- if (!Helpers::findById(txn, ctx.db(), _ns.c_str(), idObj, fullDoc)) {
+ OldClientContext ctx(txn, _nss.ns(), false);
+ if (!Helpers::findById(txn, ctx.db(), _nss.ns().c_str(), idObj, fullDoc)) {
warning() << "logOpForSharding couldn't find: " << idObj << " even though should have"
<< migrateLog;
dassert(false); // TODO: Abort the migration.
@@ -303,8 +303,8 @@ bool MigrationSourceManager::transferMods(OperationContext* txn,
}
// TODO: fix SERVER-16540 race
- _xfer(txn, _ns, ctx.getDb(), &_deleted, b, "deleted", size, false);
- _xfer(txn, _ns, ctx.getDb(), &_reload, b, "reload", size, true);
+ _xfer(txn, _nss.ns(), ctx.getDb(), &_deleted, b, "deleted", size, false);
+ _xfer(txn, _nss.ns(), ctx.getDb(), &_reload, b, "reload", size, true);
}
b.append("size", size);
@@ -316,9 +316,9 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
long long maxChunkSize,
string& errmsg,
BSONObjBuilder& result) {
- AutoGetCollectionForRead ctx(txn, _getNS());
+ AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
- Collection* collection = ctx.getCollection();
+ Collection* collection = autoColl.getCollection();
if (!collection) {
errmsg = "ns not found, should be impossible";
return false;
@@ -333,7 +333,7 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
if (idx == NULL) {
errmsg = str::stream() << "can't find index with prefix " << _shardKeyPattern
- << " in storeCurrentLocs for " << _ns;
+ << " in storeCurrentLocs for " << _nss;
return false;
}
@@ -420,7 +420,7 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
<< maxRecsWhenFull << " , the maximum chunk size is " << maxChunkSize
<< " , average document size is " << avgRecSize << ". Found " << recCount
<< " documents in chunk "
- << " ns: " << _ns << " " << _min << " -> " << _max << migrateLog;
+ << " ns: " << _nss << " " << _min << " -> " << _max << migrateLog;
result.appendBool("chunkTooBig", true);
result.appendNumber("estimatedChunkSize", (long long)(recCount * avgRecSize));
@@ -440,7 +440,7 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
int allocSize = 0;
{
- AutoGetCollectionForRead ctx(txn, _getNS());
+ AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
@@ -448,9 +448,9 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
return false;
}
- Collection* collection = ctx.getCollection();
+ Collection* collection = autoColl.getCollection();
if (!collection) {
- errmsg = str::stream() << "collection " << _ns << " does not exist";
+ errmsg = str::stream() << "collection " << _nss << " does not exist";
return false;
}
@@ -462,7 +462,7 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
bool isBufferFilled = false;
BSONArrayBuilder clonedDocsArrayBuilder(allocSize);
while (!isBufferFilled) {
- AutoGetCollectionForRead ctx(txn, _getNS());
+ AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
@@ -471,9 +471,9 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
}
// TODO: fix SERVER-16540 race
- Collection* collection = ctx.getCollection();
+ Collection* collection = autoColl.getCollection();
if (!collection) {
- errmsg = str::stream() << "collection " << _ns << " does not exist";
+ errmsg = str::stream() << "collection " << _nss << " does not exist";
return false;
}
@@ -597,9 +597,9 @@ void MigrationSourceManager::_xfer(OperationContext* txn,
arr.done();
}
-std::string MigrationSourceManager::_getNS() const {
+NamespaceString MigrationSourceManager::_getNS() const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _ns;
+ return _nss;
}
} // namespace mongo
diff --git a/src/mongo/db/s/migration_source_manager.h b/src/mongo/db/s/migration_source_manager.h
index 0475e771fcd..4921610332d 100644
--- a/src/mongo/db/s/migration_source_manager.h
+++ b/src/mongo/db/s/migration_source_manager.h
@@ -34,6 +34,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/bson/bsonobj.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/stdx/condition_variable.h"
namespace mongo {
@@ -131,7 +132,7 @@ private:
long long& size,
bool explode);
- std::string _getNS() const;
+ NamespaceString _getNS() const;
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
@@ -166,7 +167,7 @@ private:
// If a migration is currently active.
bool _active{false}; // (MG)
- std::string _ns; // (MG)
+ NamespaceString _nss; // (MG)
BSONObj _min; // (MG)
BSONObj _max; // (MG)
BSONObj _shardKeyPattern; // (MG)
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index ad6a3f8434f..693a032877f 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -99,7 +99,7 @@ public:
int,
string& errmsg,
BSONObjBuilder& result) {
- std::string ns = parseNs(dbname, jsobj);
+ const NamespaceString nss = NamespaceString(parseNs(dbname, jsobj));
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
if (keyPattern.isEmpty()) {
@@ -119,8 +119,9 @@ public:
return false;
}
- AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.getCollection();
+ AutoGetCollection autoColl(txn, nss, MODE_IS);
+
+ Collection* const collection = autoColl.getCollection();
if (!collection) {
errmsg = "ns not found";
return false;
@@ -199,7 +200,7 @@ public:
ostringstream os;
os << "found missing value in key " << currKey << " for doc: "
<< (obj.hasField("_id") ? obj.toString() : obj["_id"].toString());
- log() << "checkShardingIndex for '" << ns << "' failed: " << os.str();
+ log() << "checkShardingIndex for '" << nss << "' failed: " << os.str();
errmsg = os.str();
return false;
@@ -262,7 +263,7 @@ public:
// access the actual data.
//
- const std::string ns = parseNs(dbname, jsobj);
+ const NamespaceString nss = NamespaceString(parseNs(dbname, jsobj));
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
if (keyPattern.isEmpty()) {
@@ -295,8 +296,9 @@ public:
{
// Get the size estimate for this namespace
- AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.getCollection();
+ AutoGetCollection autoColl(txn, nss, MODE_IS);
+
+ Collection* const collection = autoColl.getCollection();
if (!collection) {
errmsg = "ns not found";
return false;
@@ -372,7 +374,7 @@ public:
return true;
}
- log() << "request split points lookup for chunk " << ns << " " << min << " -->> "
+ log() << "request split points lookup for chunk " << nss << " " << min << " -->> "
<< max;
// We'll use the average object size and number of objects to find approximately how many
@@ -442,7 +444,7 @@ public:
// Stop if we have enough split points.
if (maxSplitPoints && (numChunks >= maxSplitPoints)) {
log() << "max number of requested split points reached (" << numChunks
- << ") before the end of chunk " << ns << " " << min << " -->> "
+ << ") before the end of chunk " << nss << " " << min << " -->> "
<< max;
break;
}
@@ -486,7 +488,7 @@ public:
for (set<BSONObj>::const_iterator it = tooFrequentKeys.begin();
it != tooFrequentKeys.end();
++it) {
- warning() << "possible low cardinality key detected in " << ns << " - key is "
+ warning() << "possible low cardinality key detected in " << nss << " - key is "
<< prettyKey(idx->keyPattern(), *it);
}
@@ -494,7 +496,7 @@ public:
splitKeys.erase(splitKeys.begin());
if (timer.millis() > serverGlobalParams.slowMS) {
- warning() << "Finding the split vector for " << ns << " over " << keyPattern
+ warning() << "Finding the split vector for " << nss << " over " << keyPattern
<< " keyCount: " << keyCount << " numSplits: " << splitKeys.size()
<< " lookedAt: " << currCount << " took " << timer.millis() << "ms";
}
@@ -508,6 +510,7 @@ public:
result.append("splitKeys", splitKeys);
return true;
}
+
} cmdSplitVector;
class SplitChunkCommand : public Command {
@@ -552,9 +555,9 @@ public:
// 1. check whether parameters passed to splitChunk are sound
//
- const string ns = parseNs(dbname, cmdObj);
- if (ns.empty()) {
- errmsg = "need to specify namespace in command";
+ const NamespaceString nss = NamespaceString(parseNs(dbname, cmdObj));
+ if (!nss.isValid()) {
+ errmsg = str::stream() << "invalid namespace '" << nss << "' specified for command";
return false;
}
@@ -621,12 +624,12 @@ public:
// 2. lock the collection's metadata and get highest version for the current shard
//
- string whyMessage(str::stream() << "splitting chunk [" << minKey << ", " << maxKey
- << ") in " << ns);
- auto scopedDistLock = grid.catalogManager(txn)->distLock(ns, whyMessage);
+ const string whyMessage(str::stream() << "splitting chunk [" << minKey << ", " << maxKey
+ << ") in " << nss);
+ auto scopedDistLock = grid.catalogManager(txn)->distLock(nss.ns(), whyMessage);
if (!scopedDistLock.isOK()) {
- errmsg = str::stream() << "could not acquire collection lock for " << ns
+ errmsg = str::stream() << "could not acquire collection lock for " << nss
<< " to split chunk [" << minKey << "," << maxKey << ")"
<< causedBy(scopedDistLock.getStatus());
warning() << errmsg;
@@ -635,7 +638,7 @@ public:
// Always check our version remotely
ChunkVersion shardVersion;
- Status refreshStatus = shardingState->refreshMetadataNow(txn, ns, &shardVersion);
+ Status refreshStatus = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);
if (!refreshStatus.isOK()) {
errmsg = str::stream() << "splitChunk cannot split chunk "
@@ -674,7 +677,7 @@ public:
// Get collection metadata
const std::shared_ptr<CollectionMetadata> collMetadata(
- shardingState->getCollectionMetadata(ns));
+ shardingState->getCollectionMetadata(nss.ns()));
// With nonzero shard version, we must have metadata
invariant(NULL != collMetadata);
@@ -744,9 +747,9 @@ public:
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), Chunk::genID(ns, startKey));
+ n.append(ChunkType::name(), Chunk::genID(nss.ns(), startKey));
nextChunkVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
- n.append(ChunkType::ns(), ns);
+ n.append(ChunkType::ns(), nss.ns());
n.append(ChunkType::min(), startKey);
n.append(ChunkType::max(), endKey);
n.append(ChunkType::shard(), shardName);
@@ -754,7 +757,7 @@ public:
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), Chunk::genID(ns, startKey));
+ q.append(ChunkType::name(), Chunk::genID(nss.ns(), startKey));
q.done();
updates.append(op.obj());
@@ -777,7 +780,7 @@ public:
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS);
b.append("q",
- BSON("query" << BSON(ChunkType::ns(ns)) << "orderby"
+ BSON("query" << BSON(ChunkType::ns(nss.ns())) << "orderby"
<< BSON(ChunkType::DEPRECATED_lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
@@ -803,8 +806,8 @@ public:
{
ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), ns, MODE_X);
+ Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);
// NOTE: The newShardVersion resulting from this split is higher than any
// other chunk version, so it's also implicitly the newCollVersion
@@ -816,7 +819,7 @@ public:
// TODO: Revisit this interface, it's a bit clunky
newShardVersion.incMinor();
- shardingState->splitChunk(txn, ns, min, max, splitKeys, newShardVersion);
+ shardingState->splitChunk(txn, nss.ns(), min, max, splitKeys, newShardVersion);
}
//
@@ -828,8 +831,8 @@ public:
appendShortVersion(logDetail.subobjStart("left"), *newChunks[0]);
appendShortVersion(logDetail.subobjStart("right"), *newChunks[1]);
- grid.catalogManager(txn)
- ->logChange(txn->getClient()->clientAddress(true), "split", ns, logDetail.obj());
+ grid.catalogManager(txn)->logChange(
+ txn->getClient()->clientAddress(true), "split", nss.ns(), logDetail.obj());
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -842,8 +845,10 @@ public:
chunkDetail.append("of", newChunksSize);
appendShortVersion(chunkDetail.subobjStart("chunk"), *newChunks[i]);
- grid.catalogManager(txn)->logChange(
- txn->getClient()->clientAddress(true), "multi-split", ns, chunkDetail.obj());
+ grid.catalogManager(txn)->logChange(txn->getClient()->clientAddress(true),
+ "multi-split",
+ nss.ns(),
+ chunkDetail.obj());
}
}
@@ -853,10 +858,11 @@ public:
// Select chunk to move out for "top chunk optimization".
KeyPattern shardKeyPattern(collMetadata->getKeyPattern());
- AutoGetCollectionForRead ctx(txn, ns);
- Collection* collection = ctx.getCollection();
+ AutoGetCollection autoColl(txn, nss, MODE_IS);
+
+ Collection* const collection = autoColl.getCollection();
if (!collection) {
- warning() << "will not perform top-chunk checking since " << ns
+ warning() << "will not perform top-chunk checking since " << nss
<< " does not exist after splitting";
return true;
}