author    Benety Goh <benety@mongodb.com>    2015-08-21 20:31:40 -0400
committer Benety Goh <benety@mongodb.com>    2015-08-21 20:31:40 -0400
commit    045cd1070cae1e7827255850c2fe35194e48b24e (patch)
tree      154354109f0039ff21d6e93e0b1687115f184751 /src/mongo
parent    6124799c4e14d1ffc9419e6548ec96626e73dcda (diff)
Revert "SERVER-19855 Do not perform shard version checking where not necessary"
This reverts commit 31716d2ae526d82d7d36464f6c9fae8b9f38542f.
Diffstat (limited to 'src/mongo')
-rw-r--r--  src/mongo/db/db_raii.cpp                    | 45
-rw-r--r--  src/mongo/db/db_raii.h                      | 42
-rw-r--r--  src/mongo/db/namespace_string.h             |  6
-rw-r--r--  src/mongo/db/range_deleter_db_env.cpp       | 79
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp | 36
-rw-r--r--  src/mongo/db/s/migration_source_manager.h   |  5
-rw-r--r--  src/mongo/s/d_split.cpp                     | 74

7 files changed, 131 insertions(+), 156 deletions(-)
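Taken together, the hunks below delete the AutoGetCollection helper added by SERVER-19855 and fold its locking back into AutoGetCollectionForRead, which once again performs the shard version check itself. A minimal caller-side sketch of the restored read path (the namespace "test.users" and the countDocs wrapper are illustrative, not part of this commit):

    // Hypothetical caller of the restored AutoGetCollectionForRead.
    long long countDocs(OperationContext* txn) {
        // Acquires the global/DB/collection locks in MODE_IS, throws if the
        // shard version for "test.users" is stale, then resolves the collection.
        AutoGetCollectionForRead ctx(txn, "test.users");
        Collection* coll = ctx.getCollection();
        if (!coll) {
            return 0;  // collection does not exist
        }
        return coll->numRecords(txn);  // assumes Collection::numRecords(txn) is available
    }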
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index a987062b11b..c7634052710 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/repl/replication_coordinator_global.h"
@@ -44,13 +43,6 @@ namespace mongo {
AutoGetDb::AutoGetDb(OperationContext* txn, StringData ns, LockMode mode)
: _dbLock(txn->lockState(), ns, mode), _db(dbHolder().get(txn, ns)) {}
-AutoGetCollection::AutoGetCollection(OperationContext* txn,
- const NamespaceString& nss,
- LockMode mode)
- : _autoDb(txn, nss.db(), mode),
- _collLock(txn->lockState(), nss.ns(), mode),
- _coll(_autoDb.getDb() ? _autoDb.getDb()->getCollection(nss) : nullptr) {}
-
AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockMode mode)
: _transaction(txn, MODE_IX),
_dbLock(txn->lockState(), ns, mode),
@@ -68,31 +60,48 @@ AutoGetOrCreateDb::AutoGetOrCreateDb(OperationContext* txn, StringData ns, LockM
}
AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn, const std::string& ns)
- : AutoGetCollectionForRead(txn, NamespaceString(ns)) {}
+ : _txn(txn),
+ _transaction(txn, MODE_IS),
+ _db(_txn, nsToDatabaseSubstring(ns), MODE_IS),
+ _collLock(_txn->lockState(), ns, MODE_IS),
+ _coll(NULL) {
+ _init(ns, nsToCollectionSubstring(ns));
+}
AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* txn,
const NamespaceString& nss)
- : _txn(txn), _transaction(txn, MODE_IS), _autoColl(txn, nss, MODE_IS) {
+ : _txn(txn),
+ _transaction(txn, MODE_IS),
+ _db(_txn, nss.db(), MODE_IS),
+ _collLock(_txn->lockState(), nss.toString(), MODE_IS),
+ _coll(NULL) {
+ _init(nss.toString(), nss.coll());
+}
+
+void AutoGetCollectionForRead::_init(const std::string& ns, StringData coll) {
+ massert(28535, "need a non-empty collection name", !coll.empty());
+
// We have both the DB and collection locked, which is the prerequisite to do a stable shard
- // version check
- ensureShardVersionOKOrThrow(_txn, nss.ns());
+ // version check.
+ ensureShardVersionOKOrThrow(_txn, ns);
auto curOp = CurOp::get(_txn);
stdx::lock_guard<Client> lk(*_txn->getClient());
-
// TODO: OldClientContext legacy, needs to be removed
curOp->ensureStarted();
- curOp->setNS_inlock(nss.ns());
+ curOp->setNS_inlock(ns);
// At this point, we are locked in shared mode for the database by the DB lock in the
// constructor, so it is safe to load the DB pointer.
- if (_autoColl.getDb()) {
+ if (_db.getDb()) {
// TODO: OldClientContext legacy, needs to be removed
- curOp->enter_inlock(nss.ns().c_str(), _autoColl.getDb()->getProfilingLevel());
+ curOp->enter_inlock(ns.c_str(), _db.getDb()->getProfilingLevel());
+
+ _coll = _db.getDb()->getCollection(ns);
}
- if (getCollection()) {
- if (auto minSnapshot = getCollection()->getMinimumVisibleSnapshot()) {
+ if (_coll) {
+ if (auto minSnapshot = _coll->getMinimumVisibleSnapshot()) {
if (auto mySnapshot = _txn->recoveryUnit()->getMajorityCommittedSnapshot()) {
while (mySnapshot < minSnapshot) {
// Wait until a snapshot is available.
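The member-initializer lists in the two restored constructors encode the lock acquisition order. A sketch of the equivalent explicit sequence for a namespace "test.users" (illustrative; the constructor signatures are the ones used elsewhere in this diff):

    ScopedTransaction transaction(txn, MODE_IS);  // ties the storage transaction to the locks below
    Lock::DBLock dbLock(txn->lockState(), "test", MODE_IS);  // AutoGetDb's lock
    Lock::CollectionLock collLock(txn->lockState(), "test.users", MODE_IS);
    ensureShardVersionOKOrThrow(txn, "test.users");  // only after both locks are held

Because members are destroyed in reverse declaration order, the collection lock is released before the database lock, preserving the hierarchy on the way out as well.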
diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h
index b6522caad2b..b37112cb267 100644
--- a/src/mongo/db/db_raii.h
+++ b/src/mongo/db/db_raii.h
@@ -45,7 +45,7 @@ class Collection;
* RAII-style class, which acquires a lock on the specified database in the requested mode and
* obtains a reference to the database. Used as a shortcut for calls to dbHolder().get().
*
- * It is guaranteed that the lock will be released when this object goes out of scope, therefore
+ * It is guaranteed that locks will be released when this object goes out of scope, therefore
* the database reference returned by this class should not be retained.
*/
class AutoGetDb {
@@ -64,33 +64,6 @@ private:
};
/**
- * RAII-style class, which acquires a locks on the specified database and collection in the
- * requested mode and obtains references to both.
- *
- * It is guaranteed that locks will be released when this object goes out of scope, therefore
- * the database and the collection references returned by this class should not be retained.
- */
-class AutoGetCollection {
- MONGO_DISALLOW_COPYING(AutoGetCollection);
-
-public:
- AutoGetCollection(OperationContext* txn, const NamespaceString& nss, LockMode mode);
-
- Database* getDb() const {
- return _autoDb.getDb();
- }
-
- Collection* getCollection() const {
- return _coll;
- }
-
-private:
- const AutoGetDb _autoDb;
- const Lock::CollectionLock _collLock;
- Collection* const _coll;
-};
-
-/**
* RAII-style class, which acquires a lock on the specified database in the requested mode and
* obtains a reference to the database, creating it if non-existing. Used as a shortcut for
* calls to dbHolder().openDb(), taking care of locking details. The requested mode must be
@@ -127,9 +100,7 @@ private:
/**
* RAII-style class, which would acquire the appropriate hierarchy of locks for obtaining
- * a particular collection and would retrieve a reference to the collection. In addition, this
- * utility validates the shard version for the specified namespace and sets the current operation's
- * namespace for the duration while this object is alive.
+ * a particular collection and would retrieve a reference to the collection.
*
* It is guaranteed that locks will be released when this object goes out of scope, therefore
* database and collection references returned by this class should not be retained.
@@ -143,11 +114,11 @@ public:
~AutoGetCollectionForRead();
Database* getDb() const {
- return _autoColl.getDb();
+ return _db.getDb();
}
Collection* getCollection() const {
- return _autoColl.getCollection();
+ return _coll;
}
private:
@@ -156,7 +127,10 @@ private:
const Timer _timer;
OperationContext* const _txn;
const ScopedTransaction _transaction;
- const AutoGetCollection _autoColl;
+ const AutoGetDb _db;
+ const Lock::CollectionLock _collLock;
+
+ Collection* _coll;
};
/**
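Both class comments above guarantee that locks are released when the guard goes out of scope, which is exactly why the returned pointers must not be retained. A sketch of the anti-pattern this rules out (hypothetical code, not from this commit):

    Collection* coll;
    {
        AutoGetCollectionForRead ctx(txn, "test.users");
        coll = ctx.getCollection();
    }  // all locks released here
    // 'coll' is now unprotected; the collection could be dropped at any
    // moment, so dereferencing it past this point is unsafe.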
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index 9078fa2f7e4..28037eb24b4 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -269,10 +269,6 @@ private:
size_t _dotIndex;
};
-template <typename StreamType>
-StreamType& operator<<(StreamType& stream, const NamespaceString& value) {
- return stream << value.ns();
-}
// "database.a.b.c" -> "database"
inline StringData nsToDatabaseSubstring(StringData ns) {
@@ -354,7 +350,7 @@ struct NamespaceDBEquals {
return nsDBEquals(a, b);
}
};
+}
-} // namespace mongo
#include "mongo/db/namespace_string-inl.h"
diff --git a/src/mongo/db/range_deleter_db_env.cpp b/src/mongo/db/range_deleter_db_env.cpp
index 35279d0e58b..b9971f54b03 100644
--- a/src/mongo/db/range_deleter_db_env.cpp
+++ b/src/mongo/db/range_deleter_db_env.cpp
@@ -46,6 +46,7 @@
namespace mongo {
+using std::endl;
using std::string;
/**
@@ -74,43 +75,44 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
*deletedDocs = 0;
ShardForceVersionOkModeBlock forceVersion(txn->getClient());
+ {
+ Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
+ Helpers::RemoveSaver* removeSaverPtr = NULL;
+ if (serverGlobalParams.moveParanoia && !taskDetails.options.removeSaverReason.empty()) {
+ removeSaverPtr = &removeSaver;
+ }
- Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
- Helpers::RemoveSaver* removeSaverPtr = NULL;
- if (serverGlobalParams.moveParanoia && !taskDetails.options.removeSaverReason.empty()) {
- removeSaverPtr = &removeSaver;
- }
-
- // log the opId so the user can use it to cancel the delete using killOp.
- unsigned int opId = txn->getOpID();
- log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
- << exclusiveUpper << ", with opId: " << opId;
-
- try {
- *deletedDocs =
- Helpers::removeRange(txn,
- KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
- false, /*maxInclusive*/
- writeConcern,
- removeSaverPtr,
- fromMigrate,
- onlyRemoveOrphans);
-
- if (*deletedDocs < 0) {
- *errMsg = "collection or index dropped before data could be cleaned";
- warning() << *errMsg;
+ // log the opId so the user can use it to cancel the delete using killOp.
+ unsigned int opId = txn->getOpID();
+ log() << "Deleter starting delete for: " << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", with opId: " << opId << endl;
+
+ try {
+ *deletedDocs =
+ Helpers::removeRange(txn,
+ KeyRange(ns, inclusiveLower, exclusiveUpper, keyPattern),
+ false, /*maxInclusive*/
+ writeConcern,
+ removeSaverPtr,
+ fromMigrate,
+ onlyRemoveOrphans);
+
+ if (*deletedDocs < 0) {
+ *errMsg = "collection or index dropped before data could be cleaned";
+ warning() << *errMsg << endl;
+
+ return false;
+ }
+
+ log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
+ << inclusiveLower << " -> " << exclusiveUpper << endl;
+ } catch (const DBException& ex) {
+ *errMsg = str::stream() << "Error encountered while deleting range: "
+ << "ns" << ns << " from " << inclusiveLower << " -> "
+ << exclusiveUpper << ", cause by:" << causedBy(ex);
return false;
}
-
- log() << "rangeDeleter deleted " << *deletedDocs << " documents for " << ns << " from "
- << inclusiveLower << " -> " << exclusiveUpper;
- } catch (const DBException& ex) {
- *errMsg = str::stream() << "Error encountered while deleting range: "
- << "ns" << ns << " from " << inclusiveLower << " -> "
- << exclusiveUpper << ", cause by:" << causedBy(ex);
-
- return false;
}
return true;
@@ -119,11 +121,12 @@ bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
void RangeDeleterDBEnv::getCursorIds(OperationContext* txn,
StringData ns,
std::set<CursorId>* openCursors) {
- AutoGetCollection autoColl(txn, NamespaceString(ns), MODE_IS);
- if (!autoColl.getCollection())
+ AutoGetCollectionForRead ctx(txn, ns.toString());
+ Collection* collection = ctx.getCollection();
+ if (!collection) {
return;
+ }
- autoColl.getCollection()->getCursorManager()->getCursorIds(openCursors);
+ collection->getCursorManager()->getCursorIds(openCursors);
+}
}
-
-} // namespace mongo
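One effect of the re-added braces around the body of deleteRange: they bound the lifetime of the RemoveSaver, so its destructor (which finalizes the moveChunk-paranoia file of saved documents) runs before the function's final return true rather than at function exit. Minimal shape of the pattern (a sketch, not the verbatim body):

    {
        Helpers::RemoveSaver removeSaver("moveChunk", ns, taskDetails.options.removeSaverReason);
        // ... Helpers::removeRange(...) may record deleted docs via &removeSaver ...
    }  // removeSaver destroyed here, before the return below
    return true;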
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 34a978ae083..d2e61cc402a 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -180,7 +180,7 @@ bool MigrationSourceManager::start(OperationContext* txn,
return false;
}
- _nss = NamespaceString(ns);
+ _ns = ns;
_min = min;
_max = max;
_shardKeyPattern = shardKeyPattern;
@@ -242,7 +242,7 @@ void MigrationSourceManager::logOp(OperationContext* txn,
if (!_active)
return;
- if (_nss != ns)
+ if (_ns != ns)
return;
// no need to log if this is not an insertion, an update, or an actual deletion
@@ -270,8 +270,8 @@ void MigrationSourceManager::logOp(OperationContext* txn,
if (op == 'u') {
BSONObj fullDoc;
- OldClientContext ctx(txn, _nss.ns(), false);
- if (!Helpers::findById(txn, ctx.db(), _nss.ns().c_str(), idObj, fullDoc)) {
+ OldClientContext ctx(txn, _ns, false);
+ if (!Helpers::findById(txn, ctx.db(), _ns.c_str(), idObj, fullDoc)) {
warning() << "logOpForSharding couldn't find: " << idObj << " even though should have"
<< migrateLog;
dassert(false); // TODO: Abort the migration.
@@ -303,8 +303,8 @@ bool MigrationSourceManager::transferMods(OperationContext* txn,
}
// TODO: fix SERVER-16540 race
- _xfer(txn, _nss.ns(), ctx.getDb(), &_deleted, b, "deleted", size, false);
- _xfer(txn, _nss.ns(), ctx.getDb(), &_reload, b, "reload", size, true);
+ _xfer(txn, _ns, ctx.getDb(), &_deleted, b, "deleted", size, false);
+ _xfer(txn, _ns, ctx.getDb(), &_reload, b, "reload", size, true);
}
b.append("size", size);
@@ -316,9 +316,9 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
long long maxChunkSize,
string& errmsg,
BSONObjBuilder& result) {
- AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
+ AutoGetCollectionForRead ctx(txn, _getNS());
- Collection* collection = autoColl.getCollection();
+ Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = "ns not found, should be impossible";
return false;
@@ -333,7 +333,7 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
if (idx == NULL) {
errmsg = str::stream() << "can't find index with prefix " << _shardKeyPattern
- << " in storeCurrentLocs for " << _nss;
+ << " in storeCurrentLocs for " << _ns;
return false;
}
@@ -420,7 +420,7 @@ bool MigrationSourceManager::storeCurrentLocs(OperationContext* txn,
<< maxRecsWhenFull << " , the maximum chunk size is " << maxChunkSize
<< " , average document size is " << avgRecSize << ". Found " << recCount
<< " documents in chunk "
- << " ns: " << _nss << " " << _min << " -> " << _max << migrateLog;
+ << " ns: " << _ns << " " << _min << " -> " << _max << migrateLog;
result.appendBool("chunkTooBig", true);
result.appendNumber("estimatedChunkSize", (long long)(recCount * avgRecSize));
@@ -440,7 +440,7 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
int allocSize = 0;
{
- AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
+ AutoGetCollectionForRead ctx(txn, _getNS());
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
@@ -448,9 +448,9 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
return false;
}
- Collection* collection = autoColl.getCollection();
+ Collection* collection = ctx.getCollection();
if (!collection) {
- errmsg = str::stream() << "collection " << _nss << " does not exist";
+ errmsg = str::stream() << "collection " << _ns << " does not exist";
return false;
}
@@ -462,7 +462,7 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
bool isBufferFilled = false;
BSONArrayBuilder clonedDocsArrayBuilder(allocSize);
while (!isBufferFilled) {
- AutoGetCollection autoColl(txn, _getNS(), MODE_IS);
+ AutoGetCollectionForRead ctx(txn, _getNS());
stdx::lock_guard<stdx::mutex> sl(_mutex);
if (!_active) {
@@ -471,9 +471,9 @@ bool MigrationSourceManager::clone(OperationContext* txn, string& errmsg, BSONOb
}
// TODO: fix SERVER-16540 race
- Collection* collection = autoColl.getCollection();
+ Collection* collection = ctx.getCollection();
if (!collection) {
- errmsg = str::stream() << "collection " << _nss << " does not exist";
+ errmsg = str::stream() << "collection " << _ns << " does not exist";
return false;
}
@@ -597,9 +597,9 @@ void MigrationSourceManager::_xfer(OperationContext* txn,
arr.done();
}
-NamespaceString MigrationSourceManager::_getNS() const {
+std::string MigrationSourceManager::_getNS() const {
stdx::lock_guard<stdx::mutex> lk(_mutex);
- return _nss;
+ return _ns;
}
} // namespace mongo
diff --git a/src/mongo/db/s/migration_source_manager.h b/src/mongo/db/s/migration_source_manager.h
index 4921610332d..0475e771fcd 100644
--- a/src/mongo/db/s/migration_source_manager.h
+++ b/src/mongo/db/s/migration_source_manager.h
@@ -34,7 +34,6 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/namespace_string.h"
#include "mongo/stdx/condition_variable.h"
namespace mongo {
@@ -132,7 +131,7 @@ private:
long long& size,
bool explode);
- NamespaceString _getNS() const;
+ std::string _getNS() const;
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
@@ -167,7 +166,7 @@ private:
// If a migration is currently active.
bool _active{false}; // (MG)
- NamespaceString _nss; // (MG)
+ std::string _ns; // (MG)
BSONObj _min; // (MG)
BSONObj _max; // (MG)
BSONObj _shardKeyPattern; // (MG)
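The (MG) tag on these fields marks them as guarded by _mutex, per the synchronization legend this header references. Returning std::string by value from _getNS() preserves that contract: the copy is made while the lock is held, so callers such as storeCurrentLocs() can keep using the name after the guard is gone:

    std::string MigrationSourceManager::_getNS() const {
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        return _ns;  // copied out before 'lk' releases _mutex
    }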
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index 693a032877f..ad6a3f8434f 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -99,7 +99,7 @@ public:
int,
string& errmsg,
BSONObjBuilder& result) {
- const NamespaceString nss = NamespaceString(parseNs(dbname, jsobj));
+ std::string ns = parseNs(dbname, jsobj);
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
if (keyPattern.isEmpty()) {
@@ -119,9 +119,8 @@ public:
return false;
}
- AutoGetCollection autoColl(txn, nss, MODE_IS);
-
- Collection* const collection = autoColl.getCollection();
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = "ns not found";
return false;
@@ -200,7 +199,7 @@ public:
ostringstream os;
os << "found missing value in key " << currKey << " for doc: "
<< (obj.hasField("_id") ? obj.toString() : obj["_id"].toString());
- log() << "checkShardingIndex for '" << nss << "' failed: " << os.str();
+ log() << "checkShardingIndex for '" << ns << "' failed: " << os.str();
errmsg = os.str();
return false;
@@ -263,7 +262,7 @@ public:
// access the actual data.
//
- const NamespaceString nss = NamespaceString(parseNs(dbname, jsobj));
+ const std::string ns = parseNs(dbname, jsobj);
BSONObj keyPattern = jsobj.getObjectField("keyPattern");
if (keyPattern.isEmpty()) {
@@ -296,9 +295,8 @@ public:
{
// Get the size estimate for this namespace
- AutoGetCollection autoColl(txn, nss, MODE_IS);
-
- Collection* const collection = autoColl.getCollection();
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if (!collection) {
errmsg = "ns not found";
return false;
@@ -374,7 +372,7 @@ public:
return true;
}
- log() << "request split points lookup for chunk " << nss << " " << min << " -->> "
+ log() << "request split points lookup for chunk " << ns << " " << min << " -->> "
<< max;
// We'll use the average object size and number of object to find approximately how many
@@ -444,7 +442,7 @@ public:
// Stop if we have enough split points.
if (maxSplitPoints && (numChunks >= maxSplitPoints)) {
log() << "max number of requested split points reached (" << numChunks
- << ") before the end of chunk " << nss << " " << min << " -->> "
+ << ") before the end of chunk " << ns << " " << min << " -->> "
<< max;
break;
}
@@ -488,7 +486,7 @@ public:
for (set<BSONObj>::const_iterator it = tooFrequentKeys.begin();
it != tooFrequentKeys.end();
++it) {
- warning() << "possible low cardinality key detected in " << nss << " - key is "
+ warning() << "possible low cardinality key detected in " << ns << " - key is "
<< prettyKey(idx->keyPattern(), *it);
}
@@ -496,7 +494,7 @@ public:
splitKeys.erase(splitKeys.begin());
if (timer.millis() > serverGlobalParams.slowMS) {
- warning() << "Finding the split vector for " << nss << " over " << keyPattern
+ warning() << "Finding the split vector for " << ns << " over " << keyPattern
<< " keyCount: " << keyCount << " numSplits: " << splitKeys.size()
<< " lookedAt: " << currCount << " took " << timer.millis() << "ms";
}
@@ -510,7 +508,6 @@ public:
result.append("splitKeys", splitKeys);
return true;
}
-
} cmdSplitVector;
class SplitChunkCommand : public Command {
@@ -555,9 +552,9 @@ public:
// 1. check whether parameters passed to splitChunk are sound
//
- const NamespaceString nss = NamespaceString(parseNs(dbname, cmdObj));
- if (!nss.isValid()) {
- errmsg = str::stream() << "invalid namespace '" << nss << "' specified for command";
+ const string ns = parseNs(dbname, cmdObj);
+ if (ns.empty()) {
+ errmsg = "need to specify namespace in command";
return false;
}
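Note that this hunk weakens the namespace validation: the NamespaceString form rejected any name that did not parse into a valid database/collection pair, while the reverted string form only rejects the empty string. Illustrative consequence (assuming NamespaceString::isValid() requires a non-empty collection part):

    const string ns = parseNs(dbname, cmdObj);  // e.g. "db.coll"
    if (ns.empty()) {
        errmsg = "need to specify namespace in command";
        return false;  // only "" is caught here
    }
    // A malformed name such as "noDotHere" now passes this check and is
    // rejected only later, e.g. when the collection lookup comes back null.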
@@ -624,12 +621,12 @@ public:
// 2. lock the collection's metadata and get highest version for the current shard
//
- const string whyMessage(str::stream() << "splitting chunk [" << minKey << ", " << maxKey
- << ") in " << nss);
- auto scopedDistLock = grid.catalogManager(txn)->distLock(nss.ns(), whyMessage);
+ string whyMessage(str::stream() << "splitting chunk [" << minKey << ", " << maxKey
+ << ") in " << ns);
+ auto scopedDistLock = grid.catalogManager(txn)->distLock(ns, whyMessage);
if (!scopedDistLock.isOK()) {
- errmsg = str::stream() << "could not acquire collection lock for " << nss
+ errmsg = str::stream() << "could not acquire collection lock for " << ns
<< " to split chunk [" << minKey << "," << maxKey << ")"
<< causedBy(scopedDistLock.getStatus());
warning() << errmsg;
@@ -638,7 +635,7 @@ public:
// Always check our version remotely
ChunkVersion shardVersion;
- Status refreshStatus = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);
+ Status refreshStatus = shardingState->refreshMetadataNow(txn, ns, &shardVersion);
if (!refreshStatus.isOK()) {
errmsg = str::stream() << "splitChunk cannot split chunk "
@@ -677,7 +674,7 @@ public:
// Get collection metadata
const std::shared_ptr<CollectionMetadata> collMetadata(
- shardingState->getCollectionMetadata(nss.ns()));
+ shardingState->getCollectionMetadata(ns));
// With nonzero shard version, we must have metadata
invariant(NULL != collMetadata);
@@ -747,9 +744,9 @@ public:
// add the modified (new) chunk information as the update object
BSONObjBuilder n(op.subobjStart("o"));
- n.append(ChunkType::name(), Chunk::genID(nss.ns(), startKey));
+ n.append(ChunkType::name(), Chunk::genID(ns, startKey));
nextChunkVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
- n.append(ChunkType::ns(), nss.ns());
+ n.append(ChunkType::ns(), ns);
n.append(ChunkType::min(), startKey);
n.append(ChunkType::max(), endKey);
n.append(ChunkType::shard(), shardName);
@@ -757,7 +754,7 @@ public:
// add the chunk's _id as the query part of the update statement
BSONObjBuilder q(op.subobjStart("o2"));
- q.append(ChunkType::name(), Chunk::genID(nss.ns(), startKey));
+ q.append(ChunkType::name(), Chunk::genID(ns, startKey));
q.done();
updates.append(op.obj());
@@ -780,7 +777,7 @@ public:
BSONObjBuilder b;
b.append("ns", ChunkType::ConfigNS);
b.append("q",
- BSON("query" << BSON(ChunkType::ns(nss.ns())) << "orderby"
+ BSON("query" << BSON(ChunkType::ns(ns)) << "orderby"
<< BSON(ChunkType::DEPRECATED_lastmod() << -1)));
{
BSONObjBuilder bb(b.subobjStart("res"));
@@ -806,8 +803,8 @@ public:
{
ScopedTransaction transaction(txn, MODE_IX);
- Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
- Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);
+ Lock::DBLock writeLk(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
+ Lock::CollectionLock collLock(txn->lockState(), ns, MODE_X);
// NOTE: The newShardVersion resulting from this split is higher than any
// other chunk version, so it's also implicitly the newCollVersion
@@ -819,7 +816,7 @@ public:
// TODO: Revisit this interface, it's a bit clunky
newShardVersion.incMinor();
- shardingState->splitChunk(txn, nss.ns(), min, max, splitKeys, newShardVersion);
+ shardingState->splitChunk(txn, ns, min, max, splitKeys, newShardVersion);
}
//
@@ -831,8 +828,8 @@ public:
appendShortVersion(logDetail.subobjStart("left"), *newChunks[0]);
appendShortVersion(logDetail.subobjStart("right"), *newChunks[1]);
- grid.catalogManager(txn)->logChange(
- txn->getClient()->clientAddress(true), "split", nss.ns(), logDetail.obj());
+ grid.catalogManager(txn)
+ ->logChange(txn->getClient()->clientAddress(true), "split", ns, logDetail.obj());
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -845,10 +842,8 @@ public:
chunkDetail.append("of", newChunksSize);
appendShortVersion(chunkDetail.subobjStart("chunk"), *newChunks[i]);
- grid.catalogManager(txn)->logChange(txn->getClient()->clientAddress(true),
- "multi-split",
- nss.ns(),
- chunkDetail.obj());
+ grid.catalogManager(txn)->logChange(
+ txn->getClient()->clientAddress(true), "multi-split", ns, chunkDetail.obj());
}
}
@@ -858,11 +853,10 @@ public:
// Select chunk to move out for "top chunk optimization".
KeyPattern shardKeyPattern(collMetadata->getKeyPattern());
- AutoGetCollection autoColl(txn, nss, MODE_IS);
-
- Collection* const collection = autoColl.getCollection();
+ AutoGetCollectionForRead ctx(txn, ns);
+ Collection* collection = ctx.getCollection();
if (!collection) {
- warning() << "will not perform top-chunk checking since " << nss
+ warning() << "will not perform top-chunk checking since " << ns
<< " does not exist after splitting";
return true;
}
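This final hunk is the top-chunk-optimization probe re-acquiring the collection after the split locks were dropped. Two things worth noting about the restored form: re-acquisition goes through AutoGetCollectionForRead, so the shard version check restored earlier in this commit runs again at this point, and a concurrent drop merely downgrades the probe to a warning with return true, since the split metadata has already been applied; only the follow-up chunk-migration suggestion is skipped.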