author      Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-08-16 16:27:25 -0400
committer   Kaloian Manassiev <kaloian.manassiev@mongodb.com>    2016-08-17 18:20:16 -0400
commit      08fbb333bc11fba2b9df71a4262da5e18ed00d47 (patch)
tree        cee6df55cd511efc754dc8a9a6fabdeba8be5c1d
parent      03e45d0dd7292153e51fcd98ad3f3f57ceb8a0b4 (diff)
download    mongo-08fbb333bc11fba2b9df71a4262da5e18ed00d47.tar.gz
SERVER-25602 Make split/mergeChunks commands check the validity of input
-rw-r--r--   jstests/sharding/split_against_shard_with_invalid_split_points.js | 42
-rw-r--r--   src/mongo/db/s/sharding_state.cpp                                  | 60
-rw-r--r--   src/mongo/db/s/sharding_state.h                                    | 47
-rw-r--r--   src/mongo/s/d_merge.cpp                                            | 11
-rw-r--r--   src/mongo/s/d_split.cpp                                            | 27
5 files changed, 89 insertions, 98 deletions
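
In outline, the commit moves split/merge validation out of ShardingState and into the commands themselves: the command first builds the post-split or post-merge metadata off to the side with cloneSplit()/cloneMerge(), fails with IllegalOperation if that clone cannot be produced, and only then writes to the config servers and installs the already-validated metadata via the new ShardingState::exchangeCollectionMetadata(). A condensed sketch of that pattern, assembled from the d_merge.cpp hunk below (locking, the applyOps plumbing, and error paths omitted):

    // Dry-run the merge against the current metadata. cloneMerge() returns
    // nullptr and fills 'mergeErrMsg' if ['minKey', 'maxKey') is not a valid
    // merge range, so nothing has been modified yet at this point.
    std::string mergeErrMsg;
    std::unique_ptr<CollectionMetadata> cloned(
        metadata->cloneMerge(minKey, maxKey, mergeVersion, &mergeErrMsg));

    // Reject the command before any config metadata is written.
    uassert(ErrorCodes::IllegalOperation, mergeErrMsg, cloned.get());

    // ... run the applyOps batch against the config servers ...

    // Install the validated metadata on the shard (done under the collection X lock).
    shardingState->exchangeCollectionMetadata(txn, nss, std::move(cloned));
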
diff --git a/jstests/sharding/split_against_shard_with_invalid_split_points.js b/jstests/sharding/split_against_shard_with_invalid_split_points.js
new file mode 100644
index 00000000000..0ae5424983f
--- /dev/null
+++ b/jstests/sharding/split_against_shard_with_invalid_split_points.js
@@ -0,0 +1,42 @@
+// Tests that executing splitChunk directly against a shard, with an invalid split point will not
+// corrupt the chunks metadata
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({shards: 1});
+
+ var testDB = st.s.getDB('TestSplitDB');
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'TestSplitDB'}));
+ st.ensurePrimaryShard('TestSplitDB', st.shard0.shardName);
+
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'TestSplitDB.Coll', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'TestSplitDB.Coll', middle: {x: 0}}));
+
+ var chunksBefore = st.s.getDB('config').chunks.find().toArray();
+
+ // Try to do a split with invalid parameters through mongod
+ var callSplit = function(db, minKey, maxKey, splitPoints) {
+ var res = assert.commandWorked(st.s.adminCommand({getShardVersion: 'TestSplitDB.Coll'}));
+ var shardVersion = [res.version, res.versionEpoch];
+
+ return db.runCommand({
+ splitChunk: 'TestSplitDB.Coll',
+ from: st.shard0.shardName,
+ min: minKey,
+ max: maxKey,
+ keyPattern: {x: 1},
+ splitKeys: splitPoints,
+ shardVersion: shardVersion,
+ });
+ };
+
+ assert.commandFailedWithCode(callSplit(st.d0.getDB('admin'), {x: MinKey}, {x: 0}, [{x: 2}]),
+ ErrorCodes.IllegalOperation);
+
+ var chunksAfter = st.s.getDB('config').chunks.find().toArray();
+ assert.eq(chunksBefore,
+ chunksAfter,
+ 'Split chunks failed, but the chunks were updated in the config database');
+
+ st.stop();
+})();
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index 9ddf4f1333e..d184ef099dc 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -61,6 +61,7 @@ namespace mongo {
using std::shared_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
namespace {
@@ -376,52 +377,6 @@ bool ShardingState::forgetPending(OperationContext* txn,
return true;
}
-void ShardingState::splitChunk(OperationContext* txn,
- const string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const vector<BSONObj>& splitKeys,
- ChunkVersion version) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
- verify(it != _collMetadata.end());
-
- ChunkType chunk;
- chunk.setMin(min);
- chunk.setMax(max);
- string errMsg;
-
- shared_ptr<CollectionMetadata> cloned(
- it->second->cloneSplit(chunk, splitKeys, version, &errMsg));
- // uassert to match old behavior, TODO: report errors w/o throwing
- uassert(16857, errMsg, NULL != cloned.get());
-
- _collMetadata[ns] = cloned;
-}
-
-void ShardingState::mergeChunks(OperationContext* txn,
- const string& ns,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- ChunkVersion mergedVersion) {
- invariant(txn->lockState()->isCollectionLockedForMode(ns, MODE_X));
- stdx::lock_guard<stdx::mutex> lk(_mutex);
-
- CollectionMetadataMap::const_iterator it = _collMetadata.find(ns);
- verify(it != _collMetadata.end());
-
- string errMsg;
-
- shared_ptr<CollectionMetadata> cloned(
- it->second->cloneMerge(minKey, maxKey, mergedVersion, &errMsg));
- // uassert to match old behavior, TODO: report errors w/o throwing
- uassert(17004, errMsg, NULL != cloned.get());
-
- _collMetadata[ns] = cloned;
-}
-
bool ShardingState::inCriticalMigrateSection() {
return _migrationSourceManager.getInCriticalSection();
}
@@ -934,6 +889,19 @@ shared_ptr<CollectionMetadata> ShardingState::getCollectionMetadata(const string
}
}
+void ShardingState::exchangeCollectionMetadata(OperationContext* txn,
+ const NamespaceString& nss,
+ unique_ptr<CollectionMetadata> newMetadata) {
+ invariant(txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X));
+
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+
+ CollectionMetadataMap::iterator it = _collMetadata.find(nss.ns());
+ invariant(it != _collMetadata.end());
+
+ it->second = std::move(newMetadata);
+}
+
/**
* Global free function.
*/
diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h
index c9487c67cb1..6d22bbe5bd1 100644
--- a/src/mongo/db/s/sharding_state.h
+++ b/src/mongo/db/s/sharding_state.h
@@ -185,6 +185,14 @@ public:
std::shared_ptr<CollectionMetadata> getCollectionMetadata(const std::string& ns);
+ /**
+ * Swaps the current metadata for the namespace with 'newMetadata', which must not be null. May
+ * only be called when the collection distributed lock and the collection X lock are both held.
+ */
+ void exchangeCollectionMetadata(OperationContext* txn,
+ const NamespaceString& nss,
+ std::unique_ptr<CollectionMetadata> newMetadata);
+
// chunk migrate and split support
/**
@@ -263,45 +271,6 @@ public:
const OID& epoch,
std::string* errMsg);
- /**
- * Creates and installs a new chunk metadata for a given collection by splitting one of its
- * chunks in two or more. The version for the first split chunk should be provided. The
- * subsequent chunks' version would be the latter with the minor portion incremented.
- *
- * The effect on clients will depend on the version used. If the major portion is the same
- * as the current shards, clients shouldn't perceive the split.
- *
- * @param ns the collection
- * @param min max the chunk that should be split
- * @param splitKeys point in which to split
- * @param version at which the new metadata should be at
- */
- void splitChunk(OperationContext* txn,
- const std::string& ns,
- const BSONObj& min,
- const BSONObj& max,
- const std::vector<BSONObj>& splitKeys,
- ChunkVersion version);
-
- /**
- * Creates and installs a new chunk metadata for a given collection by merging a range of
- * chunks ['minKey', 'maxKey') into a single chunk with version 'mergedVersion'.
- * The current metadata must overlap the range completely and minKey and maxKey must not
- * divide an existing chunk.
- *
- * The merged chunk version must have a greater version than the current shard version,
- * and if it has a greater major version clients will need to reload metadata.
- *
- * @param ns the collection
- * @param minKey maxKey the range which should be merged
- * @param newShardVersion the shard version the newly merged chunk should have
- */
- void mergeChunks(OperationContext* txn,
- const std::string& ns,
- const BSONObj& minKey,
- const BSONObj& maxKey,
- ChunkVersion mergedVersion);
-
bool inCriticalMigrateSection();
/**
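
The doc comment above fixes the caller contract for the new exchangeCollectionMetadata(): callers build and validate the replacement metadata first, and only perform the swap while holding both the collection distributed lock and the collection X lock. A minimal caller sketch following the d_merge.cpp usage further down (the distributed lock acquisition is omitted; 'shardingState' and the lock helpers are the names used in the diff):

    // Candidate metadata already produced and validated via cloneMerge()/cloneSplit();
    // the collection distributed lock is held by the command at this point.
    std::unique_ptr<CollectionMetadata> cloned = /* result of cloneMerge()/cloneSplit() */;

    {
        // Take the collection X lock just long enough to install the new metadata.
        Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
        Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);

        shardingState->exchangeCollectionMetadata(txn, nss, std::move(cloned));
    }
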
diff --git a/src/mongo/s/d_merge.cpp b/src/mongo/s/d_merge.cpp
index 07aabc79a46..d2481310f94 100644
--- a/src/mongo/s/d_merge.cpp
+++ b/src/mongo/s/d_merge.cpp
@@ -50,7 +50,8 @@ namespace mongo {
using std::shared_ptr;
using std::string;
-using mongoutils::str::stream;
+using std::unique_ptr;
+using str::stream;
static Status runApplyOpsCmd(OperationContext* txn,
const std::vector<ChunkType>&,
@@ -226,6 +227,12 @@ bool mergeChunks(OperationContext* txn,
}
}
+ // Ensure that the newly applied chunks would result in a correct metadata state
+ string mergeErrMsg;
+ unique_ptr<CollectionMetadata> cloned(
+ metadata->cloneMerge(minKey, maxKey, mergeVersion, &mergeErrMsg));
+ uassert(ErrorCodes::IllegalOperation, mergeErrMsg, cloned.get());
+
//
// Run apply ops command
//
@@ -244,7 +251,7 @@ bool mergeChunks(OperationContext* txn,
Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);
- shardingState->mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
+ shardingState->exchangeCollectionMetadata(txn, nss, std::move(cloned));
}
//
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
index e2d06e61853..bfdc97aab79 100644
--- a/src/mongo/s/d_split.cpp
+++ b/src/mongo/s/d_split.cpp
@@ -826,6 +826,21 @@ public:
preCond.append(b.obj());
}
+ // NOTE: The newShardVersion resulting from this split is higher than any
+ // other chunk version, so it's also implicitly the newCollVersion
+ ChunkVersion newShardVersion = collVersion;
+
+ // Increment the minor version once, splitChunk increments once per split point
+ // (resulting in the correct final shard/collection version)
+ //
+ // TODO: Revisit this interface, it's a bit clunky
+ newShardVersion.incMinor();
+
+ string errMsg;
+ unique_ptr<CollectionMetadata> cloned(
+ collMetadata->cloneSplit(origChunk, splitKeys, newShardVersion, &errMsg));
+ uassert(ErrorCodes::IllegalOperation, errMsg, cloned.get());
+
//
// 4. apply the batch of updates to remote and local metadata
//
@@ -845,17 +860,7 @@ public:
Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);
- // NOTE: The newShardVersion resulting from this split is higher than any
- // other chunk version, so it's also implicitly the newCollVersion
- ChunkVersion newShardVersion = collVersion;
-
- // Increment the minor version once, splitChunk increments once per split point
- // (resulting in the correct final shard/collection version)
- //
- // TODO: Revisit this interface, it's a bit clunky
- newShardVersion.incMinor();
-
- shardingState->splitChunk(txn, nss.ns(), min, max, splitKeys, newShardVersion);
+ shardingState->exchangeCollectionMetadata(txn, nss, std::move(cloned));
}
//
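
For the relocated versioning comment in this hunk, the arithmetic works out roughly as follows (hypothetical values, assuming cloneSplit() gives the first resulting chunk the passed-in version and bumps the minor version once for each subsequent chunk):

    //   collVersion                        = 3|7   (major|minor)
    //   newShardVersion after incMinor()   = 3|8
    //   cloneSplit() with 2 split points   -> 3 chunks, versioned 3|8, 3|9, 3|10
    //   resulting shard/collection version = 3|10
    //   (one minor bump per split point, plus the single bump taken up front)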