author     Robert Guo <robert.guo@10gen.com>    2016-03-19 21:26:21 -0400
committer  Robert Guo <robert.guo@10gen.com>    2016-04-12 11:04:46 -0400
commit     3caa06819ab544bf9e88f4413c363a6439fe1998 (patch)
tree       20e22c13c5c5794cc68052f4d650e248d0c441cb
parent     80859ca45c2392fd6c7a48f94bd6805d3fd013b0 (diff)
download   mongo-3caa06819ab544bf9e88f4413c363a6439fe1998.tar.gz
SERVER-22588 make validate command work in a sharded collection
-rw-r--r--    jstests/core/apitest_dbcollection.js          4
-rw-r--r--    jstests/core/index_partial_create_drop.js     5
-rw-r--r--    jstests/core/index_partial_write_ops.js       5
-rw-r--r--    jstests/sharding/validate_collection.js      84
-rw-r--r--    src/mongo/db/commands/validate.cpp            9
-rw-r--r--    src/mongo/s/commands/commands_public.cpp     69
6 files changed, 151 insertions, 25 deletions
diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js
index d542ad6d7e1..8f0129319f7 100644
--- a/jstests/core/apitest_dbcollection.js
+++ b/jstests/core/apitest_dbcollection.js
@@ -42,7 +42,9 @@ for (i = 0; i < 100; i++) {
(function() {
var validateResult = assert.commandWorked(db.getCollection("test_db").validate());
// Extract validation results from mongos output if running in a sharded context.
- if (jsTest.isMongos(db.getMongo())) {
+ var isShardedNS = validateResult.hasOwnProperty('raw');
+
+ if (isShardedNS) {
// Sample mongos format:
// {
// raw: {
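
For reference, a minimal sketch of the two result shapes this check distinguishes; the field values and shard names below are illustrative, not taken from the patch:

    // Standalone / replica set: validation fields sit at the top level.
    // { ns: "test.test_db", nrecords: 100, valid: true, ok: 1 }
    //
    // Sharded namespace through mongos: per-shard replies are nested under "raw".
    // { raw: { "shard0000": { valid: true, ... },
    //          "shard0001": { valid: true, ... } },
    //   valid: true, ok: 1 }
    var validateResult = assert.commandWorked(db.getCollection("test_db").validate());
    var isShardedNS = validateResult.hasOwnProperty('raw');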
diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js
index 34693ec9ae9..483dc26f5e5 100644
--- a/jstests/core/index_partial_create_drop.js
+++ b/jstests/core/index_partial_create_drop.js
@@ -2,13 +2,14 @@
(function() {
"use strict";
- var isMongos = (db.runCommand("isMaster").msg === "isdbgrid");
var coll = db.index_partial_create_drop;
var getNumKeys = function(idxName) {
var res = assert.commandWorked(coll.validate(true));
var kpi;
- if (isMongos) {
+
+ var isShardedNS = res.hasOwnProperty('raw');
+ if (isShardedNS) {
kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
} else {
kpi = res.keysPerIndex;
diff --git a/jstests/core/index_partial_write_ops.js b/jstests/core/index_partial_write_ops.js
index b962347a26d..2653a2edf00 100644
--- a/jstests/core/index_partial_write_ops.js
+++ b/jstests/core/index_partial_write_ops.js
@@ -2,13 +2,14 @@
(function() {
"use strict";
- var isMongos = (db.runCommand("isMaster").msg === "isdbgrid");
var coll = db.index_partial_write_ops;
var getNumKeys = function(idxName) {
var res = assert.commandWorked(coll.validate(true));
var kpi;
- if (isMongos) {
+
+ var isShardedNS = res.hasOwnProperty('raw');
+ if (isShardedNS) {
kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
} else {
kpi = res.keysPerIndex;
diff --git a/jstests/sharding/validate_collection.js b/jstests/sharding/validate_collection.js
new file mode 100644
index 00000000000..05de51fe63e
--- /dev/null
+++ b/jstests/sharding/validate_collection.js
@@ -0,0 +1,84 @@
+'use strict';
+
+// The validate command should work in the following scenarios on a sharded environment with 3 or
+// more shards:
+//
+// 1. Collection in an unsharded DB.
+// 2. Sharded collection.
+// 3. Sharded collection with chunks on two shards while the collection's DB exists on 3 or more
+// shards. We enforce the latter condition by creating a dummy collection within the same
+// database and splitting it across the shards. See SERVER-22588 for details.
+// 4. The previous scenario, but with validation legitimately failing on one of the shards.
+
+(function() {
+ const NUM_SHARDS = 3;
+ assert(NUM_SHARDS >= 3);
+
+ var st = new ShardingTest({shards: NUM_SHARDS});
+ var shardNames = st.getShardNames();
+ var s = st.s;
+ var testDb = st.getDB('test');
+
+ function setup() {
+ assert.writeOK(testDb.test.insert({_id: 0}));
+ assert.writeOK(testDb.test.insert({_id: 1}));
+
+ assert.writeOK(testDb.dummy.insert({_id: 0}));
+ assert.writeOK(testDb.dummy.insert({_id: 1}));
+ assert.writeOK(testDb.dummy.insert({_id: 2}));
+ }
+
+ function validate(valid) {
+ var res = testDb.runCommand({validate: 'test'});
+ assert.commandWorked(res);
+ assert.eq(res.valid, valid, tojson(res));
+ }
+
+ function setFailValidateFailPointOnShard(enabled, shard) {
+ var mode;
+ if (enabled) {
+ mode = 'alwaysOn';
+ } else {
+ mode = 'off';
+ }
+
+ var res = shard.adminCommand(
+ {configureFailPoint: 'validateCmdCollectionNotValid', mode: mode});
+ assert.commandWorked(res);
+ }
+
+ setup();
+
+ // 1. Collection in an unsharded DB.
+ validate(true);
+
+ // 2. Sharded collection in a DB.
+ assert.commandWorked(s.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(s.adminCommand({shardCollection: 'test.test', key: {_id: 1}}));
+ assert.commandWorked(s.adminCommand({shardCollection: 'test.dummy', key: {_id: 1}}));
+ validate(true);
+
+ // 3. Sharded collection with chunks on two shards.
+ st.ensurePrimaryShard('test', shardNames[0]);
+ assert.commandWorked(s.adminCommand({split: 'test.test', middle: {_id: 1}}));
+ assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.test', find: {_id: 1}, to: shardNames[1]}));
+ // We move the dummy database to NUM_SHARDS shards so that testDb will exist on all NUM_SHARDS
+ // shards but the testDb.test collection will only exist on the first two shards. Prior to
+ // SERVER-22588, this scenario would cause validation to fail.
+ assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 1}}));
+ assert.commandWorked(s.adminCommand({split: 'test.dummy', middle: {_id: 2}}));
+ assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 1}, to: shardNames[1]}));
+ assert.commandWorked(
+ testDb.adminCommand({moveChunk: 'test.dummy', find: {_id: 2}, to: shardNames[2]}));
+ assert.eq(st.onNumShards('test'), 2);
+ assert.eq(st.onNumShards('dummy'), NUM_SHARDS);
+ validate(true);
+
+ // 4. Fail validation on one of the shards.
+ var primaryShard = st.getPrimaryShard('test');
+ setFailValidateFailPointOnShard(true, primaryShard);
+ validate(false);
+ setFailValidateFailPointOnShard(false, primaryShard);
+})();
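
One property worth noting (a hedged aside, not part of the test): because mongos now targets validate only at shards that own chunks for the namespace, in scenario 3 the 'raw' section of the reply should name just the two shards holding chunks of test.test, even though the database spans all three shards. A sketch of checking that, reusing the test's testDb handle; exact shard naming is not assumed:

    var res = assert.commandWorked(testDb.runCommand({validate: 'test'}));
    var reportingShards = Object.keys(res.raw);
    // test.test has chunks on exactly two shards at this point in the test.
    assert.eq(reportingShards.length, 2, tojson(res));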
diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp
index d9b7a588188..e41374cba6a 100644
--- a/src/mongo/db/commands/validate.cpp
+++ b/src/mongo/db/commands/validate.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/operation_context_impl.h"
#include "mongo/db/query/internal_plans.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -45,6 +46,8 @@ using std::endl;
using std::string;
using std::stringstream;
+MONGO_FP_DECLARE(validateCmdCollectionNotValid);
+
class ValidateCmd : public Command {
public:
ValidateCmd() : Command("validate") {}
@@ -74,6 +77,12 @@ public:
int,
string& errmsg,
BSONObjBuilder& result) {
+ if (MONGO_FAIL_POINT(validateCmdCollectionNotValid)) {
+ errmsg = "validateCmdCollectionNotValid fail point was triggered";
+ result.appendBool("valid", false);
+ return true;
+ }
+
string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
NamespaceString ns_string(ns);
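
The new fail point can also be exercised by hand from the shell against a single mongod, mirroring setFailValidateFailPointOnShard() in the test above; this is a sketch, and the collection name is a placeholder:

    // Force validate to report the collection as invalid on this node.
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: 'validateCmdCollectionNotValid', mode: 'alwaysOn'}));
    var res = db.runCommand({validate: 'some_collection'});
    assert.commandWorked(res);    // the command itself still returns ok: 1
    assert.eq(res.valid, false);  // but the collection is reported as not valid
    // Restore normal behavior.
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: 'validateCmdCollectionNotValid', mode: 'off'}));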
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index e74c21c3091..933877fc369 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -388,9 +388,9 @@ public:
} collectionModCmd;
-class ValidateCmd : public AllShardsCollectionCommand {
+class ValidateCmd : public PublicGridCommand {
public:
- ValidateCmd() : AllShardsCollectionCommand("validate") {}
+ ValidateCmd() : PublicGridCommand("validate") {}
virtual void addRequiredPrivileges(const std::string& dbname,
const BSONObj& cmdObj,
std::vector<Privilege>* out) {
@@ -398,28 +398,57 @@ public:
actions.addAction(ActionType::validate);
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual void aggregateResults(const vector<ShardAndReply>& results, BSONObjBuilder& output) {
- for (vector<ShardAndReply>::const_iterator it(results.begin()), end(results.end());
- it != end;
- it++) {
- const BSONObj& result = std::get<1>(*it);
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& output) {
+ const string ns = parseNsCollectionRequired(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(txn, dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(ns)) {
+ return passthrough(txn, conf, cmdObj, output);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager(txn, ns);
+ massert(40051, "chunk manager should not be null", cm);
+
+ vector<Strategy::CommandResult> results;
+ Strategy::commandOp(txn, dbName, cmdObj, options, cm->getns(), BSONObj(), &results);
+
+ BSONObjBuilder rawResBuilder(output.subobjStart("raw"));
+ bool isValid = true;
+ bool errored = false;
+ for (const auto& cmdResult : results) {
+ const string& shardName = cmdResult.shardTargetId;
+ BSONObj result = cmdResult.result;
const BSONElement valid = result["valid"];
- if (!valid.eoo()) {
- if (!valid.trueValue()) {
- output.appendBool("valid", false);
- return;
- }
- } else {
- // Support pre-1.9.0 output with everything in a big string
- const char* s = result["result"].valuestrsafe();
- if (strstr(s, "exception") || strstr(s, "corrupt")) {
- output.appendBool("valid", false);
- return;
- }
+ if (!valid.trueValue()) {
+ isValid = false;
}
+ if (!result["errmsg"].eoo()) {
+ // errmsg indicates a user error, so returning the message from one shard is
+ // sufficient.
+ errmsg = result["errmsg"].toString();
+ errored = true;
+ }
+ rawResBuilder.append(shardName, result);
+ }
+ rawResBuilder.done();
+
+ output.appendBool("valid", isValid);
+
+ int code = getUniqueCodeFromCommandResults(results);
+ if (code != 0) {
+ output.append("code", code);
}
- output.appendBool("valid", true);
+ if (errored) {
+ return false;
+ }
+ return true;
}
} validateCmd;
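
For illustration, a hedged sketch of the reply this rewritten run() builds for a sharded namespace: each shard's reply is embedded under 'raw', the top-level 'valid' is true only if every shard reported valid, a unique per-shard error code (if any) is surfaced as 'code', and an errmsg from any shard (a user error such as a missing collection) is copied to the top level and fails the command as a whole. Shard names and field layout below are illustrative:

    // Possible mongos reply when one shard's validation fails:
    // {
    //     raw: {
    //         "shard0000": { valid: false, ok: 1, ... },
    //         "shard0001": { valid: true, ok: 1, ... }
    //     },
    //     valid: false,
    //     ok: 1
    // }
    // A client can therefore check overall health from a single field:
    var res = db.getSiblingDB('test').runCommand({validate: 'test'});
    if (res.hasOwnProperty('raw') && !res.valid) {
        print('validation failed on at least one shard: ' + tojson(res.raw));
    }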