summaryrefslogtreecommitdiff
path: root/src/mongo/s/commands
diff options
context:
space:
mode:
authorMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 00:22:50 -0400
committerMark Benvenuto <mark.benvenuto@mongodb.com>2015-06-20 10:56:02 -0400
commit9c2ed42daa8fbbef4a919c21ec564e2db55e8d60 (patch)
tree3814f79c10d7b490948d8cb7b112ac1dd41ceff1 /src/mongo/s/commands
parent01965cf52bce6976637ecb8f4a622aeb05ab256a (diff)
downloadmongo-9c2ed42daa8fbbef4a919c21ec564e2db55e8d60.tar.gz
SERVER-18579: Clang-Format - reformat code, no comment reflow
Diffstat (limited to 'src/mongo/s/commands')
-rw-r--r--src/mongo/s/commands/cluster_add_shard_cmd.cpp163
-rw-r--r--src/mongo/s/commands/cluster_commands_common.cpp113
-rw-r--r--src/mongo/s/commands/cluster_commands_common.h48
-rw-r--r--src/mongo/s/commands/cluster_count_cmd.cpp301
-rw-r--r--src/mongo/s/commands/cluster_current_op.cpp302
-rw-r--r--src/mongo/s/commands/cluster_db_stats_cmd.cpp130
-rw-r--r--src/mongo/s/commands/cluster_drop_database_cmd.cpp126
-rw-r--r--src/mongo/s/commands/cluster_enable_sharding_cmd.cpp119
-rw-r--r--src/mongo/s/commands/cluster_explain_cmd.cpp171
-rw-r--r--src/mongo/s/commands/cluster_find_and_modify_cmd.cpp285
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp189
-rw-r--r--src/mongo/s/commands/cluster_flush_router_config_cmd.cpp71
-rw-r--r--src/mongo/s/commands/cluster_fsync_cmd.cpp119
-rw-r--r--src/mongo/s/commands/cluster_get_last_error_cmd.cpp283
-rw-r--r--src/mongo/s/commands/cluster_get_prev_error_cmd.cpp60
-rw-r--r--src/mongo/s/commands/cluster_get_shard_map_cmd.cpp77
-rw-r--r--src/mongo/s/commands/cluster_get_shard_version_cmd.cpp131
-rw-r--r--src/mongo/s/commands/cluster_index_filter_cmd.cpp220
-rw-r--r--src/mongo/s/commands/cluster_is_db_grid_cmd.cpp56
-rw-r--r--src/mongo/s/commands/cluster_is_master_cmd.cpp78
-rw-r--r--src/mongo/s/commands/cluster_kill_op.cpp143
-rw-r--r--src/mongo/s/commands/cluster_list_databases_cmd.cpp278
-rw-r--r--src/mongo/s/commands/cluster_list_shards_cmd.cpp98
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp894
-rw-r--r--src/mongo/s/commands/cluster_merge_chunks_cmd.cpp292
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp342
-rw-r--r--src/mongo/s/commands/cluster_move_primary_cmd.cpp365
-rw-r--r--src/mongo/s/commands/cluster_netstat_cmd.cpp71
-rw-r--r--src/mongo/s/commands/cluster_pipeline_cmd.cpp597
-rw-r--r--src/mongo/s/commands/cluster_plan_cache_cmd.cpp224
-rw-r--r--src/mongo/s/commands/cluster_profile_cmd.cpp64
-rw-r--r--src/mongo/s/commands/cluster_remove_shard_cmd.cpp162
-rw-r--r--src/mongo/s/commands/cluster_repair_database_cmd.cpp26
-rw-r--r--src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp88
-rw-r--r--src/mongo/s/commands/cluster_reset_error_cmd.cpp76
-rw-r--r--src/mongo/s/commands/cluster_shard_collection_cmd.cpp738
-rw-r--r--src/mongo/s/commands/cluster_shutdown_cmd.cpp45
-rw-r--r--src/mongo/s/commands/cluster_split_collection_cmd.cpp365
-rw-r--r--src/mongo/s/commands/cluster_user_management_commands.cpp1499
-rw-r--r--src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp60
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp357
-rw-r--r--src/mongo/s/commands/commands_public.cpp2581
-rw-r--r--src/mongo/s/commands/run_on_all_shards_cmd.cpp213
-rw-r--r--src/mongo/s/commands/run_on_all_shards_cmd.h102
44 files changed, 6233 insertions, 6489 deletions
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index 8fdeb3f5b4d..78c2a693846 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -41,105 +41,102 @@
namespace mongo {
- using std::string;
+using std::string;
namespace {
- class AddShardCmd : public Command {
- public:
- AddShardCmd() : Command("addShard", false, "addshard") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
+class AddShardCmd : public Command {
+public:
+ AddShardCmd() : Command("addShard", false, "addshard") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "add a new shard to the system";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::addShard);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // get replica set component hosts
+ ConnectionString servers =
+ ConnectionString::parse(cmdObj.firstElement().valuestrsafe(), errmsg);
+ if (!errmsg.empty()) {
+ log() << "addshard request " << cmdObj << " failed: " << errmsg;
return false;
}
- virtual void help(std::stringstream& help) const {
- help << "add a new shard to the system";
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- ActionSet actions;
- actions.addAction(ActionType::addShard);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- // get replica set component hosts
- ConnectionString servers = ConnectionString::parse(
- cmdObj.firstElement().valuestrsafe(), errmsg);
- if (!errmsg.empty()) {
- log() << "addshard request " << cmdObj << " failed: " << errmsg;
+ // using localhost in server names implies every other process must use localhost addresses too
+ std::vector<HostAndPort> serverAddrs = servers.getServers();
+ for (size_t i = 0; i < serverAddrs.size(); i++) {
+ if (serverAddrs[i].isLocalHost() != grid.allowLocalHost()) {
+ errmsg = str::stream()
+ << "Can't use localhost as a shard since all shards need to"
+ << " communicate. Either use all shards and configdbs in localhost"
+ << " or all in actual IPs. host: " << serverAddrs[i].toString()
+ << " isLocalHost:" << serverAddrs[i].isLocalHost();
+
+ log() << "addshard request " << cmdObj
+ << " failed: attempt to mix localhosts and IPs";
return false;
}
- // using localhost in server names implies every other process must use localhost addresses too
- std::vector<HostAndPort> serverAddrs = servers.getServers();
- for (size_t i = 0; i < serverAddrs.size(); i++) {
- if (serverAddrs[i].isLocalHost() != grid.allowLocalHost()) {
- errmsg = str::stream() <<
- "Can't use localhost as a shard since all shards need to" <<
- " communicate. Either use all shards and configdbs in localhost" <<
- " or all in actual IPs. host: " << serverAddrs[i].toString() <<
- " isLocalHost:" << serverAddrs[i].isLocalHost();
-
- log() << "addshard request " << cmdObj
- << " failed: attempt to mix localhosts and IPs";
- return false;
- }
-
- // it's fine if mongods of a set all use default port
- if (!serverAddrs[i].hasPort()) {
- serverAddrs[i] = HostAndPort(serverAddrs[i].host(),
- ServerGlobalParams::ShardServerPort);
- }
+ // it's fine if mongods of a set all use default port
+ if (!serverAddrs[i].hasPort()) {
+ serverAddrs[i] =
+ HostAndPort(serverAddrs[i].host(), ServerGlobalParams::ShardServerPort);
}
+ }
- // name is optional; addShard will provide one if needed
- string name = "";
- if (cmdObj["name"].type() == String) {
- name = cmdObj["name"].valuestrsafe();
- }
+ // name is optional; addShard will provide one if needed
+ string name = "";
+ if (cmdObj["name"].type() == String) {
+ name = cmdObj["name"].valuestrsafe();
+ }
- // maxSize is the space usage cap in a shard in MBs
- long long maxSize = 0;
- if (cmdObj[ShardType::maxSizeMB()].isNumber()) {
- maxSize = cmdObj[ShardType::maxSizeMB()].numberLong();
- }
+ // maxSize is the space usage cap in a shard in MBs
+ long long maxSize = 0;
+ if (cmdObj[ShardType::maxSizeMB()].isNumber()) {
+ maxSize = cmdObj[ShardType::maxSizeMB()].numberLong();
+ }
- audit::logAddShard(ClientBasic::getCurrent(), name, servers.toString(), maxSize);
+ audit::logAddShard(ClientBasic::getCurrent(), name, servers.toString(), maxSize);
- StatusWith<string> addShardResult =
- grid.catalogManager()->addShard(name, servers, maxSize);
- if (!addShardResult.isOK()) {
- log() << "addShard request '" << cmdObj << "'"
- << " failed: " << addShardResult.getStatus().reason();
- return appendCommandStatus(result, addShardResult.getStatus());
- }
+ StatusWith<string> addShardResult = grid.catalogManager()->addShard(name, servers, maxSize);
+ if (!addShardResult.isOK()) {
+ log() << "addShard request '" << cmdObj << "'"
+ << " failed: " << addShardResult.getStatus().reason();
+ return appendCommandStatus(result, addShardResult.getStatus());
+ }
- result << "shardAdded" << addShardResult.getValue();
+ result << "shardAdded" << addShardResult.getValue();
- return true;
- }
+ return true;
+ }
- } addShard;
+} addShard;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp
index 52afad182f5..f390a17d992 100644
--- a/src/mongo/s/commands/cluster_commands_common.cpp
+++ b/src/mongo/s/commands/cluster_commands_common.cpp
@@ -36,79 +36,76 @@
namespace mongo {
- int getUniqueCodeFromCommandResults(const std::vector<Strategy::CommandResult>& results) {
- int commonErrCode = -1;
- for (std::vector<Strategy::CommandResult>::const_iterator it = results.begin();
- it != results.end();
- ++it) {
-
- // Only look at shards with errors.
- if (!it->result["ok"].trueValue()) {
- int errCode = it->result["code"].numberInt();
-
- if (commonErrCode == -1) {
- commonErrCode = errCode;
- }
- else if (commonErrCode != errCode) {
- // At least two shards with errors disagree on the error code
- commonErrCode = 0;
- }
+int getUniqueCodeFromCommandResults(const std::vector<Strategy::CommandResult>& results) {
+ int commonErrCode = -1;
+ for (std::vector<Strategy::CommandResult>::const_iterator it = results.begin();
+ it != results.end();
+ ++it) {
+ // Only look at shards with errors.
+ if (!it->result["ok"].trueValue()) {
+ int errCode = it->result["code"].numberInt();
+
+ if (commonErrCode == -1) {
+ commonErrCode = errCode;
+ } else if (commonErrCode != errCode) {
+ // At least two shards with errors disagree on the error code
+ commonErrCode = 0;
}
}
+ }
- // If no error encountered or shards with errors disagree on the error code, return 0
- if (commonErrCode == -1 || commonErrCode == 0) {
- return 0;
- }
-
- // Otherwise, shards with errors agree on the error code; return that code
- return commonErrCode;
+ // If no error encountered or shards with errors disagree on the error code, return 0
+ if (commonErrCode == -1 || commonErrCode == 0) {
+ return 0;
}
- bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::string& ns) {
- invariant(!status.isOK());
+ // Otherwise, shards with errors agree on the error code; return that code
+ return commonErrCode;
+}
- if (status == ErrorCodes::DatabaseNotFound) {
- // Old style reply
- result << "result" << BSONArray();
+bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::string& ns) {
+ invariant(!status.isOK());
- // New (command) style reply
- appendCursorResponseObject(0LL, ns, BSONArray(), &result);
+ if (status == ErrorCodes::DatabaseNotFound) {
+ // Old style reply
+ result << "result" << BSONArray();
- return true;
- }
+ // New (command) style reply
+ appendCursorResponseObject(0LL, ns, BSONArray(), &result);
- return Command::appendCommandStatus(result, status);
+ return true;
}
- Status storePossibleCursor(const std::string& server, const BSONObj& cmdResult) {
- if (cmdResult["ok"].trueValue() && cmdResult.hasField("cursor")) {
- BSONElement cursorIdElt = cmdResult.getFieldDotted("cursor.id");
+ return Command::appendCommandStatus(result, status);
+}
+
+Status storePossibleCursor(const std::string& server, const BSONObj& cmdResult) {
+ if (cmdResult["ok"].trueValue() && cmdResult.hasField("cursor")) {
+ BSONElement cursorIdElt = cmdResult.getFieldDotted("cursor.id");
- if (cursorIdElt.type() != mongo::NumberLong) {
+ if (cursorIdElt.type() != mongo::NumberLong) {
+ return Status(ErrorCodes::TypeMismatch,
+ str::stream() << "expected \"cursor.id\" field from shard "
+ << "response to have NumberLong type, instead "
+ << "got: " << typeName(cursorIdElt.type()));
+ }
+
+ const long long cursorId = cursorIdElt.Long();
+ if (cursorId != 0) {
+ BSONElement cursorNsElt = cmdResult.getFieldDotted("cursor.ns");
+ if (cursorNsElt.type() != mongo::String) {
return Status(ErrorCodes::TypeMismatch,
- str::stream() << "expected \"cursor.id\" field from shard "
- << "response to have NumberLong type, instead "
- << "got: " << typeName(cursorIdElt.type()));
+ str::stream() << "expected \"cursor.ns\" field from "
+ << "shard response to have String type, "
+ << "instead got: " << typeName(cursorNsElt.type()));
}
- const long long cursorId = cursorIdElt.Long();
- if (cursorId != 0) {
- BSONElement cursorNsElt = cmdResult.getFieldDotted("cursor.ns");
- if (cursorNsElt.type() != mongo::String) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream() << "expected \"cursor.ns\" field from "
- << "shard response to have String type, "
- << "instead got: "
- << typeName(cursorNsElt.type()));
- }
-
- const std::string cursorNs = cursorNsElt.String();
- cursorCache.storeRef(server, cursorId, cursorNs);
- }
+ const std::string cursorNs = cursorNsElt.String();
+ cursorCache.storeRef(server, cursorId, cursorNs);
}
-
- return Status::OK();
}
-} // namespace mongo
+ return Status::OK();
+}
+
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_commands_common.h b/src/mongo/s/commands/cluster_commands_common.h
index bda035f84c6..d06e13c63f2 100644
--- a/src/mongo/s/commands/cluster_commands_common.h
+++ b/src/mongo/s/commands/cluster_commands_common.h
@@ -35,27 +35,27 @@
namespace mongo {
- class BSONObj;
-
- /**
- * Utility function to compute a single error code from a vector of command results.
- *
- * @return If there is an error code common to all of the error results, returns that error
- * code; otherwise, returns 0.
- */
- int getUniqueCodeFromCommandResults(const std::vector<Strategy::CommandResult>& results);
-
- /**
- * Utility function to return an empty result set from a command.
- */
- bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::string& ns);
-
- /**
- * Utility function to parse a cursor command response and save the cursor in the CursorCache
- * "refs" container. Returns Status::OK() if the cursor was successfully saved or no cursor
- * was specified in the command response, and returns an error Status if a parsing error was
- * encountered.
- */
- Status storePossibleCursor(const std::string& server, const BSONObj& cmdResult);
-
-} // namespace mongo
+class BSONObj;
+
+/**
+ * Utility function to compute a single error code from a vector of command results.
+ *
+ * @return If there is an error code common to all of the error results, returns that error
+ * code; otherwise, returns 0.
+ */
+int getUniqueCodeFromCommandResults(const std::vector<Strategy::CommandResult>& results);
+
+/**
+ * Utility function to return an empty result set from a command.
+ */
+bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::string& ns);
+
+/**
+ * Utility function to parse a cursor command response and save the cursor in the CursorCache
+ * "refs" container. Returns Status::OK() if the cursor was successfully saved or no cursor
+ * was specified in the command response, and returns an error Status if a parsing error was
+ * encountered.
+ */
+Status storePossibleCursor(const std::string& server, const BSONObj& cmdResult);
+
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 6d7d3f22186..9db1762b16a 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -38,210 +38,191 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
namespace {
- long long applySkipLimit(long long num, const BSONObj& cmd) {
- BSONElement s = cmd["skip"];
- BSONElement l = cmd["limit"];
+long long applySkipLimit(long long num, const BSONObj& cmd) {
+ BSONElement s = cmd["skip"];
+ BSONElement l = cmd["limit"];
- if (s.isNumber()) {
- num = num - s.numberLong();
- if (num < 0) {
- num = 0;
- }
+ if (s.isNumber()) {
+ num = num - s.numberLong();
+ if (num < 0) {
+ num = 0;
}
+ }
- if (l.isNumber()) {
- long long limit = l.numberLong();
- if (limit < 0){
- limit = -limit;
- }
-
- // 0 limit means no limit
- if (limit < num && limit != 0) {
- num = limit;
- }
+ if (l.isNumber()) {
+ long long limit = l.numberLong();
+ if (limit < 0) {
+ limit = -limit;
}
- return num;
+ // 0 limit means no limit
+ if (limit < num && limit != 0) {
+ num = limit;
+ }
}
+ return num;
+}
- class ClusterCountCmd : public Command {
- public:
- ClusterCountCmd() : Command("count", false) { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return false;
- }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+class ClusterCountCmd : public Command {
+public:
+ ClusterCountCmd() : Command("count", false) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual bool slaveOk() const {
+ return true;
+ }
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
+ virtual bool adminOnly() const {
+ return false;
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- long long skip = 0;
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
- if (cmdObj["skip"].isNumber()) {
- skip = cmdObj["skip"].numberLong();
- if (skip < 0) {
- errmsg = "skip value is negative in count query";
- return false;
- }
- }
- else if (cmdObj["skip"].ok()) {
- errmsg = "skip value is not a valid number";
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ long long skip = 0;
+
+ if (cmdObj["skip"].isNumber()) {
+ skip = cmdObj["skip"].numberLong();
+ if (skip < 0) {
+ errmsg = "skip value is negative in count query";
return false;
}
+ } else if (cmdObj["skip"].ok()) {
+ errmsg = "skip value is not a valid number";
+ return false;
+ }
- const string collection = cmdObj.firstElement().valuestrsafe();
- const string fullns = dbname + "." + collection;
-
- BSONObjBuilder countCmdBuilder;
- countCmdBuilder.append("count", collection);
+ const string collection = cmdObj.firstElement().valuestrsafe();
+ const string fullns = dbname + "." + collection;
- BSONObj filter;
- if (cmdObj["query"].isABSONObj()) {
- countCmdBuilder.append("query", cmdObj["query"].Obj());
- filter = cmdObj["query"].Obj();
- }
+ BSONObjBuilder countCmdBuilder;
+ countCmdBuilder.append("count", collection);
- if (cmdObj["limit"].isNumber()) {
- long long limit = cmdObj["limit"].numberLong();
-
- // We only need to factor in the skip value when sending to the shards if we
- // have a value for limit, otherwise, we apply it only once we have collected all
- // counts.
- if (limit != 0 && cmdObj["skip"].isNumber()) {
- if (limit > 0)
- limit += skip;
- else
- limit -= skip;
- }
+ BSONObj filter;
+ if (cmdObj["query"].isABSONObj()) {
+ countCmdBuilder.append("query", cmdObj["query"].Obj());
+ filter = cmdObj["query"].Obj();
+ }
- countCmdBuilder.append("limit", limit);
+ if (cmdObj["limit"].isNumber()) {
+ long long limit = cmdObj["limit"].numberLong();
+
+ // We only need to factor in the skip value when sending to the shards if we
+ // have a value for limit, otherwise, we apply it only once we have collected all
+ // counts.
+ if (limit != 0 && cmdObj["skip"].isNumber()) {
+ if (limit > 0)
+ limit += skip;
+ else
+ limit -= skip;
}
- if (cmdObj.hasField("hint")) {
- countCmdBuilder.append(cmdObj["hint"]);
- }
+ countCmdBuilder.append("limit", limit);
+ }
- if (cmdObj.hasField("$queryOptions")) {
- countCmdBuilder.append(cmdObj["$queryOptions"]);
- }
+ if (cmdObj.hasField("hint")) {
+ countCmdBuilder.append(cmdObj["hint"]);
+ }
- if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
- countCmdBuilder.append(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
- }
+ if (cmdObj.hasField("$queryOptions")) {
+ countCmdBuilder.append(cmdObj["$queryOptions"]);
+ }
+
+ if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
+ countCmdBuilder.append(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
+ }
- vector<Strategy::CommandResult> countResult;
- Strategy::commandOp(dbname,
- countCmdBuilder.done(),
- options,
- fullns,
- filter,
- &countResult);
+ vector<Strategy::CommandResult> countResult;
+ Strategy::commandOp(dbname, countCmdBuilder.done(), options, fullns, filter, &countResult);
- long long total = 0;
- BSONObjBuilder shardSubTotal(result.subobjStart("shards"));
+ long long total = 0;
+ BSONObjBuilder shardSubTotal(result.subobjStart("shards"));
- for (vector<Strategy::CommandResult>::const_iterator iter = countResult.begin();
- iter != countResult.end();
- ++iter) {
+ for (vector<Strategy::CommandResult>::const_iterator iter = countResult.begin();
+ iter != countResult.end();
+ ++iter) {
+ const string& shardName = iter->shardTargetId;
- const string& shardName = iter->shardTargetId;
+ if (iter->result["ok"].trueValue()) {
+ long long shardCount = iter->result["n"].numberLong();
- if (iter->result["ok"].trueValue()) {
- long long shardCount = iter->result["n"].numberLong();
+ shardSubTotal.appendNumber(shardName, shardCount);
+ total += shardCount;
+ } else {
+ shardSubTotal.doneFast();
+ errmsg = "failed on : " + shardName;
+ result.append("cause", iter->result);
- shardSubTotal.appendNumber(shardName, shardCount);
- total += shardCount;
- }
- else {
- shardSubTotal.doneFast();
- errmsg = "failed on : " + shardName;
- result.append("cause", iter->result);
-
- // Add "code" to the top-level response, if the failure of the sharded command
- // can be accounted to a single error
- int code = getUniqueCodeFromCommandResults(countResult);
- if (code != 0) {
- result.append("code", code);
- }
-
- return false;
+ // Add "code" to the top-level response, if the failure of the sharded command
+ // can be accounted to a single error
+ int code = getUniqueCodeFromCommandResults(countResult);
+ if (code != 0) {
+ result.append("code", code);
}
- }
- shardSubTotal.doneFast();
- total = applySkipLimit(total, cmdObj);
- result.appendNumber("n", total);
-
- return true;
+ return false;
+ }
}
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
+ shardSubTotal.doneFast();
+ total = applySkipLimit(total, cmdObj);
+ result.appendNumber("n", total);
- const string fullns = parseNs(dbname, cmdObj);
+ return true;
+ }
- // Extract the targeting query.
- BSONObj targetingQuery;
- if (Object == cmdObj["query"].type()) {
- targetingQuery = cmdObj["query"].Obj();
- }
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ const string fullns = parseNs(dbname, cmdObj);
+
+ // Extract the targeting query.
+ BSONObj targetingQuery;
+ if (Object == cmdObj["query"].type()) {
+ targetingQuery = cmdObj["query"].Obj();
+ }
- BSONObjBuilder explainCmdBob;
- ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
+ BSONObjBuilder explainCmdBob;
+ ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
- // We will time how long it takes to run the commands on the shards
- Timer timer;
+ // We will time how long it takes to run the commands on the shards
+ Timer timer;
- vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(dbname,
- explainCmdBob.obj(),
- 0,
- fullns,
- targetingQuery,
- &shardResults);
+ vector<Strategy::CommandResult> shardResults;
+ Strategy::commandOp(dbname, explainCmdBob.obj(), 0, fullns, targetingQuery, &shardResults);
- long long millisElapsed = timer.millis();
+ long long millisElapsed = timer.millis();
- const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults,
- cmdObj);
+ const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
- return ClusterExplain::buildExplainResult(shardResults,
- mongosStageName,
- millisElapsed,
- out);
- }
+ return ClusterExplain::buildExplainResult(
+ shardResults, mongosStageName, millisElapsed, out);
+ }
- } clusterCountCmd;
+} clusterCountCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_current_op.cpp b/src/mongo/s/commands/cluster_current_op.cpp
index c4de4dcee82..28f1311ca40 100644
--- a/src/mongo/s/commands/cluster_current_op.cpp
+++ b/src/mongo/s/commands/cluster_current_op.cpp
@@ -46,181 +46,165 @@
namespace mongo {
namespace {
- const char kInprogFieldName[] = "inprog";
- const char kOpIdFieldName[] = "opid";
- const char kClientFieldName[] = "client";
- // awkward underscores used to make this visually distinct from kClientFieldName
- const char kClient_S_FieldName[] = "client_s";
- const char kLegacyInprogCollection[] = "$cmd.sys.inprog";
-
- const char kCommandName[] = "currentOp";
-
- class ClusterCurrentOpCommand : public RunOnAllShardsCommand {
- public:
-
- ClusterCurrentOpCommand() : RunOnAllShardsCommand(kCommandName) { }
-
- bool adminOnly() const final { return true; }
-
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
-
-
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::inprog);
-
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- // TODO remove after 3.2
- BSONObj specialErrorHandler(const std::string& server,
- const std::string& db,
- const BSONObj& cmdObj,
- const BSONObj& originalResult) const final {
-
- // it is unfortunate that this logic needs to be duplicated from
- // DBClientWithCommands::runPseudoCommand
- // but I don't see a better way to do it without performing heart surgery on
- // Future/CommandResponse.
-
- auto status = getStatusFromCommandResult(originalResult);
- invariant(!status.isOK());
-
- uassert(28629,
- str::stream() << "Received bad "
- << kCommandName
- << " response from server " << server
- << " got: " << originalResult,
- status != ErrorCodes::CommandResultSchemaViolation);
-
- // getStatusFromCommandResult handles cooercing "no such command" into the right
- // Status type
- if (status == ErrorCodes::CommandNotFound) {
- // fall back to the old inprog pseudo-command
- NamespaceString pseudoCommandNss("admin", kLegacyInprogCollection);
- BSONObj legacyResult;
-
- BSONObjBuilder legacyCommandBob;
-
- // need to exclude {currentOp: 1}
- for (auto&& cmdElem : cmdObj) {
- if (cmdElem.fieldNameStringData() != kCommandName) {
- legacyCommandBob.append(cmdElem);
- }
- }
- auto legacyCommand = legacyCommandBob.done();
-
- try {
- ScopedDbConnection conn(server);
- legacyResult =
- conn->findOne(pseudoCommandNss.ns(), legacyCommand);
-
- }
- catch (const DBException& ex) {
- // If there is a non-DBException exception the entire operation will be
- // terminated, as that would be a programmer error.
-
- // We convert the exception to a BSONObj so that the ordinary
- // failure path for RunOnAllShardsCommand will handle the failure
-
- // TODO: consider adding an exceptionToBSONObj utility?
- BSONObjBuilder b;
- b.append("errmsg", ex.toString());
- b.append("code", ex.getCode());
- return b.obj();
+const char kInprogFieldName[] = "inprog";
+const char kOpIdFieldName[] = "opid";
+const char kClientFieldName[] = "client";
+// awkward underscores used to make this visually distinct from kClientFieldName
+const char kClient_S_FieldName[] = "client_s";
+const char kLegacyInprogCollection[] = "$cmd.sys.inprog";
+
+const char kCommandName[] = "currentOp";
+
+class ClusterCurrentOpCommand : public RunOnAllShardsCommand {
+public:
+ ClusterCurrentOpCommand() : RunOnAllShardsCommand(kCommandName) {}
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::inprog);
+
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ // TODO remove after 3.2
+ BSONObj specialErrorHandler(const std::string& server,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ const BSONObj& originalResult) const final {
+ // it is unfortunate that this logic needs to be duplicated from
+ // DBClientWithCommands::runPseudoCommand
+ // but I don't see a better way to do it without performing heart surgery on
+ // Future/CommandResponse.
+
+ auto status = getStatusFromCommandResult(originalResult);
+ invariant(!status.isOK());
+
+ uassert(28629,
+ str::stream() << "Received bad " << kCommandName << " response from server "
+ << server << " got: " << originalResult,
+ status != ErrorCodes::CommandResultSchemaViolation);
+
+ // getStatusFromCommandResult handles cooercing "no such command" into the right
+ // Status type
+ if (status == ErrorCodes::CommandNotFound) {
+ // fall back to the old inprog pseudo-command
+ NamespaceString pseudoCommandNss("admin", kLegacyInprogCollection);
+ BSONObj legacyResult;
+
+ BSONObjBuilder legacyCommandBob;
+
+ // need to exclude {currentOp: 1}
+ for (auto&& cmdElem : cmdObj) {
+ if (cmdElem.fieldNameStringData() != kCommandName) {
+ legacyCommandBob.append(cmdElem);
}
- return legacyResult;
}
- // if the command failed for another reason then we don't retry it.
- return originalResult;
- }
-
- void aggregateResults(const std::vector<ShardAndReply>& results,
- BSONObjBuilder& output) final {
- // Each shard responds with a document containing an array of subdocuments.
- // Each subdocument represents an operation running on that shard.
- // We merge the responses into a single document containg an array
- // of the operations from all shards.
-
- // There are two modifications we make.
- // 1) we prepend the shardid (with a colon separator) to the opid of each operation.
- // This allows users to pass the value of the opid field directly to killOp.
+ auto legacyCommand = legacyCommandBob.done();
- // 2) we change the field name of "client" to "client_s". This is because each
- // client is actually a mongos.
+ try {
+ ScopedDbConnection conn(server);
+ legacyResult = conn->findOne(pseudoCommandNss.ns(), legacyCommand);
- // TODO: failpoint for a shard response being invalid.
+ } catch (const DBException& ex) {
+ // If there is a non-DBException exception the entire operation will be
+ // terminated, as that would be a programmer error.
- // Error handling - we maintain the same behavior as legacy currentOp/inprog
- // that is, if any shard replies with an invalid response (i.e. it does not
- // contain a field 'inprog' that is an array), we ignore it.
- //
- // If there is a lower level error (i.e. the command fails, network error, etc)
- // RunOnAllShardsCommand will handle returning an error to the user.
- BSONArrayBuilder aggregatedOpsBab(output.subarrayStart(kInprogFieldName));
+ // We convert the exception to a BSONObj so that the ordinary
+ // failure path for RunOnAllShardsCommand will handle the failure
- for (auto&& shardResponse : results) {
-
- StringData shardName;
- BSONObj shardResponseObj;
- std::tie(shardName, shardResponseObj) = shardResponse;
+ // TODO: consider adding an exceptionToBSONObj utility?
+ BSONObjBuilder b;
+ b.append("errmsg", ex.toString());
+ b.append("code", ex.getCode());
+ return b.obj();
+ }
+ return legacyResult;
+ }
+ // if the command failed for another reason then we don't retry it.
+ return originalResult;
+ }
+
+ void aggregateResults(const std::vector<ShardAndReply>& results, BSONObjBuilder& output) final {
+ // Each shard responds with a document containing an array of subdocuments.
+ // Each subdocument represents an operation running on that shard.
+        // We merge the responses into a single document containing an array
+ // of the operations from all shards.
+
+ // There are two modifications we make.
+ // 1) we prepend the shardid (with a colon separator) to the opid of each operation.
+ // This allows users to pass the value of the opid field directly to killOp.
+
+ // 2) we change the field name of "client" to "client_s". This is because each
+ // client is actually a mongos.
+
+ // TODO: failpoint for a shard response being invalid.
+
+ // Error handling - we maintain the same behavior as legacy currentOp/inprog
+ // that is, if any shard replies with an invalid response (i.e. it does not
+ // contain a field 'inprog' that is an array), we ignore it.
+ //
+ // If there is a lower level error (i.e. the command fails, network error, etc)
+ // RunOnAllShardsCommand will handle returning an error to the user.
+ BSONArrayBuilder aggregatedOpsBab(output.subarrayStart(kInprogFieldName));
+
+ for (auto&& shardResponse : results) {
+ StringData shardName;
+ BSONObj shardResponseObj;
+ std::tie(shardName, shardResponseObj) = shardResponse;
+
+ auto shardOps = shardResponseObj[kInprogFieldName];
+
+ // legacy behavior
+ if (!shardOps.isABSONObj()) {
+ warning() << "invalid currentOp response from shard " << shardName
+ << ", got: " << shardOps;
+ continue;
+ }
- auto shardOps = shardResponseObj[kInprogFieldName];
+ for (auto&& shardOp : shardOps.Obj()) {
+ BSONObjBuilder modifiedShardOpBob;
- // legacy behavior
- if (!shardOps.isABSONObj()) {
- warning() << "invalid currentOp response from shard "
- << shardName
- << ", got: "
- << shardOps;
+ // maintain legacy behavior
+ // but log it first
+ if (!shardOp.isABSONObj()) {
+ warning() << "invalid currentOp response from shard " << shardName
+ << ", got: " << shardOp;
continue;
}
- for (auto&& shardOp : shardOps.Obj()) {
- BSONObjBuilder modifiedShardOpBob;
-
- // maintain legacy behavior
- // but log it first
- if (!shardOp.isABSONObj()) {
- warning() << "invalid currentOp response from shard "
- << shardName
- << ", got: "
- << shardOp;
- continue;
- }
-
- for (auto&& shardOpElement : shardOp.Obj()) {
- auto fieldName = shardOpElement.fieldNameStringData();
- if (fieldName == kOpIdFieldName) {
- uassert(28630,
- str::stream() << "expected numeric opid from currentOp response"
- << " from shard " << shardName
- << ", got: " << shardOpElement,
- shardOpElement.isNumber());
-
- modifiedShardOpBob.append(kOpIdFieldName,
- str::stream() << shardName
- << ":"
- << shardOpElement.numberInt());
- }
- else if (fieldName == kClientFieldName) {
- modifiedShardOpBob.appendAs(shardOpElement, kClient_S_FieldName);
- }
- else {
- modifiedShardOpBob.append(shardOpElement);
- }
+ for (auto&& shardOpElement : shardOp.Obj()) {
+ auto fieldName = shardOpElement.fieldNameStringData();
+ if (fieldName == kOpIdFieldName) {
+ uassert(28630,
+ str::stream() << "expected numeric opid from currentOp response"
+ << " from shard " << shardName
+ << ", got: " << shardOpElement,
+ shardOpElement.isNumber());
+
+ modifiedShardOpBob.append(kOpIdFieldName,
+ str::stream() << shardName << ":"
+ << shardOpElement.numberInt());
+ } else if (fieldName == kClientFieldName) {
+ modifiedShardOpBob.appendAs(shardOpElement, kClient_S_FieldName);
+ } else {
+ modifiedShardOpBob.append(shardOpElement);
}
- modifiedShardOpBob.done();
- // append the modified document to the output array
- aggregatedOpsBab.append(modifiedShardOpBob.obj());
}
+ modifiedShardOpBob.done();
+ // append the modified document to the output array
+ aggregatedOpsBab.append(modifiedShardOpBob.obj());
}
- aggregatedOpsBab.done();
}
+ aggregatedOpsBab.done();
+ }
- } clusterCurrentOpCmd;
+} clusterCurrentOpCmd;
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_db_stats_cmd.cpp b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
index 33f804175aa..a5a50c867cf 100644
--- a/src/mongo/s/commands/cluster_db_stats_cmd.cpp
+++ b/src/mongo/s/commands/cluster_db_stats_cmd.cpp
@@ -35,74 +35,72 @@
namespace mongo {
namespace {
- using std::vector;
-
- class DBStatsCmd : public RunOnAllShardsCommand {
- public:
- DBStatsCmd() : RunOnAllShardsCommand("dbStats", "dbstats") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dbStats);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
-
- virtual void aggregateResults(const vector<ShardAndReply>& results,
- BSONObjBuilder& output) {
- long long objects = 0;
- long long unscaledDataSize = 0;
- long long dataSize = 0;
- long long storageSize = 0;
- long long numExtents = 0;
- long long indexes = 0;
- long long indexSize = 0;
- long long fileSize = 0;
-
- long long freeListNum = 0;
- long long freeListSize = 0;
-
- for (const ShardAndReply& shardAndReply : results) {
- const BSONObj& b = std::get<1>(shardAndReply);
-
- objects += b["objects"].numberLong();
- unscaledDataSize += b["avgObjSize"].numberLong() * b["objects"].numberLong();
- dataSize += b["dataSize"].numberLong();
- storageSize += b["storageSize"].numberLong();
- numExtents += b["numExtents"].numberLong();
- indexes += b["indexes"].numberLong();
- indexSize += b["indexSize"].numberLong();
- fileSize += b["fileSize"].numberLong();
-
- if (b["extentFreeList"].isABSONObj()) {
- freeListNum += b["extentFreeList"].Obj()["num"].numberLong();
- freeListSize += b["extentFreeList"].Obj()["totalSize"].numberLong();
- }
+using std::vector;
+
+class DBStatsCmd : public RunOnAllShardsCommand {
+public:
+ DBStatsCmd() : RunOnAllShardsCommand("dbStats", "dbstats") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dbStats);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
+
+ virtual void aggregateResults(const vector<ShardAndReply>& results, BSONObjBuilder& output) {
+ long long objects = 0;
+ long long unscaledDataSize = 0;
+ long long dataSize = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
+ long long fileSize = 0;
+
+ long long freeListNum = 0;
+ long long freeListSize = 0;
+
+ for (const ShardAndReply& shardAndReply : results) {
+ const BSONObj& b = std::get<1>(shardAndReply);
+
+ objects += b["objects"].numberLong();
+ unscaledDataSize += b["avgObjSize"].numberLong() * b["objects"].numberLong();
+ dataSize += b["dataSize"].numberLong();
+ storageSize += b["storageSize"].numberLong();
+ numExtents += b["numExtents"].numberLong();
+ indexes += b["indexes"].numberLong();
+ indexSize += b["indexSize"].numberLong();
+ fileSize += b["fileSize"].numberLong();
+
+ if (b["extentFreeList"].isABSONObj()) {
+ freeListNum += b["extentFreeList"].Obj()["num"].numberLong();
+ freeListSize += b["extentFreeList"].Obj()["totalSize"].numberLong();
}
+ }
- // result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to get this
- output.appendNumber("objects", objects);
-
- // avgObjSize on mongod is not scaled based on the argument to db.stats(), so we use
- // unscaledDataSize here for consistency. See SERVER-7347.
- output.append("avgObjSize", objects == 0 ? 0 :
- double(unscaledDataSize) / double(objects));
- output.appendNumber("dataSize", dataSize);
- output.appendNumber("storageSize", storageSize);
- output.appendNumber("numExtents", numExtents);
- output.appendNumber("indexes", indexes);
- output.appendNumber("indexSize", indexSize);
- output.appendNumber("fileSize", fileSize);
-
- {
- BSONObjBuilder extentFreeList(output.subobjStart("extentFreeList"));
- extentFreeList.appendNumber("num", freeListNum);
- extentFreeList.appendNumber("totalSize", freeListSize);
- extentFreeList.done();
- }
+ // result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to get this
+ output.appendNumber("objects", objects);
+
+ // avgObjSize on mongod is not scaled based on the argument to db.stats(), so we use
+ // unscaledDataSize here for consistency. See SERVER-7347.
+ output.append("avgObjSize", objects == 0 ? 0 : double(unscaledDataSize) / double(objects));
+ output.appendNumber("dataSize", dataSize);
+ output.appendNumber("storageSize", storageSize);
+ output.appendNumber("numExtents", numExtents);
+ output.appendNumber("indexes", indexes);
+ output.appendNumber("indexSize", indexSize);
+ output.appendNumber("fileSize", fileSize);
+
+ {
+ BSONObjBuilder extentFreeList(output.subobjStart("extentFreeList"));
+ extentFreeList.appendNumber("num", freeListNum);
+ extentFreeList.appendNumber("totalSize", freeListSize);
+ extentFreeList.done();
}
+ }
- } clusterDBStatsCmd;
+} clusterDBStatsCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index 0cf6eec2f20..8ac6a6351d2 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -40,85 +40,83 @@
namespace mongo {
- using std::shared_ptr;
+using std::shared_ptr;
namespace {
- class DropDatabaseCmd : public Command {
- public:
- DropDatabaseCmd() : Command("dropDatabase") { }
-
- virtual bool slaveOk() const {
- return true;
+class DropDatabaseCmd : public Command {
+public:
+ DropDatabaseCmd() : Command("dropDatabase") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropDatabase);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // Disallow dropping the config database from mongos
+ if (dbname == "config") {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::IllegalOperation, "Cannot drop the config database"));
}
- virtual bool adminOnly() const {
- return false;
- }
+ BSONElement e = cmdObj.firstElement();
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+ if (!e.isNumber() || e.number() != 1) {
+ errmsg = "invalid params";
+ return 0;
}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- ActionSet actions;
- actions.addAction(ActionType::dropDatabase);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- // Disallow dropping the config database from mongos
- if (dbname == "config") {
- return appendCommandStatus(result, Status(ErrorCodes::IllegalOperation,
- "Cannot drop the config database"));
- }
-
- BSONElement e = cmdObj.firstElement();
+ // Refresh the database metadata
+ grid.catalogCache()->invalidate(dbname);
- if (!e.isNumber() || e.number() != 1) {
- errmsg = "invalid params";
- return 0;
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ if (status == ErrorCodes::DatabaseNotFound) {
+ result.append("info", "database does not exist");
+ return true;
}
- // Refresh the database metadata
- grid.catalogCache()->invalidate(dbname);
-
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- if (status == ErrorCodes::DatabaseNotFound) {
- result.append("info", "database does not exist");
- return true;
- }
-
- return appendCommandStatus(result, status.getStatus());
- }
-
- log() << "DROP DATABASE: " << dbname;
+ return appendCommandStatus(result, status.getStatus());
+ }
- shared_ptr<DBConfig> conf = status.getValue();
+ log() << "DROP DATABASE: " << dbname;
- // TODO: Make dropping logic saner and more tolerant of partial drops. This is
- // particularly important since a database drop can be aborted by *any* collection
- // with a distributed namespace lock taken (migrates/splits)
+ shared_ptr<DBConfig> conf = status.getValue();
- if (!conf->dropDatabase(errmsg)) {
- return false;
- }
+ // TODO: Make dropping logic saner and more tolerant of partial drops. This is
+ // particularly important since a database drop can be aborted by *any* collection
+ // with a distributed namespace lock taken (migrates/splits)
- result.append("dropped", dbname);
- return true;
+ if (!conf->dropDatabase(errmsg)) {
+ return false;
}
- } clusterDropDatabaseCmd;
+ result.append("dropped", dbname);
+ return true;
+ }
+
+} clusterDropDatabaseCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 3c17bc7433e..62749bfbab2 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -48,77 +48,74 @@
namespace mongo {
namespace {
- class EnableShardingCmd : public Command {
- public:
- EnableShardingCmd() : Command("enableSharding", false, "enablesharding") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+class EnableShardingCmd : public Command {
+public:
+ EnableShardingCmd() : Command("enableSharding", false, "enablesharding") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "Enable sharding for a database. "
+ << "(Use 'shardcollection' command afterwards.)\n"
+ << " { enablesharding : \"<dbname>\" }\n";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(parseNs(dbname, cmdObj)),
+ ActionType::enableSharding)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual void help(std::stringstream& help) const {
- help << "Enable sharding for a database. "
- << "(Use 'shardcollection' command afterwards.)\n"
- << " { enablesharding : \"<dbname>\" }\n";
- }
+ return Status::OK();
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return cmdObj.firstElement().str();
+ }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(
- parseNs(dbname, cmdObj)),
- ActionType::enableSharding)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname_unused,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const std::string dbname = parseNs("", cmdObj);
- return Status::OK();
+ if (dbname.empty() || !nsIsDbOnly(dbname)) {
+ errmsg = "invalid db name specified: " + dbname;
+ return false;
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return cmdObj.firstElement().str();
+ if (dbname == "admin" || dbname == "config" || dbname == "local") {
+ errmsg = "can't shard " + dbname + " database";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname_unused,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const std::string dbname = parseNs("", cmdObj);
-
- if (dbname.empty() || !nsIsDbOnly(dbname)) {
- errmsg = "invalid db name specified: " + dbname;
- return false;
- }
-
- if (dbname == "admin" || dbname == "config" || dbname == "local") {
- errmsg = "can't shard " + dbname + " database";
- return false;
- }
-
- Status status = grid.catalogManager()->enableSharding(dbname);
- if (status.isOK()) {
- audit::logEnableSharding(ClientBasic::getCurrent(), dbname);
- }
+ Status status = grid.catalogManager()->enableSharding(dbname);
+ if (status.isOK()) {
+ audit::logEnableSharding(ClientBasic::getCurrent(), dbname);
+ }
- // Make sure to force update of any stale metadata
- grid.catalogCache()->invalidate(dbname);
+ // Make sure to force update of any stale metadata
+ grid.catalogCache()->invalidate(dbname);
- return appendCommandStatus(result, status);
- }
+ return appendCommandStatus(result, status);
+ }
- } enableShardingCmd;
+} enableShardingCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 4fa094affed..9cb8bae6b9c 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -33,98 +33,107 @@
namespace mongo {
+/**
+ * Implements the explain command on mongos.
+ *
+ * "Old-style" explains (i.e. queries which have the $explain flag set), do not run
+ * through this path. Such explains will be supported for backwards compatibility,
+ * and must succeed in multiversion clusters.
+ *
+ * "New-style" explains use the explain command. When the explain command is routed
+ * through mongos, it is forwarded to all relevant shards. If *any* shard does not
+ * support a new-style explain, then the entire explain will fail (i.e. new-style
+ * explains cannot be used in multiversion clusters).
+ */
+class ClusterExplainCmd : public Command {
+ MONGO_DISALLOW_COPYING(ClusterExplainCmd);
+
+public:
+ ClusterExplainCmd() : Command("explain") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
/**
- * Implements the explain command on mongos.
- *
- * "Old-style" explains (i.e. queries which have the $explain flag set), do not run
- * through this path. Such explains will be supported for backwards compatibility,
- * and must succeed in multiversion clusters.
- *
- * "New-style" explains use the explain command. When the explain command is routed
- * through mongos, it is forwarded to all relevant shards. If *any* shard does not
- * support a new-style explain, then the entire explain will fail (i.e. new-style
- * explains cannot be used in multiversion clusters).
+ * Running an explain on a secondary requires explicitly setting slaveOk.
*/
- class ClusterExplainCmd : public Command {
- MONGO_DISALLOW_COPYING(ClusterExplainCmd);
- public:
- ClusterExplainCmd() : Command("explain") { }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
- /**
- * Running an explain on a secondary requires explicitly setting slaveOk.
- */
- virtual bool slaveOk() const { return false; }
- virtual bool slaveOverrideOk() const { return true; }
+ virtual bool adminOnly() const {
+ return false;
+ }
- virtual bool maintenanceOk() const { return false; }
+ virtual void help(std::stringstream& help) const {
+ help << "explain database reads and writes";
+ }
- virtual bool adminOnly() const { return false; }
+ /**
+ * You are authorized to run an explain if you are authorized to run
+ * the command that you are explaining. The auth check is performed recursively
+ * on the nested command.
+ */
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (Object != cmdObj.firstElement().type()) {
+ return Status(ErrorCodes::BadValue, "explain command requires a nested object");
+ }
- virtual void help(std::stringstream& help) const {
- help << "explain database reads and writes";
+ BSONObj explainObj = cmdObj.firstElement().Obj();
+
+ Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "unknown command: " << explainObj.firstElementFieldName();
+ return Status(ErrorCodes::CommandNotFound, ss);
}
- /**
- * You are authorized to run an explain if you are authorized to run
- * the command that you are explaining. The auth check is performed recursively
- * on the nested command.
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
-
- if (Object != cmdObj.firstElement().type()) {
- return Status(ErrorCodes::BadValue, "explain command requires a nested object");
- }
-
- BSONObj explainObj = cmdObj.firstElement().Obj();
-
- Command* commToExplain = Command::findCommand(explainObj.firstElementFieldName());
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "unknown command: " << explainObj.firstElementFieldName();
- return Status(ErrorCodes::CommandNotFound, ss);
- }
-
- return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+ return commToExplain->checkAuthForCommand(client, dbname, explainObj);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ExplainCommon::Verbosity verbosity;
+ Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
+ if (!parseStatus.isOK()) {
+ return appendCommandStatus(result, parseStatus);
}
- virtual bool run(OperationContext* txn,
- const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- ExplainCommon::Verbosity verbosity;
- Status parseStatus = ExplainCommon::parseCmdBSON(cmdObj, &verbosity);
- if (!parseStatus.isOK()) {
- return appendCommandStatus(result, parseStatus);
- }
-
- // This is the nested command which we are explaining.
- BSONObj explainObj = cmdObj.firstElement().Obj();
-
- const std::string cmdName = explainObj.firstElementFieldName();
- Command* commToExplain = Command::findCommand(cmdName);
- if (NULL == commToExplain) {
- mongoutils::str::stream ss;
- ss << "Explain failed due to unknown command: " << cmdName;
- Status explainStatus(ErrorCodes::CommandNotFound, ss);
- return appendCommandStatus(result, explainStatus);
- }
-
- // Actually call the nested command's explain(...) method.
- Status explainStatus = commToExplain->explain(txn, dbName, explainObj, verbosity, &result);
- if (!explainStatus.isOK()) {
- return appendCommandStatus(result, explainStatus);
- }
-
- return true;
+ // This is the nested command which we are explaining.
+ BSONObj explainObj = cmdObj.firstElement().Obj();
+
+ const std::string cmdName = explainObj.firstElementFieldName();
+ Command* commToExplain = Command::findCommand(cmdName);
+ if (NULL == commToExplain) {
+ mongoutils::str::stream ss;
+ ss << "Explain failed due to unknown command: " << cmdName;
+ Status explainStatus(ErrorCodes::CommandNotFound, ss);
+ return appendCommandStatus(result, explainStatus);
}
- } cmdExplainCluster;
+ // Actually call the nested command's explain(...) method.
+ Status explainStatus = commToExplain->explain(txn, dbName, explainObj, verbosity, &result);
+ if (!explainStatus.isOK()) {
+ return appendCommandStatus(result, explainStatus);
+ }
+
+ return true;
+ }
+
+} cmdExplainCluster;
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index 8fcff46f1cd..4f14f66fe2c 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -49,192 +49,181 @@
namespace mongo {
namespace {
- using std::shared_ptr;
- using std::string;
- using std::vector;
+using std::shared_ptr;
+using std::string;
+using std::vector;
- class FindAndModifyCmd : public Command {
- public:
- FindAndModifyCmd() : Command("findAndModify", false, "findandmodify") { }
+class FindAndModifyCmd : public Command {
+public:
+ FindAndModifyCmd() : Command("findAndModify", false, "findandmodify") {}
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return false;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
- }
-
- virtual Status explain(OperationContext* txn,
- const std::string& dbName,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
-
- const string ns = parseNsCollectionRequired(dbName, cmdObj);
-
- auto status = grid.catalogCache()->getDatabase(dbName);
- uassertStatusOK(status);
+ virtual bool slaveOk() const {
+ return true;
+ }
- shared_ptr<DBConfig> conf = status.getValue();
+ virtual bool adminOnly() const {
+ return false;
+ }
- shared_ptr<Shard> shard;
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if (!conf->isShardingEnabled() || !conf->isSharded(ns)) {
- shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- }
- else {
- shared_ptr<ChunkManager> chunkMgr = _getChunkManager(conf, ns);
-
- const BSONObj query = cmdObj.getObjectField("query");
-
- StatusWith<BSONObj> status = _getShardKey(chunkMgr, query);
- if (!status.isOK()) {
- return status.getStatus();
- }
-
- BSONObj shardKey = status.getValue();
- ChunkPtr chunk = chunkMgr->findIntersectingChunk(shardKey);
-
- shard = grid.shardRegistry()->getShard(chunk->getShardId());
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
+ }
- BSONObjBuilder explainCmd;
- ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmd);
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ const string ns = parseNsCollectionRequired(dbName, cmdObj);
- // Time how long it takes to run the explain command on the shard.
- Timer timer;
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ uassertStatusOK(status);
- BSONObjBuilder result;
- bool ok = _runCommand(conf, shard->getId(), ns, explainCmd.obj(), result);
- long long millisElapsed = timer.millis();
+ shared_ptr<DBConfig> conf = status.getValue();
- if (!ok) {
- BSONObj res = result.obj();
- return Status(ErrorCodes::OperationFailed,
- str::stream() << "Explain for findAndModify failed: " << res);
- }
-
- Strategy::CommandResult cmdResult;
- cmdResult.shardTargetId = shard->getId();
- cmdResult.target = shard->getConnString();
- cmdResult.result = result.obj();
-
- vector<Strategy::CommandResult> shardResults;
- shardResults.push_back(cmdResult);
-
- return ClusterExplain::buildExplainResult(shardResults,
- ClusterExplain::kSingleShard,
- millisElapsed,
- out);
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const string ns = parseNsCollectionRequired(dbName, cmdObj);
-
- // findAndModify should only be creating database if upsert is true, but this would
- // require that the parsing be pulled into this function.
- auto conf = uassertStatusOK(grid.implicitCreateDb(dbName));
- if (!conf->isShardingEnabled() || !conf->isSharded(ns)) {
- return _runCommand(conf, conf->getPrimaryId(), ns, cmdObj, result);
- }
+ shared_ptr<Shard> shard;
+ if (!conf->isShardingEnabled() || !conf->isSharded(ns)) {
+ shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ } else {
shared_ptr<ChunkManager> chunkMgr = _getChunkManager(conf, ns);
const BSONObj query = cmdObj.getObjectField("query");
StatusWith<BSONObj> status = _getShardKey(chunkMgr, query);
if (!status.isOK()) {
- // Bad query
- return appendCommandStatus(result, status.getStatus());
+ return status.getStatus();
}
BSONObj shardKey = status.getValue();
ChunkPtr chunk = chunkMgr->findIntersectingChunk(shardKey);
- bool ok = _runCommand(conf, chunk->getShardId(), ns, cmdObj, result);
- if (ok) {
- // check whether split is necessary (using update object for size heuristic)
- if (Chunk::ShouldAutoSplit) {
- chunk->splitIfShould(cmdObj.getObjectField("update").objsize());
- }
- }
-
- return ok;
+ shard = grid.shardRegistry()->getShard(chunk->getShardId());
}
- private:
- shared_ptr<ChunkManager> _getChunkManager(shared_ptr<DBConfig> conf,
- const string& ns) const {
+ BSONObjBuilder explainCmd;
+ ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmd);
- shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(ns);
- massert(13002, "shard internal error chunk manager should never be null", chunkMgr);
+ // Time how long it takes to run the explain command on the shard.
+ Timer timer;
- return chunkMgr;
+ BSONObjBuilder result;
+ bool ok = _runCommand(conf, shard->getId(), ns, explainCmd.obj(), result);
+ long long millisElapsed = timer.millis();
+
+ if (!ok) {
+ BSONObj res = result.obj();
+ return Status(ErrorCodes::OperationFailed,
+ str::stream() << "Explain for findAndModify failed: " << res);
}
- StatusWith<BSONObj> _getShardKey(shared_ptr<ChunkManager> chunkMgr,
- const BSONObj& query) const {
+ Strategy::CommandResult cmdResult;
+ cmdResult.shardTargetId = shard->getId();
+ cmdResult.target = shard->getConnString();
+ cmdResult.result = result.obj();
+
+ vector<Strategy::CommandResult> shardResults;
+ shardResults.push_back(cmdResult);
+
+ return ClusterExplain::buildExplainResult(
+ shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const string ns = parseNsCollectionRequired(dbName, cmdObj);
+
+ // findAndModify should only be creating database if upsert is true, but this would
+ // require that the parsing be pulled into this function.
+ auto conf = uassertStatusOK(grid.implicitCreateDb(dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(ns)) {
+ return _runCommand(conf, conf->getPrimaryId(), ns, cmdObj, result);
+ }
- // Verify that the query has an equality predicate using the shard key
- StatusWith<BSONObj> status =
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(query);
+ shared_ptr<ChunkManager> chunkMgr = _getChunkManager(conf, ns);
- if (!status.isOK()) {
- return status;
- }
+ const BSONObj query = cmdObj.getObjectField("query");
- BSONObj shardKey = status.getValue();
+ StatusWith<BSONObj> status = _getShardKey(chunkMgr, query);
+ if (!status.isOK()) {
+ // Bad query
+ return appendCommandStatus(result, status.getStatus());
+ }
- if (shardKey.isEmpty()) {
- return Status(ErrorCodes::ShardKeyNotFound,
- "query for sharded findAndModify must have shardkey");
- }
+ BSONObj shardKey = status.getValue();
+ ChunkPtr chunk = chunkMgr->findIntersectingChunk(shardKey);
- return shardKey;
+ bool ok = _runCommand(conf, chunk->getShardId(), ns, cmdObj, result);
+ if (ok) {
+ // check whether split is necessary (using update object for size heuristic)
+ if (Chunk::ShouldAutoSplit) {
+ chunk->splitIfShould(cmdObj.getObjectField("update").objsize());
+ }
}
- bool _runCommand(DBConfigPtr conf,
- const ShardId& shardId,
- const string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) const {
+ return ok;
+ }
- BSONObj res;
+private:
+ shared_ptr<ChunkManager> _getChunkManager(shared_ptr<DBConfig> conf, const string& ns) const {
+ shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(ns);
+ massert(13002, "shard internal error chunk manager should never be null", chunkMgr);
- const auto shard = grid.shardRegistry()->getShard(shardId);
- ShardConnection conn(shard->getConnString(), ns);
- bool ok = conn->runCommand(conf->name(), cmdObj, res);
- conn.done();
+ return chunkMgr;
+ }
- // RecvStaleConfigCode is the code for RecvStaleConfigException.
- if (!ok && res.getIntField("code") == RecvStaleConfigCode) {
- // Command code traps this exception and re-runs
- throw RecvStaleConfigException("FindAndModify", res);
- }
+ StatusWith<BSONObj> _getShardKey(shared_ptr<ChunkManager> chunkMgr,
+ const BSONObj& query) const {
+ // Verify that the query has an equality predicate using the shard key
+ StatusWith<BSONObj> status = chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(query);
+
+ if (!status.isOK()) {
+ return status;
+ }
+
+ BSONObj shardKey = status.getValue();
- result.appendElements(res);
- return ok;
+ if (shardKey.isEmpty()) {
+ return Status(ErrorCodes::ShardKeyNotFound,
+ "query for sharded findAndModify must have shardkey");
}
- } findAndModifyCmd;
+ return shardKey;
+ }
+
+ bool _runCommand(DBConfigPtr conf,
+ const ShardId& shardId,
+ const string& ns,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) const {
+ BSONObj res;
+
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ ShardConnection conn(shard->getConnString(), ns);
+ bool ok = conn->runCommand(conf->name(), cmdObj, res);
+ conn.done();
+
+ // RecvStaleConfigCode is the code for RecvStaleConfigException.
+ if (!ok && res.getIntField("code") == RecvStaleConfigCode) {
+ // Command code traps this exception and re-runs
+ throw RecvStaleConfigException("FindAndModify", res);
+ }
+
+ result.appendElements(res);
+ return ok;
+ }
+
+} findAndModifyCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index f85b65fe39e..b40e919ca3c 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -37,111 +37,118 @@
namespace mongo {
namespace {
- using std::unique_ptr;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::string;
+using std::vector;
- /**
- * Implements the find command on mongos.
- *
- * TODO: this is just a placeholder. It needs to be implemented for real under SERVER-15176.
- */
- class ClusterFindCmd : public Command {
- MONGO_DISALLOW_COPYING(ClusterFindCmd);
- public:
- ClusterFindCmd() : Command("find") { }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
+/**
+ * Implements the find command on mongos.
+ *
+ * TODO: this is just a placeholder. It needs to be implemented for real under SERVER-15176.
+ */
+class ClusterFindCmd : public Command {
+ MONGO_DISALLOW_COPYING(ClusterFindCmd);
- virtual bool slaveOk() const { return false; }
+public:
+ ClusterFindCmd() : Command("find") {}
- virtual bool slaveOverrideOk() const { return true; }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool maintenanceOk() const { return false; }
+ virtual bool slaveOk() const {
+ return false;
+ }
- virtual bool adminOnly() const { return false; }
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
- virtual void help(std::stringstream& help) const {
- help << "query for documents";
- }
+ virtual bool maintenanceOk() const {
+ return false;
+ }
- /**
- * In order to run the find command, you must be authorized for the "find" action
- * type on the collection.
- */
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ virtual bool adminOnly() const {
+ return false;
+ }
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+ virtual void help(std::stringstream& help) const {
+ help << "query for documents";
+ }
- if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
- return Status::OK();
- }
+ /**
+ * In order to run the find command, you must be authorized for the "find" action
+ * type on the collection.
+ */
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::find)) {
+ return Status::OK();
+ }
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ const string fullns = parseNs(dbname, cmdObj);
+ const NamespaceString nss(fullns);
+ if (!nss.isValid()) {
+ return {ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid collection name: " << nss.ns()};
}
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
-
- const string fullns = parseNs(dbname, cmdObj);
- const NamespaceString nss(fullns);
- if (!nss.isValid()) {
- return {ErrorCodes::InvalidNamespace,
- str::stream() << "Invalid collection name: " << nss.ns()};
- }
-
- // Parse the command BSON to a LiteParsedQuery.
- bool isExplain = true;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
- }
-
- auto& lpq = lpqStatus.getValue();
-
- BSONObjBuilder explainCmdBob;
- ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
-
- // We will time how long it takes to run the commands on the shards.
- Timer timer;
-
- vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(dbname,
- explainCmdBob.obj(),
- lpq->getOptions(),
- fullns,
- lpq->getFilter(),
- &shardResults);
-
- long long millisElapsed = timer.millis();
-
- const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
-
- return ClusterExplain::buildExplainResult(shardResults,
- mongosStageName,
- millisElapsed,
- out);
+ // Parse the command BSON to a LiteParsedQuery.
+ bool isExplain = true;
+ auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!lpqStatus.isOK()) {
+ return lpqStatus.getStatus();
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj, int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ auto& lpq = lpqStatus.getValue();
- // Currently only explains of finds run through the find command. Queries that are not
- // explained use the legacy OP_QUERY path.
- errmsg = "find command not yet implemented";
- return false;
- }
+ BSONObjBuilder explainCmdBob;
+ ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
+
+ // We will time how long it takes to run the commands on the shards.
+ Timer timer;
+
+ vector<Strategy::CommandResult> shardResults;
+ Strategy::commandOp(dbname,
+ explainCmdBob.obj(),
+ lpq->getOptions(),
+ fullns,
+ lpq->getFilter(),
+ &shardResults);
+
+ long long millisElapsed = timer.millis();
+
+ const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
+
+ return ClusterExplain::buildExplainResult(
+ shardResults, mongosStageName, millisElapsed, out);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // Currently only explains of finds run through the find command. Queries that are not
+ // explained use the legacy OP_QUERY path.
+ errmsg = "find command not yet implemented";
+ return false;
+ }
- } cmdFindCluster;
+} cmdFindCluster;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
index 19477622b64..dbb52870602 100644
--- a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
+++ b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
@@ -36,48 +36,47 @@
namespace mongo {
namespace {
- class FlushRouterConfigCmd : public Command {
- public:
- FlushRouterConfigCmd() : Command("flushRouterConfig", false, "flushrouterconfig") { }
+class FlushRouterConfigCmd : public Command {
+public:
+ FlushRouterConfigCmd() : Command("flushRouterConfig", false, "flushrouterconfig") {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool adminOnly() const {
- return true;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual void help(std::stringstream& help) const {
- help << "flush all router config";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "flush all router config";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::flushRouterConfig);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::flushRouterConfig);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ grid.catalogCache()->invalidateAll();
- grid.catalogCache()->invalidateAll();
+ result.appendBool("flushed", true);
+ return true;
+ }
- result.appendBool("flushed", true);
- return true;
- }
+} flushRouterConfigCmd;
- } flushRouterConfigCmd;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 99ae1cf8fea..f9dc78ee9d7 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -36,77 +36,76 @@
namespace mongo {
namespace {
- class FsyncCommand : public Command {
- public:
- FsyncCommand() : Command("fsync", false, "fsync") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
+class FsyncCommand : public Command {
+public:
+ FsyncCommand() : Command("fsync", false, "fsync") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "invoke fsync on all shards belonging to the cluster";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::fsync);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ if (cmdObj["lock"].trueValue()) {
+ errmsg = "can't do lock through mongos";
return false;
}
- virtual void help(std::stringstream& help) const {
- help << "invoke fsync on all shards belonging to the cluster";
- }
+ BSONObjBuilder sub;
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::fsync);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+ bool ok = true;
+ int numFiles = 0;
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ std::vector<ShardId> shardIds;
+ grid.shardRegistry()->getAllShardIds(&shardIds);
- if (cmdObj["lock"].trueValue()) {
- errmsg = "can't do lock through mongos";
- return false;
+ for (const ShardId& shardId : shardIds) {
+ const auto s = grid.shardRegistry()->getShard(shardId);
+ if (!s) {
+ continue;
}
- BSONObjBuilder sub;
+ BSONObj x = s->runCommand("admin", "fsync");
+ sub.append(s->getId(), x);
- bool ok = true;
- int numFiles = 0;
-
- std::vector<ShardId> shardIds;
- grid.shardRegistry()->getAllShardIds(&shardIds);
-
- for (const ShardId& shardId : shardIds) {
- const auto s = grid.shardRegistry()->getShard(shardId);
- if (!s) {
- continue;
- }
-
- BSONObj x = s->runCommand("admin", "fsync");
- sub.append(s->getId(), x);
-
- if (!x["ok"].trueValue()) {
- ok = false;
- errmsg = x["errmsg"].String();
- }
-
- numFiles += x["numFiles"].numberInt();
+ if (!x["ok"].trueValue()) {
+ ok = false;
+ errmsg = x["errmsg"].String();
}
- result.append("numFiles", numFiles);
- result.append("all", sub.obj());
- return ok;
+ numFiles += x["numFiles"].numberInt();
}
- } fsyncCmd;
+ result.append("numFiles", numFiles);
+ result.append("all", sub.obj());
+ return ok;
+ }
+
+} fsyncCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 45a1cc9792e..1e219d77114 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -41,174 +41,163 @@
namespace mongo {
namespace {
- class GetLastErrorCmd : public Command {
- public:
- GetLastErrorCmd() : Command("getLastError", false, "getlasterror") { }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "check for an error on the last command executed";
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- // No auth required for getlasterror
+class GetLastErrorCmd : public Command {
+public:
+ GetLastErrorCmd() : Command("getLastError", false, "getlasterror") {}
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "check for an error on the last command executed";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required for getlasterror
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // Mongos GLE - finicky.
+ //
+ // To emulate mongod, we first append any write errors we had, then try to append
+ // write concern error if there was no write error. We need to contact the previous
+ // shards regardless to maintain 2.4 behavior.
+ //
+ // If there are any unexpected or connectivity errors when calling GLE, fail the
+ // command.
+ //
+ // Finally, report the write concern errors IF we don't already have an error.
+ // If we only get one write concern error back, report that, otherwise report an
+ // aggregated error.
+ //
+ // TODO: Do we need to contact the prev shards regardless - do we care that much
+ // about 2.4 behavior?
+ //
+
+ LastError* le = &LastError::get(cc());
+ le->disable();
+
+
+ // Write commands always have the error stored in the mongos last error
+ bool errorOccurred = false;
+ if (le->getNPrev() == 1) {
+ errorOccurred = le->appendSelf(result, false);
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- // Mongos GLE - finicky.
- //
- // To emulate mongod, we first append any write errors we had, then try to append
- // write concern error if there was no write error. We need to contact the previous
- // shards regardless to maintain 2.4 behavior.
- //
- // If there are any unexpected or connectivity errors when calling GLE, fail the
- // command.
- //
- // Finally, report the write concern errors IF we don't already have an error.
- // If we only get one write concern error back, report that, otherwise report an
- // aggregated error.
- //
- // TODO: Do we need to contact the prev shards regardless - do we care that much
- // about 2.4 behavior?
- //
-
- LastError *le = &LastError::get(cc());
- le->disable();
-
-
- // Write commands always have the error stored in the mongos last error
- bool errorOccurred = false;
- if (le->getNPrev() == 1) {
- errorOccurred = le->appendSelf(result, false);
- }
-
- // For compatibility with 2.4 sharded GLE, we always enforce the write concern
- // across all shards.
- const HostOpTimeMap hostOpTimes(ClusterLastErrorInfo::get(cc()).getPrevHostOpTimes());
- HostOpTimeMap resolvedHostOpTimes;
-
- Status status(Status::OK());
- for (HostOpTimeMap::const_iterator it = hostOpTimes.begin();
- it != hostOpTimes.end();
- ++it) {
-
- const ConnectionString& shardEndpoint = it->first;
- const HostOpTime& hot = it->second;
+ // For compatibility with 2.4 sharded GLE, we always enforce the write concern
+ // across all shards.
+ const HostOpTimeMap hostOpTimes(ClusterLastErrorInfo::get(cc()).getPrevHostOpTimes());
+ HostOpTimeMap resolvedHostOpTimes;
- ConnectionString resolvedHost;
- status = DBClientShardResolver::findMaster(shardEndpoint, &resolvedHost);
- if (!status.isOK()) {
- break;
- }
-
- resolvedHostOpTimes[resolvedHost] = hot;
- }
-
- DBClientMultiCommand dispatcher;
- std::vector<LegacyWCResponse> wcResponses;
- if (status.isOK()) {
- status = enforceLegacyWriteConcern(&dispatcher,
- dbname,
- cmdObj,
- resolvedHostOpTimes,
- &wcResponses);
- }
-
- // Don't forget about our last hosts, reset the client info
- ClusterLastErrorInfo::get(cc()).disableForCommand();
-
- // We're now done contacting all remote servers, just report results
+ Status status(Status::OK());
+ for (HostOpTimeMap::const_iterator it = hostOpTimes.begin(); it != hostOpTimes.end();
+ ++it) {
+ const ConnectionString& shardEndpoint = it->first;
+ const HostOpTime& hot = it->second;
+ ConnectionString resolvedHost;
+ status = DBClientShardResolver::findMaster(shardEndpoint, &resolvedHost);
if (!status.isOK()) {
- // Return immediately if we failed to contact a shard, unexpected GLE issue
- // Can't return code, since it may have been set above (2.4 compatibility)
- result.append("errmsg", status.reason());
- return false;
+ break;
}
- // Go through all the write concern responses and find errors
- BSONArrayBuilder shards;
- BSONObjBuilder shardRawGLE;
- BSONArrayBuilder errors;
- BSONArrayBuilder errorRawGLE;
+ resolvedHostOpTimes[resolvedHost] = hot;
+ }
- int numWCErrors = 0;
- const LegacyWCResponse* lastErrResponse = NULL;
+ DBClientMultiCommand dispatcher;
+ std::vector<LegacyWCResponse> wcResponses;
+ if (status.isOK()) {
+ status = enforceLegacyWriteConcern(
+ &dispatcher, dbname, cmdObj, resolvedHostOpTimes, &wcResponses);
+ }
- for (std::vector<LegacyWCResponse>::const_iterator it = wcResponses.begin();
- it != wcResponses.end();
- ++it) {
+ // Don't forget about our last hosts, reset the client info
+ ClusterLastErrorInfo::get(cc()).disableForCommand();
- const LegacyWCResponse& wcResponse = *it;
+ // We're now done contacting all remote servers, just report results
- shards.append(wcResponse.shardHost);
- shardRawGLE.append(wcResponse.shardHost, wcResponse.gleResponse);
+ if (!status.isOK()) {
+ // Return immediately if we failed to contact a shard, unexpected GLE issue
+ // Can't return code, since it may have been set above (2.4 compatibility)
+ result.append("errmsg", status.reason());
+ return false;
+ }
- if (!wcResponse.errToReport.empty()) {
- numWCErrors++;
- lastErrResponse = &wcResponse;
- errors.append(wcResponse.errToReport);
- errorRawGLE.append(wcResponse.gleResponse);
- }
- }
+ // Go through all the write concern responses and find errors
+ BSONArrayBuilder shards;
+ BSONObjBuilder shardRawGLE;
+ BSONArrayBuilder errors;
+ BSONArrayBuilder errorRawGLE;
- // Always report what we found to match 2.4 behavior and for debugging
- if (wcResponses.size() == 1u) {
- result.append("singleShard", wcResponses.front().shardHost);
- }
- else {
- result.append("shards", shards.arr());
- result.append("shardRawGLE", shardRawGLE.obj());
- }
+ int numWCErrors = 0;
+ const LegacyWCResponse* lastErrResponse = NULL;
- // Suppress write concern errors if a write error occurred, to match mongod behavior
- if (errorOccurred || numWCErrors == 0) {
- // Still need to return err
- if (!errorOccurred) {
- result.appendNull("err");
- }
+ for (std::vector<LegacyWCResponse>::const_iterator it = wcResponses.begin();
+ it != wcResponses.end();
+ ++it) {
+ const LegacyWCResponse& wcResponse = *it;
- return true;
- }
+ shards.append(wcResponse.shardHost);
+ shardRawGLE.append(wcResponse.shardHost, wcResponse.gleResponse);
- if (numWCErrors == 1) {
- // Return the single write concern error we found, err should be set or not
- // from gle response
- result.appendElements(lastErrResponse->gleResponse);
- return lastErrResponse->gleResponse["ok"].trueValue();
+ if (!wcResponse.errToReport.empty()) {
+ numWCErrors++;
+ lastErrResponse = &wcResponse;
+ errors.append(wcResponse.errToReport);
+ errorRawGLE.append(wcResponse.gleResponse);
}
- else {
+ }
- // Return a generic combined WC error message
- result.append("errs", errors.arr());
- result.append("errObjects", errorRawGLE.arr());
+ // Always report what we found to match 2.4 behavior and for debugging
+ if (wcResponses.size() == 1u) {
+ result.append("singleShard", wcResponses.front().shardHost);
+ } else {
+ result.append("shards", shards.arr());
+ result.append("shardRawGLE", shardRawGLE.obj());
+ }
- // Need to always return err
+ // Suppress write concern errors if a write error occurred, to match mongod behavior
+ if (errorOccurred || numWCErrors == 0) {
+ // Still need to return err
+ if (!errorOccurred) {
result.appendNull("err");
-
- return appendCommandStatus(result,
- Status(ErrorCodes::WriteConcernFailed,
- "multiple write concern errors occurred"));
}
+
+ return true;
+ }
+
+ if (numWCErrors == 1) {
+ // Return the single write concern error we found, err should be set or not
+ // from gle response
+ result.appendElements(lastErrResponse->gleResponse);
+ return lastErrResponse->gleResponse["ok"].trueValue();
+ } else {
+ // Return a generic combined WC error message
+ result.append("errs", errors.arr());
+ result.append("errObjects", errorRawGLE.arr());
+
+ // Need to always return err
+ result.appendNull("err");
+
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::WriteConcernFailed, "multiple write concern errors occurred"));
}
+ }
- } cmdGetLastError;
+} cmdGetLastError;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
index 5e04c0471ea..c0dcdfc618c 100644
--- a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
@@ -38,41 +38,39 @@
namespace mongo {
namespace {
- class GetPrevErrorCmd : public Command {
- public:
- GetPrevErrorCmd() : Command("getPrevError", false, "getpreverror") { }
+class GetPrevErrorCmd : public Command {
+public:
+ GetPrevErrorCmd() : Command("getPrevError", false, "getpreverror") {}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help(std::stringstream& help) const {
- help << "get previous error (since last reseterror command)";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "get previous error (since last reseterror command)";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
+ }
- // No auth required
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ errmsg += "getpreverror not supported for sharded environments";
+ return false;
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+} cmdGetPrevError;
- errmsg += "getpreverror not supported for sharded environments";
- return false;
- }
-
- } cmdGetPrevError;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
index cd5ee8608fe..0fcdc77d6fc 100644
--- a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
@@ -38,50 +38,51 @@
namespace mongo {
namespace {
- class CmdGetShardMap : public Command {
- public:
- CmdGetShardMap() : Command("getShardMap") { }
+class CmdGetShardMap : public Command {
+public:
+ CmdGetShardMap() : Command("getShardMap") {}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help(std::stringstream& help) const {
- help << "lists the set of shards known to this instance";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "lists the set of shards known to this instance";
+ }
- virtual bool adminOnly() const { return true; }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::getShardMap);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::getShardMap);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- // MongoD instances do not know that they are part of a sharded cluster until they
- // receive a setShardVersion command and that's when the catalog manager and the shard
- // registry get initialized.
- if (grid.shardRegistry()) {
- grid.shardRegistry()->toBSON(&result);
- }
-
- return true;
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // MongoD instances do not know that they are part of a sharded cluster until they
+ // receive a setShardVersion command and that's when the catalog manager and the shard
+ // registry get initialized.
+ if (grid.shardRegistry()) {
+ grid.shardRegistry()->toBSON(&result);
}
- } getShardMapCmd;
+ return true;
+ }
+
+} getShardMapCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
index 486906aaf4a..54d88f7d6bb 100644
--- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
@@ -42,86 +42,83 @@
namespace mongo {
- using std::shared_ptr;
+using std::shared_ptr;
namespace {
- class GetShardVersion : public Command {
- public:
- GetShardVersion() : Command("getShardVersion", false, "getshardversion") { }
-
- virtual bool slaveOk() const {
- return true;
+class GetShardVersion : public Command {
+public:
+ GetShardVersion() : Command("getShardVersion", false, "getshardversion") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << " example: { getShardVersion : 'alleyinsider.foo' } ";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::getShardVersion)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool adminOnly() const {
- return true;
+ return Status::OK();
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ if (nss.size() == 0) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::InvalidNamespace, "no namespace specified"));
}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+ auto status = grid.catalogCache()->getDatabase(nss.db().toString());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
}
- virtual void help(std::stringstream& help) const {
- help << " example: { getShardVersion : 'alleyinsider.foo' } ";
+ std::shared_ptr<DBConfig> config = status.getValue();
+ if (!config->isSharded(nss.ns())) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(parseNs(dbname,
- cmdObj))),
- ActionType::getShardVersion)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- return Status::OK();
+ ChunkManagerPtr cm = config->getChunkManagerIfExists(nss.ns());
+ if (!cm) {
+ errmsg = "no chunk manager?";
+ return false;
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
- }
+ cm->_printChunks();
+ cm->getVersion().addToBSON(result);
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
- if (nss.size() == 0) {
- return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace,
- "no namespace specified"));
- }
-
- auto status = grid.catalogCache()->getDatabase(nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
-
- ChunkManagerPtr cm = config->getChunkManagerIfExists(nss.ns());
- if (!cm) {
- errmsg = "no chunk manager?";
- return false;
- }
-
- cm->_printChunks();
- cm->getVersion().addToBSON(result);
-
- return true;
- }
+ return true;
+ }
- } getShardVersionCmd;
+} getShardVersionCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
index 323ad646a33..d3465a4bb48 100644
--- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
@@ -38,137 +38,135 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::string;
+using std::stringstream;
+using std::vector;
- /**
- * Base class for mongos index filter commands.
- * Cluster index filter commands don't do much more than
- * forwarding the commands to all shards and combining the results.
- */
- class ClusterIndexFilterCmd : public Command {
+/**
+ * Base class for mongos index filter commands.
+ * Cluster index filter commands don't do much more than
+ * forwarding the commands to all shards and combining the results.
+ */
+class ClusterIndexFilterCmd : public Command {
MONGO_DISALLOW_COPYING(ClusterIndexFilterCmd);
- public:
- virtual ~ClusterIndexFilterCmd() {
- }
+public:
+ virtual ~ClusterIndexFilterCmd() {}
- bool slaveOk() const {
- return false;
- }
+ bool slaveOk() const {
+ return false;
+ }
- bool slaveOverrideOk() const {
- return true;
- }
+ bool slaveOverrideOk() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- void help(stringstream& ss) const {
- ss << _helpText;
- }
+ void help(stringstream& ss) const {
+ ss << _helpText;
+ }
- Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
-
- if (authzSession->isAuthorizedForActionsOnResource(pattern,
- ActionType::planCacheIndexFilter)) {
- return Status::OK();
- }
-
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- // Cluster plan cache command entry point.
- bool run(OperationContext* txn, const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- public:
-
- /**
- * Instantiates a command that can be invoked by "name", which will be described by
- * "helpText", and will require privilege "actionType" to run.
- */
- ClusterIndexFilterCmd( const std::string& name, const std::string& helpText) :
- Command( name ), _helpText( helpText ) {
+ if (authzSession->isAuthorizedForActionsOnResource(pattern,
+ ActionType::planCacheIndexFilter)) {
+ return Status::OK();
}
- private:
-
- std::string _helpText;
- };
-
- //
- // Cluster index filter command implementation(s) below
- //
-
- bool ClusterIndexFilterCmd::run(OperationContext* txn, const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errMsg,
- BSONObjBuilder& result) {
- const std::string fullns = parseNs(dbName, cmdObj);
- NamespaceString nss(fullns);
-
- // Dispatch command to all the shards.
- // Targeted shard commands are generally data-dependent but index filter
- // commands are tied to query shape (data has no effect on query shape).
- vector<Strategy::CommandResult> results;
- Strategy::commandOp(dbName, cmdObj, options, nss.ns(), BSONObj(), &results);
-
- // Set value of first shard result's "ok" field.
- bool clusterCmdResult = true;
-
- for (vector<Strategy::CommandResult>::const_iterator i = results.begin();
- i != results.end(); ++i) {
- const Strategy::CommandResult& cmdResult = *i;
-
- // XXX: In absence of sensible aggregation strategy,
- // promote first shard's result to top level.
- if (i == results.begin()) {
- result.appendElements(cmdResult.result);
- clusterCmdResult = cmdResult.result["ok"].trueValue();
- }
-
- // Append shard result as a sub object.
- // Name the field after the shard.
- result.append(cmdResult.shardTargetId, cmdResult.result);
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ // Cluster plan cache command entry point.
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
+
+public:
+ /**
+ * Instantiates a command that can be invoked by "name", which will be described by
+ * "helpText", and will require privilege "actionType" to run.
+ */
+ ClusterIndexFilterCmd(const std::string& name, const std::string& helpText)
+ : Command(name), _helpText(helpText) {}
+
+private:
+ std::string _helpText;
+};
+
+//
+// Cluster index filter command implementation(s) below
+//
+
+bool ClusterIndexFilterCmd::run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errMsg,
+ BSONObjBuilder& result) {
+ const std::string fullns = parseNs(dbName, cmdObj);
+ NamespaceString nss(fullns);
+
+ // Dispatch command to all the shards.
+ // Targeted shard commands are generally data-dependent but index filter
+ // commands are tied to query shape (data has no effect on query shape).
+ vector<Strategy::CommandResult> results;
+ Strategy::commandOp(dbName, cmdObj, options, nss.ns(), BSONObj(), &results);
+
+ // Set value of first shard result's "ok" field.
+ bool clusterCmdResult = true;
+
+ for (vector<Strategy::CommandResult>::const_iterator i = results.begin(); i != results.end();
+ ++i) {
+ const Strategy::CommandResult& cmdResult = *i;
+
+ // XXX: In absence of sensible aggregation strategy,
+ // promote first shard's result to top level.
+ if (i == results.begin()) {
+ result.appendElements(cmdResult.result);
+ clusterCmdResult = cmdResult.result["ok"].trueValue();
}
- return clusterCmdResult;
+ // Append shard result as a sub object.
+ // Name the field after the shard.
+ result.append(cmdResult.shardTargetId, cmdResult.result);
}
- //
- // Register index filter commands at startup
- //
+ return clusterCmdResult;
+}
- namespace {
+//
+// Register index filter commands at startup
+//
- MONGO_INITIALIZER(RegisterIndexFilterCommands)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
+namespace {
- new ClusterIndexFilterCmd(
- "planCacheListFilters",
- "Displays index filters for all query shapes in a collection." );
+MONGO_INITIALIZER(RegisterIndexFilterCommands)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
- new ClusterIndexFilterCmd(
- "planCacheClearFilters",
- "Clears index filter for a single query shape or, "
- "if the query shape is omitted, all filters for the collection." );
+ new ClusterIndexFilterCmd("planCacheListFilters",
+ "Displays index filters for all query shapes in a collection.");
- new ClusterIndexFilterCmd(
- "planCacheSetFilter",
- "Sets index filter for a query shape. Overrides existing index filter." );
+ new ClusterIndexFilterCmd("planCacheClearFilters",
+ "Clears index filter for a single query shape or, "
+ "if the query shape is omitted, all filters for the collection.");
- return Status::OK();
- }
+ new ClusterIndexFilterCmd(
+ "planCacheSetFilter",
+ "Sets index filter for a query shape. Overrides existing index filter.");
+
+ return Status::OK();
+}
- } // namespace
+} // namespace
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
index d4c958582ac..3c16419807d 100644
--- a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
@@ -34,38 +34,36 @@
namespace mongo {
namespace {
- class IsDbGridCmd : public Command {
- public:
- IsDbGridCmd() : Command("isdbgrid") { }
+class IsDbGridCmd : public Command {
+public:
+ IsDbGridCmd() : Command("isdbgrid") {}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
+ }
- // No auth required
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ result.append("isdbgrid", 1);
+ result.append("hostname", getHostNameCached());
+ return true;
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+} isdbGrid;
- result.append("isdbgrid", 1);
- result.append("hostname", getHostNameCached());
- return true;
- }
-
- } isdbGrid;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_is_master_cmd.cpp b/src/mongo/s/commands/cluster_is_master_cmd.cpp
index 5f926811a27..75dde8f15f9 100644
--- a/src/mongo/s/commands/cluster_is_master_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_master_cmd.cpp
@@ -35,52 +35,50 @@
namespace mongo {
namespace {
- class CmdIsMaster : public Command {
- public:
- CmdIsMaster() : Command("isMaster", false, "ismaster") { }
+class CmdIsMaster : public Command {
+public:
+ CmdIsMaster() : Command("isMaster", false, "ismaster") {}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual void help(std::stringstream& help) const {
- help << "test if this is master half of a replica pair";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "test if this is master half of a replica pair";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
+ }
- // No auth required
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ result.appendBool("ismaster", true);
+ result.append("msg", "isdbgrid");
+ result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
+ result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
+ result.appendNumber("maxWriteBatchSize", BatchedCommandRequest::kMaxWriteBatchSize);
+ result.appendDate("localTime", jsTime());
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ // Mongos tries to keep exactly the same version range of the server for which
+ // it is compiled.
+ result.append("maxWireVersion", maxWireVersion);
+ result.append("minWireVersion", minWireVersion);
- result.appendBool("ismaster", true);
- result.append("msg", "isdbgrid");
- result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
- result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
- result.appendNumber("maxWriteBatchSize", BatchedCommandRequest::kMaxWriteBatchSize);
- result.appendDate("localTime", jsTime());
+ return true;
+ }
- // Mongos tries to keep exactly the same version range of the server for which
- // it is compiled.
- result.append("maxWireVersion", maxWireVersion);
- result.append("minWireVersion", minWireVersion);
+} isMaster;
- return true;
- }
-
- } isMaster;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index e6f3a127b56..056f4b7ff60 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -50,81 +50,84 @@
namespace mongo {
namespace {
- class ClusterKillOpCommand : public Command {
- public:
- ClusterKillOpCommand() : Command("killOp") {}
-
- bool isWriteCommandForConfigServer() const final { return false; }
+class ClusterKillOpCommand : public Command {
+public:
+ ClusterKillOpCommand() : Command("killOp") {}
+
+ bool isWriteCommandForConfigServer() const final {
+ return false;
+ }
+
+ bool slaveOk() const final {
+ return true;
+ }
+
+ bool adminOnly() const final {
+ return true;
+ }
+
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) final {
+ bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::killop);
+ return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) final {
+ // The format of op is shardid:opid
+ // This is different than the format passed to the mongod killOp command.
+ std::string opToKill;
+ uassertStatusOK(bsonExtractStringField(cmdObj, "op", &opToKill));
+
+ const auto opSepPos = opToKill.find(':');
+
+ uassert(28625,
+ str::stream() << "The op argument to killOp must be of the format shardid:opid"
+ << " but found \"" << opToKill << '"',
+ (opToKill.size() >= 3) && // must have at least N:N
+ (opSepPos != std::string::npos) && // must have ':' as separator
+ (opSepPos != 0) && // can't be :NN
+ (opSepPos != (opToKill.size() - 1))); // can't be NN:
+
+ auto shardIdent = opToKill.substr(0, opSepPos);
+ log() << "want to kill op: " << opToKill;
+
+ // Will throw if shard id is not found
+ auto shard = grid.shardRegistry()->getShard(shardIdent);
+ if (!shard) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::ShardNotFound,
+ str::stream() << "shard " << shardIdent << " does not exist"));
+ }
- bool slaveOk() const final { return true; }
+ auto opId = std::stoi(opToKill.substr(opSepPos + 1));
- bool adminOnly() const final { return true; }
+ // shardid is actually the opid - keeping for backwards compatibility.
+ result.append("shard", shardIdent);
+ result.append("shardid", opId);
- Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) final {
+ ScopedDbConnection conn(shard->getConnString());
+ BSONObj cmdRes;
+ BSONObjBuilder argsBob;
+ argsBob.append("op", opId);
+ auto args = argsBob.done();
+ // intentionally ignore return value - that is how legacy killOp worked.
+ conn->runPseudoCommand("admin", "killOp", "$cmd.sys.killop", args, cmdRes);
+ conn.done();
- bool isAuthorized = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(),
- ActionType::killop);
- return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) final {
-
- // The format of op is shardid:opid
- // This is different than the format passed to the mongod killOp command.
- std::string opToKill;
- uassertStatusOK(bsonExtractStringField(cmdObj, "op", &opToKill));
-
- const auto opSepPos = opToKill.find(':');
-
- uassert(28625,
- str::stream() << "The op argument to killOp must be of the format shardid:opid"
- << " but found \"" << opToKill << '"',
- (opToKill.size() >= 3) && // must have at least N:N
- (opSepPos != std::string::npos) && // must have ':' as separator
- (opSepPos != 0) && // can't be :NN
- (opSepPos != (opToKill.size() - 1))); // can't be NN:
-
- auto shardIdent = opToKill.substr(0, opSepPos);
- log() << "want to kill op: " << opToKill;
-
- // Will throw if shard id is not found
- auto shard = grid.shardRegistry()->getShard(shardIdent);
- if (!shard) {
- return appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound,
- str::stream() << "shard " << shardIdent
- << " does not exist"));
- }
-
- auto opId = std::stoi(opToKill.substr(opSepPos + 1));
-
- // shardid is actually the opid - keeping for backwards compatibility.
- result.append("shard", shardIdent);
- result.append("shardid", opId);
-
- ScopedDbConnection conn(shard->getConnString());
- BSONObj cmdRes;
- BSONObjBuilder argsBob;
- argsBob.append("op", opId);
- auto args = argsBob.done();
- // intentionally ignore return value - that is how legacy killOp worked.
- conn->runPseudoCommand("admin", "killOp", "$cmd.sys.killop", args, cmdRes);
- conn.done();
-
- // The original behavior of killOp on mongos is to always return success, regardless of
- // whether the shard reported success or not.
- return true;
- }
+ // The original behavior of killOp on mongos is to always return success, regardless of
+ // whether the shard reported success or not.
+ return true;
+ }
- } clusterKillOpCommand;
+} clusterKillOpCommand;
} // namespace
} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 1fc52781cce..248fb79f488 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -41,173 +41,169 @@
namespace mongo {
- using std::unique_ptr;
- using std::map;
- using std::string;
- using std::vector;
+using std::unique_ptr;
+using std::map;
+using std::string;
+using std::vector;
namespace {
- class ListDatabasesCmd : public Command {
- public:
- ListDatabasesCmd() : Command("listDatabases", true, "listdatabases") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool slaveOverrideOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "list databases in a cluster";
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::listDatabases);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname_unused,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- map<string, long long> sizes;
- map<string, unique_ptr<BSONObjBuilder> > dbShardInfo;
-
- vector<ShardId> shardIds;
- grid.shardRegistry()->getAllShardIds(&shardIds);
-
- for (const ShardId& shardId : shardIds) {
- const auto s = grid.shardRegistry()->getShard(shardId);
- if (!s) {
- continue;
- }
-
- BSONObj x = s->runCommand("admin", "listDatabases");
+class ListDatabasesCmd : public Command {
+public:
+ ListDatabasesCmd() : Command("listDatabases", true, "listdatabases") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "list databases in a cluster";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listDatabases);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname_unused,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ map<string, long long> sizes;
+ map<string, unique_ptr<BSONObjBuilder>> dbShardInfo;
+
+ vector<ShardId> shardIds;
+ grid.shardRegistry()->getAllShardIds(&shardIds);
+
+ for (const ShardId& shardId : shardIds) {
+ const auto s = grid.shardRegistry()->getShard(shardId);
+ if (!s) {
+ continue;
+ }
- BSONObjIterator j(x["databases"].Obj());
- while (j.more()) {
- BSONObj dbObj = j.next().Obj();
+ BSONObj x = s->runCommand("admin", "listDatabases");
- const string name = dbObj["name"].String();
- const long long size = dbObj["sizeOnDisk"].numberLong();
+ BSONObjIterator j(x["databases"].Obj());
+ while (j.more()) {
+ BSONObj dbObj = j.next().Obj();
- long long& totalSize = sizes[name];
- if (size == 1) {
- if (totalSize <= 1) {
- totalSize = 1;
- }
- }
- else {
- totalSize += size;
- }
+ const string name = dbObj["name"].String();
+ const long long size = dbObj["sizeOnDisk"].numberLong();
- unique_ptr<BSONObjBuilder>& bb = dbShardInfo[name];
- if (!bb.get()) {
- bb.reset(new BSONObjBuilder());
+ long long& totalSize = sizes[name];
+ if (size == 1) {
+ if (totalSize <= 1) {
+ totalSize = 1;
}
+ } else {
+ totalSize += size;
+ }
- bb->appendNumber(s->getId(), size);
+ unique_ptr<BSONObjBuilder>& bb = dbShardInfo[name];
+ if (!bb.get()) {
+ bb.reset(new BSONObjBuilder());
}
+ bb->appendNumber(s->getId(), size);
}
+ }
- long long totalSize = 0;
+ long long totalSize = 0;
- BSONArrayBuilder bb(result.subarrayStart("databases"));
- for (map<string, long long>::iterator i = sizes.begin(); i != sizes.end(); ++i) {
- const string name = i->first;
+ BSONArrayBuilder bb(result.subarrayStart("databases"));
+ for (map<string, long long>::iterator i = sizes.begin(); i != sizes.end(); ++i) {
+ const string name = i->first;
- if (name == "local") {
- // We don't return local, since all shards have their own independent local
- continue;
- }
+ if (name == "local") {
+ // We don't return local, since all shards have their own independent local
+ continue;
+ }
- if (name == "config" || name == "admin") {
- // Always get this from the config servers
- continue;
- }
+ if (name == "config" || name == "admin") {
+ // Always get this from the config servers
+ continue;
+ }
- long long size = i->second;
- totalSize += size;
+ long long size = i->second;
+ totalSize += size;
- BSONObjBuilder temp;
- temp.append("name", name);
- temp.appendNumber("sizeOnDisk", size);
- temp.appendBool("empty", size == 1);
- temp.append("shards", dbShardInfo[name]->obj());
+ BSONObjBuilder temp;
+ temp.append("name", name);
+ temp.appendNumber("sizeOnDisk", size);
+ temp.appendBool("empty", size == 1);
+ temp.append("shards", dbShardInfo[name]->obj());
- bb.append(temp.obj());
- }
+ bb.append(temp.obj());
+ }
- // Obtain the cached config shard
- const auto configShard = grid.shardRegistry()->getShard("config");
-
- {
- // get config db from the config servers (first one)
- BSONObj x;
- if (configShard->runCommand("config", "dbstats", x)) {
- BSONObjBuilder b;
- b.append("name", "config");
- b.appendBool("empty", false);
- if (x["fileSize"].type())
- b.appendAs(x["fileSize"], "sizeOnDisk");
- else
- b.append("sizeOnDisk", 1);
- bb.append(b.obj());
- }
- else {
- bb.append(BSON("name" << "config"));
- }
+ // Obtain the cached config shard
+ const auto configShard = grid.shardRegistry()->getShard("config");
+
+ {
+ // get config db from the config servers (first one)
+ BSONObj x;
+ if (configShard->runCommand("config", "dbstats", x)) {
+ BSONObjBuilder b;
+ b.append("name", "config");
+ b.appendBool("empty", false);
+ if (x["fileSize"].type())
+ b.appendAs(x["fileSize"], "sizeOnDisk");
+ else
+ b.append("sizeOnDisk", 1);
+ bb.append(b.obj());
+ } else {
+ bb.append(BSON("name"
+ << "config"));
}
+ }
- {
- // get admin db from the config servers (first one)
- BSONObj x;
- if (configShard->runCommand("admin", "dbstats", x)) {
- BSONObjBuilder b;
- b.append("name", "admin");
- b.appendBool("empty", false);
-
- if (x["fileSize"].type()) {
- b.appendAs(x["fileSize"], "sizeOnDisk");
- }
- else {
- b.append("sizeOnDisk", 1);
- }
-
- bb.append(b.obj());
- }
- else {
- bb.append(BSON("name" << "admin"));
+ {
+ // get admin db from the config servers (first one)
+ BSONObj x;
+ if (configShard->runCommand("admin", "dbstats", x)) {
+ BSONObjBuilder b;
+ b.append("name", "admin");
+ b.appendBool("empty", false);
+
+ if (x["fileSize"].type()) {
+ b.appendAs(x["fileSize"], "sizeOnDisk");
+ } else {
+ b.append("sizeOnDisk", 1);
}
+
+ bb.append(b.obj());
+ } else {
+ bb.append(BSON("name"
+ << "admin"));
}
+ }
- bb.done();
+ bb.done();
- result.appendNumber("totalSize", totalSize);
- result.appendNumber("totalSizeMb", totalSize / (1024 * 1024));
+ result.appendNumber("totalSize", totalSize);
+ result.appendNumber("totalSizeMb", totalSize / (1024 * 1024));
- return 1;
- }
+ return 1;
+ }
- } cmdListDatabases;
+} cmdListDatabases;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
index 3cb365710e5..16eccd3c8e1 100644
--- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
@@ -39,59 +39,55 @@
namespace mongo {
namespace {
- class ListShardsCmd : public Command {
- public:
- ListShardsCmd() : Command("listShards", false, "listshards") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "list all shards of the system";
+class ListShardsCmd : public Command {
+public:
+ ListShardsCmd() : Command("listShards", false, "listshards") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "list all shards of the system";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::listShards);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ std::vector<ShardType> shards;
+ Status status = grid.catalogManager()->getAllShards(&shards);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- ActionSet actions;
- actions.addAction(ActionType::listShards);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ std::vector<BSONObj> shardsObj;
+ for (std::vector<ShardType>::const_iterator it = shards.begin(); it != shards.end(); it++) {
+ shardsObj.push_back(it->toBSON());
}
+ result.append("shards", shardsObj);
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- std::vector<ShardType> shards;
- Status status = grid.catalogManager()->getAllShards(&shards);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- std::vector<BSONObj> shardsObj;
- for (std::vector<ShardType>::const_iterator it = shards.begin();
- it != shards.end();
- it++) {
- shardsObj.push_back(it->toBSON());
- }
- result.append("shards", shardsObj);
-
- return true;
- }
+ return true;
+ }
- } listShards;
+} listShards;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index f4a0347de5c..78ecd3ae927 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -55,564 +55,536 @@
namespace mongo {
- using std::shared_ptr;
- using std::map;
- using std::set;
- using std::string;
- using std::vector;
+using std::shared_ptr;
+using std::map;
+using std::set;
+using std::string;
+using std::vector;
namespace {
- AtomicUInt32 JOB_NUMBER;
+AtomicUInt32 JOB_NUMBER;
- /**
- * Generates a unique name for the temporary M/R output collection.
- */
- string getTmpName(const string& coll) {
- StringBuilder sb;
- sb << "tmp.mrs." << coll << "_" << time(0) << "_" << JOB_NUMBER.fetchAndAdd(1);
- return sb.str();
- }
+/**
+ * Generates a unique name for the temporary M/R output collection.
+ */
+string getTmpName(const string& coll) {
+ StringBuilder sb;
+ sb << "tmp.mrs." << coll << "_" << time(0) << "_" << JOB_NUMBER.fetchAndAdd(1);
+ return sb.str();
+}
- /**
- * Given an input map/reduce command, this call generates the matching command which should
- * be sent to the shards as part of the first phase of map/reduce.
- */
- BSONObj fixForShards(const BSONObj& orig,
- const string& output,
- string& badShardedField,
- int maxChunkSizeBytes) {
-
- BSONObjBuilder b;
- BSONObjIterator i(orig);
- while (i.more()) {
- BSONElement e = i.next();
- const string fn = e.fieldName();
-
- if (fn == bypassDocumentValidationCommandOption() ||
- fn == "map" ||
- fn == "mapreduce" ||
- fn == "mapReduce" ||
- fn == "mapparams" ||
- fn == "reduce" ||
- fn == "query" ||
- fn == "sort" ||
- fn == "scope" ||
- fn == "verbose" ||
- fn == "$queryOptions" ||
- fn == LiteParsedQuery::cmdOptionMaxTimeMS) {
-
- b.append(e);
- }
- else if (fn == "out" || fn == "finalize") {
- // We don't want to copy these
- }
- else {
- badShardedField = fn;
- return BSONObj();
- }
+/**
+ * Given an input map/reduce command, this call generates the matching command which should
+ * be sent to the shards as part of the first phase of map/reduce.
+ */
+BSONObj fixForShards(const BSONObj& orig,
+ const string& output,
+ string& badShardedField,
+ int maxChunkSizeBytes) {
+ BSONObjBuilder b;
+ BSONObjIterator i(orig);
+ while (i.more()) {
+ BSONElement e = i.next();
+ const string fn = e.fieldName();
+
+ if (fn == bypassDocumentValidationCommandOption() || fn == "map" || fn == "mapreduce" ||
+ fn == "mapReduce" || fn == "mapparams" || fn == "reduce" || fn == "query" ||
+ fn == "sort" || fn == "scope" || fn == "verbose" || fn == "$queryOptions" ||
+ fn == LiteParsedQuery::cmdOptionMaxTimeMS) {
+ b.append(e);
+ } else if (fn == "out" || fn == "finalize") {
+ // We don't want to copy these
+ } else {
+ badShardedField = fn;
+ return BSONObj();
}
+ }
- b.append("out", output);
- b.append("shardedFirstPass", true);
+ b.append("out", output);
+ b.append("shardedFirstPass", true);
- if (maxChunkSizeBytes > 0) {
- // Will need to figure out chunks, ask shards for points
- b.append("splitInfo", maxChunkSizeBytes);
- }
+ if (maxChunkSizeBytes > 0) {
+ // Will need to figure out chunks, ask shards for points
+ b.append("splitInfo", maxChunkSizeBytes);
+ }
+
+ return b.obj();
+}
+
+
+/**
+ * Outline for sharded map reduce for sharded output, $out replace:
+ *
+ * ============= mongos =============
+ * 1. Send map reduce command to all relevant shards with some extra info like the value for
+ * the chunkSize and the name of the temporary output collection.
+ *
+ * ============= shard =============
+ * 2. Does normal map reduce.
+ *
+ * 3. Calls splitVector on itself against the output collection and puts the results into the
+ * response object.
+ *
+ * ============= mongos =============
+ * 4. If the output collection is *not* sharded, uses the information from splitVector to
+ * create a pre-split sharded collection.
+ *
+ * 5. Grabs the distributed lock for the final output collection.
+ *
+ * 6. Sends mapReduce.shardedfinish.
+ *
+ * ============= shard =============
+ * 7. Extracts the list of shards from the mapReduce.shardedfinish and performs a broadcast
+ * query against all of them to obtain all documents that this shard owns.
+ *
+ * 8. Performs the reduce operation against every document from step #7 and outputs them to
+ * another temporary collection. Also keeps track of the BSONObject size of every "reduced"
+ * document for each chunk range.
+ *
+ * 9. Atomically drops the old output collection and renames the temporary collection to the
+ * output collection.
+ *
+ * ============= mongos =============
+ * 10. Releases the distributed lock acquired at step #5.
+ *
+ * 11. Inspects the BSONObject size from step #8 and determines if it needs to split.
+ */
+class MRCmd : public Command {
+public:
+ MRCmd() : Command("mapReduce", false, "mapreduce") {}
- return b.obj();
+ virtual bool slaveOk() const {
+ return true;
}
+ virtual bool adminOnly() const {
+ return false;
+ }
- /**
- * Outline for sharded map reduce for sharded output, $out replace:
- *
- * ============= mongos =============
- * 1. Send map reduce command to all relevant shards with some extra info like the value for
- * the chunkSize and the name of the temporary output collection.
- *
- * ============= shard =============
- * 2. Does normal map reduce.
- *
- * 3. Calls splitVector on itself against the output collection and puts the results into the
- * response object.
- *
- * ============= mongos =============
- * 4. If the output collection is *not* sharded, uses the information from splitVector to
- * create a pre-split sharded collection.
- *
- * 5. Grabs the distributed lock for the final output collection.
- *
- * 6. Sends mapReduce.shardedfinish.
- *
- * ============= shard =============
- * 7. Extracts the list of shards from the mapReduce.shardedfinish and performs a broadcast
- * query against all of them to obtain all documents that this shard owns.
- *
- * 8. Performs the reduce operation against every document from step #7 and outputs them to
- * another temporary collection. Also keeps track of the BSONObject size of every "reduced"
- * document for each chunk range.
- *
- * 9. Atomically drops the old output collection and renames the temporary collection to the
- * output collection.
- *
- * ============= mongos =============
- * 10. Releases the distributed lock acquired at step #5.
- *
- * 11. Inspects the BSONObject size from step #8 and determines if it needs to split.
- */
- class MRCmd : public Command {
- public:
- MRCmd() : Command("mapReduce", false, "mapreduce") { }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual bool slaveOk() const {
- return true;
- }
+ virtual void help(std::stringstream& help) const {
+ help << "Runs the sharded map/reduce command";
+ }
- virtual bool adminOnly() const {
- return false;
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ mr::addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ Timer t;
+
+ const string collection = cmdObj.firstElement().valuestrsafe();
+ const string fullns = dbname + "." + collection;
+ const string shardResultCollection = getTmpName(collection);
+
+ BSONObj customOut;
+ string finalColShort;
+ string finalColLong;
+ bool customOutDB = false;
+
+ string outDB = dbname;
+
+ BSONElement outElmt = cmdObj.getField("out");
+ if (outElmt.type() == Object) {
+ // Check if there is a custom output
+ BSONObj out = outElmt.embeddedObject();
+ customOut = out;
+
+ // Mode must be 1st element
+ finalColShort = out.firstElement().str();
+ if (customOut.hasField("db")) {
+ customOutDB = true;
+ outDB = customOut.getField("db").str();
+ }
+
+ finalColLong = outDB + "." + finalColShort;
}
- virtual void help(std::stringstream& help) const {
- help << "Runs the sharded map/reduce command";
+ // Ensure the input database exists
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ shared_ptr<DBConfig> confIn = status.getValue();
- mr::addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
+ shared_ptr<DBConfig> confOut;
+ if (customOutDB) {
+ // Create the output database implicitly, since we have a custom output requested
+ confOut = uassertStatusOK(grid.implicitCreateDb(outDB));
+ } else {
+ confOut = confIn;
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- Timer t;
-
- const string collection = cmdObj.firstElement().valuestrsafe();
- const string fullns = dbname + "." + collection;
- const string shardResultCollection = getTmpName(collection);
-
- BSONObj customOut;
- string finalColShort;
- string finalColLong;
- bool customOutDB = false;
-
- string outDB = dbname;
-
- BSONElement outElmt = cmdObj.getField("out");
- if (outElmt.type() == Object) {
- // Check if there is a custom output
- BSONObj out = outElmt.embeddedObject();
- customOut = out;
-
- // Mode must be 1st element
- finalColShort = out.firstElement().str();
- if (customOut.hasField("db")) {
- customOutDB = true;
- outDB = customOut.getField("db").str();
- }
+ const bool shardedInput =
+ confIn && confIn->isShardingEnabled() && confIn->isSharded(fullns);
+ const bool shardedOutput = customOut.getBoolField("sharded");
- finalColLong = outDB + "." + finalColShort;
- }
-
- // Ensure the input database exists
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
+ if (!shardedOutput) {
+ uassert(15920,
+ "Cannot output to a non-sharded collection because "
+ "sharded collection exists already",
+ !confOut->isSharded(finalColLong));
- shared_ptr<DBConfig> confIn = status.getValue();
+ // TODO: Should we also prevent going from non-sharded to sharded? During the
+ // transition client may see partial data.
+ }
- shared_ptr<DBConfig> confOut;
- if (customOutDB) {
- // Create the output database implicitly, since we have a custom output requested
- confOut = uassertStatusOK(grid.implicitCreateDb(outDB));
- }
- else {
- confOut = confIn;
+ int64_t maxChunkSizeBytes = 0;
+ if (shardedOutput) {
+ // Will need to figure out chunks, ask shards for points
+ maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
+ if (maxChunkSizeBytes == 0) {
+ maxChunkSizeBytes = Chunk::MaxChunkSize;
}
- const bool shardedInput = confIn &&
- confIn->isShardingEnabled() &&
- confIn->isSharded(fullns);
- const bool shardedOutput = customOut.getBoolField("sharded");
+ // maxChunkSizeBytes is sent as int BSON field
+ invariant(maxChunkSizeBytes < std::numeric_limits<int>::max());
+ }
- if (!shardedOutput) {
- uassert(15920,
- "Cannot output to a non-sharded collection because "
- "sharded collection exists already",
- !confOut->isSharded(finalColLong));
+ if (customOut.hasField("inline") && shardedOutput) {
+ errmsg = "cannot specify inline and sharded output at the same time";
+ return false;
+ }
- // TODO: Should we also prevent going from non-sharded to sharded? During the
- // transition client may see partial data.
- }
+ // modify command to run on shards with output to tmp collection
+ string badShardedField;
+ BSONObj shardedCommand =
+ fixForShards(cmdObj, shardResultCollection, badShardedField, maxChunkSizeBytes);
- int64_t maxChunkSizeBytes = 0;
- if (shardedOutput) {
- // Will need to figure out chunks, ask shards for points
- maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
- if (maxChunkSizeBytes == 0) {
- maxChunkSizeBytes = Chunk::MaxChunkSize;
- }
+ if (!shardedInput && !shardedOutput && !customOutDB) {
+ LOG(1) << "simple MR, just passthrough";
- // maxChunkSizeBytes is sent as int BSON field
- invariant(maxChunkSizeBytes < std::numeric_limits<int>::max());
- }
+ const auto shard = grid.shardRegistry()->getShard(confIn->getPrimaryId());
+ ShardConnection conn(shard->getConnString(), "");
- if (customOut.hasField("inline") && shardedOutput) {
- errmsg = "cannot specify inline and sharded output at the same time";
- return false;
- }
+ BSONObj res;
+ bool ok = conn->runCommand(dbname, cmdObj, res);
+ conn.done();
- // modify command to run on shards with output to tmp collection
- string badShardedField;
- BSONObj shardedCommand = fixForShards(cmdObj,
- shardResultCollection,
- badShardedField,
- maxChunkSizeBytes);
+ result.appendElements(res);
+ return ok;
+ }
- if (!shardedInput && !shardedOutput && !customOutDB) {
- LOG(1) << "simple MR, just passthrough";
+ if (badShardedField.size()) {
+ errmsg = str::stream() << "unknown m/r field for sharding: " << badShardedField;
+ return false;
+ }
- const auto shard = grid.shardRegistry()->getShard(confIn->getPrimaryId());
- ShardConnection conn(shard->getConnString(), "");
+ BSONObj q;
+ if (cmdObj["query"].type() == Object) {
+ q = cmdObj["query"].embeddedObjectUserCheck();
+ }
- BSONObj res;
- bool ok = conn->runCommand(dbname, cmdObj, res);
- conn.done();
+ set<string> servers;
+ vector<Strategy::CommandResult> mrCommandResults;
- result.appendElements(res);
- return ok;
- }
+ BSONObjBuilder shardResultsB;
+ BSONObjBuilder shardCountsB;
+ map<string, int64_t> countsMap;
+ set<BSONObj> splitPts;
- if (badShardedField.size()) {
- errmsg = str::stream() << "unknown m/r field for sharding: " << badShardedField;
- return false;
- }
+ {
+ bool ok = true;
- BSONObj q;
- if (cmdObj["query"].type() == Object) {
- q = cmdObj["query"].embeddedObjectUserCheck();
- }
+ // TODO: take distributed lock to prevent split / migration?
- set<string> servers;
- vector<Strategy::CommandResult> mrCommandResults;
+ try {
+ Strategy::commandOp(dbname, shardedCommand, 0, fullns, q, &mrCommandResults);
+ } catch (DBException& e) {
+ e.addContext(str::stream() << "could not run map command on all shards for ns "
+ << fullns << " and query " << q);
+ throw;
+ }
- BSONObjBuilder shardResultsB;
- BSONObjBuilder shardCountsB;
- map<string, int64_t> countsMap;
- set< BSONObj > splitPts;
+ for (const auto& mrResult : mrCommandResults) {
+ // Need to gather list of all servers even if an error happened
+ string server;
+ {
+ const auto shard = grid.shardRegistry()->getShard(mrResult.shardTargetId);
+ server = shard->getConnString().toString();
+ }
+ servers.insert(server);
- {
- bool ok = true;
+ if (!ok) {
+ continue;
+ }
- // TODO: take distributed lock to prevent split / migration?
+ BSONObj singleResult = mrResult.result;
+ ok = singleResult["ok"].trueValue();
- try {
- Strategy::commandOp(dbname, shardedCommand, 0, fullns, q, &mrCommandResults);
- }
- catch (DBException& e){
- e.addContext(str::stream() << "could not run map command on all shards for ns "
- << fullns << " and query " << q);
- throw;
+ if (!ok) {
+ // At this point we will return
+ errmsg = str::stream()
+ << "MR parallel processing failed: " << singleResult.toString();
+ continue;
}
- for (const auto& mrResult : mrCommandResults) {
- // Need to gather list of all servers even if an error happened
- string server;
- {
- const auto shard =
- grid.shardRegistry()->getShard(mrResult.shardTargetId);
- server = shard->getConnString().toString();
- }
- servers.insert(server);
+ shardResultsB.append(server, singleResult);
- if (!ok) {
- continue;
- }
+ BSONObj counts = singleResult["counts"].embeddedObjectUserCheck();
+ shardCountsB.append(server, counts);
- BSONObj singleResult = mrResult.result;
- ok = singleResult["ok"].trueValue();
+ // Add up the counts for each shard. Some of them will be fixed later like
+ // output and reduce.
+ BSONObjIterator j(counts);
+ while (j.more()) {
+ BSONElement temp = j.next();
+ countsMap[temp.fieldName()] += temp.numberLong();
+ }
- if (!ok) {
- // At this point we will return
- errmsg = str::stream() << "MR parallel processing failed: "
- << singleResult.toString();
- continue;
+ if (singleResult.hasField("splitKeys")) {
+ BSONElement splitKeys = singleResult.getField("splitKeys");
+ vector<BSONElement> pts = splitKeys.Array();
+ for (vector<BSONElement>::iterator it = pts.begin(); it != pts.end(); ++it) {
+ splitPts.insert(it->Obj().getOwned());
}
+ }
+ }
- shardResultsB.append(server, singleResult);
+ if (!ok) {
+ _cleanUp(servers, dbname, shardResultCollection);
- BSONObj counts = singleResult["counts"].embeddedObjectUserCheck();
- shardCountsB.append(server, counts);
+ // Add "code" to the top-level response, if the failure of the sharded command
+ // can be accounted to a single error.
+ int code = getUniqueCodeFromCommandResults(mrCommandResults);
+ if (code != 0) {
+ result.append("code", code);
+ }
- // Add up the counts for each shard. Some of them will be fixed later like
- // output and reduce.
- BSONObjIterator j(counts);
- while (j.more()) {
- BSONElement temp = j.next();
- countsMap[temp.fieldName()] += temp.numberLong();
- }
+ return false;
+ }
+ }
- if (singleResult.hasField("splitKeys")) {
- BSONElement splitKeys = singleResult.getField("splitKeys");
- vector<BSONElement> pts = splitKeys.Array();
- for (vector<BSONElement>::iterator it = pts.begin(); it != pts.end(); ++it) {
- splitPts.insert(it->Obj().getOwned());
- }
- }
- }
+ // Build the sharded finish command
+ BSONObjBuilder finalCmd;
+ finalCmd.append("mapreduce.shardedfinish", cmdObj);
+ finalCmd.append("inputDB", dbname);
+ finalCmd.append("shardedOutputCollection", shardResultCollection);
+ finalCmd.append("shards", shardResultsB.done());
- if (!ok) {
- _cleanUp(servers, dbname, shardResultCollection);
+ BSONObj shardCounts = shardCountsB.done();
+ finalCmd.append("shardCounts", shardCounts);
- // Add "code" to the top-level response, if the failure of the sharded command
- // can be accounted to a single error.
- int code = getUniqueCodeFromCommandResults(mrCommandResults);
- if (code != 0) {
- result.append("code", code);
- }
+ BSONObjBuilder timingBuilder;
+ timingBuilder.append("shardProcessing", t.millis());
- return false;
- }
- }
+ BSONObjBuilder aggCountsB;
+ for (const auto& countEntry : countsMap) {
+ aggCountsB.append(countEntry.first, static_cast<long long>(countEntry.second));
+ }
- // Build the sharded finish command
- BSONObjBuilder finalCmd;
- finalCmd.append("mapreduce.shardedfinish", cmdObj);
- finalCmd.append("inputDB", dbname);
- finalCmd.append("shardedOutputCollection", shardResultCollection);
- finalCmd.append("shards", shardResultsB.done());
+ BSONObj aggCounts = aggCountsB.done();
+ finalCmd.append("counts", aggCounts);
- BSONObj shardCounts = shardCountsB.done();
- finalCmd.append("shardCounts", shardCounts);
+ if (auto elem = cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS])
+ finalCmd.append(elem);
+ if (auto elem = cmdObj[bypassDocumentValidationCommandOption()])
+ finalCmd.append(elem);
- BSONObjBuilder timingBuilder;
- timingBuilder.append("shardProcessing", t.millis());
+ Timer t2;
- BSONObjBuilder aggCountsB;
- for (const auto& countEntry : countsMap) {
- aggCountsB.append(countEntry.first, static_cast<long long>(countEntry.second));
- }
+ long long reduceCount = 0;
+ long long outputCount = 0;
+ BSONObjBuilder postCountsB;
- BSONObj aggCounts = aggCountsB.done();
- finalCmd.append("counts", aggCounts);
+ bool ok = true;
+ BSONObj singleResult;
- if (auto elem = cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]) finalCmd.append(elem);
- if (auto elem = cmdObj[bypassDocumentValidationCommandOption()]) finalCmd.append(elem);
+ if (!shardedOutput) {
+ const auto shard = grid.shardRegistry()->getShard(confOut->getPrimaryId());
+ LOG(1) << "MR with single shard output, NS=" << finalColLong
+ << " primary=" << shard->toString();
- Timer t2;
+ ShardConnection conn(shard->getConnString(), finalColLong);
+ ok = conn->runCommand(outDB, finalCmd.obj(), singleResult);
- long long reduceCount = 0;
- long long outputCount = 0;
- BSONObjBuilder postCountsB;
+ BSONObj counts = singleResult.getObjectField("counts");
+ postCountsB.append(conn->getServerAddress(), counts);
+ reduceCount = counts.getIntField("reduce");
+ outputCount = counts.getIntField("output");
- bool ok = true;
- BSONObj singleResult;
+ conn.done();
+ } else {
+ LOG(1) << "MR with sharded output, NS=" << finalColLong;
- if (!shardedOutput) {
- const auto shard = grid.shardRegistry()->getShard(confOut->getPrimaryId());
- LOG(1) << "MR with single shard output, NS=" << finalColLong
- << " primary=" << shard->toString();
+ // Create the sharded collection if needed
+ if (!confOut->isSharded(finalColLong)) {
+ // Enable sharding on db
+ confOut->enableSharding();
- ShardConnection conn(shard->getConnString(), finalColLong);
- ok = conn->runCommand(outDB, finalCmd.obj(), singleResult);
+ // Shard collection according to split points
+ vector<BSONObj> sortedSplitPts;
- BSONObj counts = singleResult.getObjectField("counts");
- postCountsB.append(conn->getServerAddress(), counts);
- reduceCount = counts.getIntField("reduce");
- outputCount = counts.getIntField("output");
+ // Points will be properly sorted using the set
+ for (const auto& splitPt : splitPts) {
+ sortedSplitPts.push_back(splitPt);
+ }
- conn.done();
+ // Pre-split the collection onto all the shards for this database. Note that
+ // it's not completely safe to pre-split onto non-primary shards using the
+ // shardcollection method (a conflict may result if multiple map-reduces are
+ // writing to the same output collection, for instance).
+ //
+ // TODO: pre-split mapReduce output in a safer way.
+
+ set<ShardId> outShardIds;
+ confOut->getAllShardIds(&outShardIds);
+
+ BSONObj sortKey = BSON("_id" << 1);
+ ShardKeyPattern sortKeyPattern(sortKey);
+ Status status = grid.catalogManager()->shardCollection(
+ finalColLong, sortKeyPattern, true, &sortedSplitPts, &outShardIds);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
}
- else {
- LOG(1) << "MR with sharded output, NS=" << finalColLong;
-
- // Create the sharded collection if needed
- if (!confOut->isSharded(finalColLong)) {
- // Enable sharding on db
- confOut->enableSharding();
- // Shard collection according to split points
- vector<BSONObj> sortedSplitPts;
+ map<BSONObj, int> chunkSizes;
+ {
+ // Take distributed lock to prevent split / migration.
+ auto scopedDistLock = grid.catalogManager()->getDistLockManager()->lock(
+ finalColLong,
+ "mr-post-process",
+ stdx::chrono::milliseconds(-1), // retry indefinitely
+ stdx::chrono::milliseconds(100));
+
+ if (!scopedDistLock.isOK()) {
+ return appendCommandStatus(result, scopedDistLock.getStatus());
+ }
- // Points will be properly sorted using the set
- for (const auto& splitPt : splitPts) {
- sortedSplitPts.push_back(splitPt);
- }
+ BSONObj finalCmdObj = finalCmd.obj();
+ mrCommandResults.clear();
- // Pre-split the collection onto all the shards for this database. Note that
- // it's not completely safe to pre-split onto non-primary shards using the
- // shardcollection method (a conflict may result if multiple map-reduces are
- // writing to the same output collection, for instance).
- //
- // TODO: pre-split mapReduce output in a safer way.
-
- set<ShardId> outShardIds;
- confOut->getAllShardIds(&outShardIds);
-
- BSONObj sortKey = BSON("_id" << 1);
- ShardKeyPattern sortKeyPattern(sortKey);
- Status status = grid.catalogManager()->shardCollection(finalColLong,
- sortKeyPattern,
- true,
- &sortedSplitPts,
- &outShardIds);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
+ try {
+ Strategy::commandOp(
+ outDB, finalCmdObj, 0, finalColLong, BSONObj(), &mrCommandResults);
+ ok = true;
+ } catch (DBException& e) {
+ e.addContext(str::stream() << "could not run final reduce on all shards for "
+ << fullns << ", output " << finalColLong);
+ throw;
}
- map<BSONObj, int> chunkSizes;
- {
- // Take distributed lock to prevent split / migration.
- auto scopedDistLock = grid.catalogManager()->getDistLockManager()->lock(
- finalColLong,
- "mr-post-process",
- stdx::chrono::milliseconds(-1), // retry indefinitely
- stdx::chrono::milliseconds(100));
-
- if (!scopedDistLock.isOK()) {
- return appendCommandStatus(result, scopedDistLock.getStatus());
+ for (const auto& mrResult : mrCommandResults) {
+ string server;
+ {
+ const auto shard = grid.shardRegistry()->getShard(mrResult.shardTargetId);
+ server = shard->getConnString().toString();
}
+ singleResult = mrResult.result;
- BSONObj finalCmdObj = finalCmd.obj();
- mrCommandResults.clear();
-
- try {
- Strategy::commandOp(outDB, finalCmdObj, 0, finalColLong, BSONObj(), &mrCommandResults);
- ok = true;
- }
- catch (DBException& e){
- e.addContext(str::stream() << "could not run final reduce on all shards for "
- << fullns << ", output " << finalColLong);
- throw;
+ ok = singleResult["ok"].trueValue();
+ if (!ok) {
+ break;
}
- for (const auto& mrResult : mrCommandResults) {
- string server;
- {
- const auto shard =
- grid.shardRegistry()->getShard(mrResult.shardTargetId);
- server = shard->getConnString().toString();
- }
- singleResult = mrResult.result;
-
- ok = singleResult["ok"].trueValue();
- if (!ok) {
- break;
- }
-
- BSONObj counts = singleResult.getObjectField("counts");
- reduceCount += counts.getIntField("reduce");
- outputCount += counts.getIntField("output");
- postCountsB.append(server, counts);
-
- // get the size inserted for each chunk
- // split cannot be called here since we already have the distributed lock
- if (singleResult.hasField("chunkSizes")) {
- vector<BSONElement> sizes = singleResult.getField("chunkSizes").Array();
- for (unsigned int i = 0; i < sizes.size(); i += 2) {
- BSONObj key = sizes[i].Obj().getOwned();
- const long long size = sizes[i + 1].numberLong();
-
- invariant(size < std::numeric_limits<int>::max());
- chunkSizes[key] = static_cast<int>(size);
- }
+ BSONObj counts = singleResult.getObjectField("counts");
+ reduceCount += counts.getIntField("reduce");
+ outputCount += counts.getIntField("output");
+ postCountsB.append(server, counts);
+
+ // get the size inserted for each chunk
+ // split cannot be called here since we already have the distributed lock
+ if (singleResult.hasField("chunkSizes")) {
+ vector<BSONElement> sizes = singleResult.getField("chunkSizes").Array();
+ for (unsigned int i = 0; i < sizes.size(); i += 2) {
+ BSONObj key = sizes[i].Obj().getOwned();
+ const long long size = sizes[i + 1].numberLong();
+
+ invariant(size < std::numeric_limits<int>::max());
+ chunkSizes[key] = static_cast<int>(size);
}
}
}
+ }
- // Do the splitting round
- ChunkManagerPtr cm = confOut->getChunkManagerIfExists(finalColLong);
- for (const auto& chunkSize : chunkSizes) {
- BSONObj key = chunkSize.first;
- const int size = chunkSize.second;
- invariant(size < std::numeric_limits<int>::max());
-
- // key reported should be the chunk's minimum
- ChunkPtr c = cm->findIntersectingChunk(key);
- if (!c) {
- warning() << "Mongod reported " << size << " bytes inserted for key "
- << key << " but can't find chunk";
- }
- else {
- c->splitIfShould(size);
- }
+ // Do the splitting round
+ ChunkManagerPtr cm = confOut->getChunkManagerIfExists(finalColLong);
+ for (const auto& chunkSize : chunkSizes) {
+ BSONObj key = chunkSize.first;
+ const int size = chunkSize.second;
+ invariant(size < std::numeric_limits<int>::max());
+
+ // key reported should be the chunk's minimum
+ ChunkPtr c = cm->findIntersectingChunk(key);
+ if (!c) {
+ warning() << "Mongod reported " << size << " bytes inserted for key " << key
+ << " but can't find chunk";
+ } else {
+ c->splitIfShould(size);
}
}
+ }
- _cleanUp(servers, dbname, shardResultCollection);
-
- if (!ok) {
- errmsg = str::stream() << "MR post processing failed: " << singleResult.toString();
- return 0;
- }
+ _cleanUp(servers, dbname, shardResultCollection);
- // copy some elements from a single result
- // annoying that we have to copy all results for inline, but no way around it
- if (singleResult.hasField("result")) {
- result.append(singleResult.getField("result"));
- }
- else if (singleResult.hasField("results")) {
- result.append(singleResult.getField("results"));
- }
+ if (!ok) {
+ errmsg = str::stream() << "MR post processing failed: " << singleResult.toString();
+ return 0;
+ }
- BSONObjBuilder countsB(32);
- // input stat is determined by aggregate MR job
- countsB.append("input", aggCounts.getField("input").numberLong());
- countsB.append("emit", aggCounts.getField("emit").numberLong());
+ // copy some elements from a single result
+ // annoying that we have to copy all results for inline, but no way around it
+ if (singleResult.hasField("result")) {
+ result.append(singleResult.getField("result"));
+ } else if (singleResult.hasField("results")) {
+ result.append(singleResult.getField("results"));
+ }
- // reduce count is sum of all reduces that happened
- countsB.append("reduce", aggCounts.getField("reduce").numberLong() + reduceCount);
+ BSONObjBuilder countsB(32);
+ // input stat is determined by aggregate MR job
+ countsB.append("input", aggCounts.getField("input").numberLong());
+ countsB.append("emit", aggCounts.getField("emit").numberLong());
- // ouput is determined by post processing on each shard
- countsB.append("output", outputCount);
- result.append("counts", countsB.done());
+ // reduce count is sum of all reduces that happened
+ countsB.append("reduce", aggCounts.getField("reduce").numberLong() + reduceCount);
- timingBuilder.append("postProcessing", t2.millis());
+ // output is determined by post processing on each shard
+ countsB.append("output", outputCount);
+ result.append("counts", countsB.done());
- result.append("timeMillis", t.millis());
- result.append("timing", timingBuilder.done());
- result.append("shardCounts", shardCounts);
- result.append("postProcessCounts", postCountsB.done());
+ timingBuilder.append("postProcessing", t2.millis());
- return true;
- }
+ result.append("timeMillis", t.millis());
+ result.append("timing", timingBuilder.done());
+ result.append("shardCounts", shardCounts);
+ result.append("postProcessCounts", postCountsB.done());
- private:
+ return true;
+ }
- /**
- * Drops the temporary results collections from each shard.
- */
- void _cleanUp(const set<string>& servers, string dbName, string shardResultCollection) {
- try {
- // drop collections with tmp results on each shard
- for (const auto& server : servers) {
- ScopedDbConnection conn(server);
- conn->dropCollection(dbName + "." + shardResultCollection);
- conn.done();
- }
- }
- catch (const DBException& e) {
- warning() << "Cannot cleanup shard results" << e.toString();
- }
- catch (const std::exception& e) {
- severe() << "Cannot cleanup shard results" << causedBy(e);
+private:
+ /**
+ * Drops the temporary results collections from each shard.
+ */
+ void _cleanUp(const set<string>& servers, string dbName, string shardResultCollection) {
+ try {
+ // drop collections with tmp results on each shard
+ for (const auto& server : servers) {
+ ScopedDbConnection conn(server);
+ conn->dropCollection(dbName + "." + shardResultCollection);
+ conn.done();
}
+ } catch (const DBException& e) {
+ warning() << "Cannot cleanup shard results" << e.toString();
+ } catch (const std::exception& e) {
+ severe() << "Cannot cleanup shard results" << causedBy(e);
}
+ }
- } clusterMapReduceCmd;
+} clusterMapReduceCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index fba40110919..c6173244fbb 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -45,160 +45,168 @@
namespace mongo {
- using std::shared_ptr;
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::shared_ptr;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
- /**
- * Mongos-side command for merging chunks, passes command to appropriate shard.
- */
- class ClusterMergeChunksCommand : public Command {
- public:
- ClusterMergeChunksCommand() : Command("mergeChunks") {}
+/**
+ * Mongos-side command for merging chunks, passes command to appropriate shard.
+ */
+class ClusterMergeChunksCommand : public Command {
+public:
+ ClusterMergeChunksCommand() : Command("mergeChunks") {}
+
+ virtual void help(stringstream& h) const {
+ h << "Merge Chunks command\n"
+ << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ] }";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::splitChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ // Required
+ static BSONField<string> nsField;
+ static BSONField<vector<BSONObj>> boundsField;
+
+ // Used to send sharding state
+ static BSONField<string> shardNameField;
+ static BSONField<string> configField;
+
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ vector<BSONObj> bounds;
+ if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
+ return false;
+ }
+
+ if (bounds.size() == 0) {
+ errmsg = "no bounds were specified";
+ return false;
+ }
- virtual void help(stringstream& h) const {
- h << "Merge Chunks command\n"
- << "usage: { mergeChunks : <ns>, bounds : [ <min key>, <max key> ] }";
+ if (bounds.size() != 2) {
+ errmsg = "only a min and max bound may be specified";
+ return false;
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
- ActionType::splitChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ BSONObj minKey = bounds[0];
+ BSONObj maxKey = bounds[1];
+
+ if (minKey.isEmpty()) {
+ errmsg = "no min key specified";
+ return false;
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ if (maxKey.isEmpty()) {
+ errmsg = "no max key specified";
+ return false;
}
- virtual bool adminOnly() const { return true; }
- virtual bool slaveOk() const { return false; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- // Required
- static BSONField<string> nsField;
- static BSONField<vector<BSONObj> > boundsField;
-
- // Used to send sharding state
- static BSONField<string> shardNameField;
- static BSONField<string> configField;
-
-
- bool run(OperationContext* txn, const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- vector<BSONObj> bounds;
- if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
- return false;
- }
-
- if ( bounds.size() == 0 ) {
- errmsg = "no bounds were specified";
- return false;
- }
-
- if ( bounds.size() != 2 ) {
- errmsg = "only a min and max bound may be specified";
- return false;
- }
-
- BSONObj minKey = bounds[0];
- BSONObj maxKey = bounds[1];
-
- if ( minKey.isEmpty() ) {
- errmsg = "no min key specified";
- return false;
- }
-
- if ( maxKey.isEmpty() ) {
- errmsg = "no max key specified";
- return false;
- }
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
- if (nss.size() == 0) {
- return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace,
- "no namespace specified"));
- }
-
- auto status = grid.catalogCache()->getDatabase(nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
-
- // This refreshes the chunk metadata if stale.
- ChunkManagerPtr manager = config->getChunkManagerIfExists(nss, true);
- if (!manager) {
- return appendCommandStatus(result, Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
-
- if (!manager->getShardKeyPattern().isShardKey(minKey)
- || !manager->getShardKeyPattern().isShardKey(maxKey)) {
- errmsg = stream() << "shard key bounds " << "[" << minKey << "," << maxKey << ")"
- << " are not valid for shard key pattern "
- << manager->getShardKeyPattern().toBSON();
- return false;
- }
-
- minKey = manager->getShardKeyPattern().normalizeShardKey(minKey);
- maxKey = manager->getShardKeyPattern().normalizeShardKey(maxKey);
-
- ChunkPtr firstChunk = manager->findIntersectingChunk(minKey);
- verify(firstChunk);
-
- BSONObjBuilder remoteCmdObjB;
- remoteCmdObjB.append( cmdObj[ ClusterMergeChunksCommand::nsField() ] );
- remoteCmdObjB.append( cmdObj[ ClusterMergeChunksCommand::boundsField() ] );
- remoteCmdObjB.append( ClusterMergeChunksCommand::configField(),
- grid.catalogManager()->connectionString().toString() );
- remoteCmdObjB.append( ClusterMergeChunksCommand::shardNameField(),
- firstChunk->getShardId() );
-
- BSONObj remoteResult;
-
- // Throws, but handled at level above. Don't want to rewrap to preserve exception
- // formatting.
- const auto shard = grid.shardRegistry()->getShard(firstChunk->getShardId());
- if (!shard) {
- return appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound,
- str::stream() << "Can't find shard for chunk: "
- << firstChunk->toString()));
- }
-
- ScopedDbConnection conn(shard->getConnString());
- bool ok = conn->runCommand( "admin", remoteCmdObjB.obj(), remoteResult );
- conn.done();
-
- result.appendElements( remoteResult );
- return ok;
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ if (nss.size() == 0) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::InvalidNamespace, "no namespace specified"));
}
- } clusterMergeChunksCommand;
+ auto status = grid.catalogCache()->getDatabase(nss.db().toString());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
+
+ std::shared_ptr<DBConfig> config = status.getValue();
+ if (!config->isSharded(nss.ns())) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
+ }
+
+ // This refreshes the chunk metadata if stale.
+ ChunkManagerPtr manager = config->getChunkManagerIfExists(nss, true);
+ if (!manager) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded."));
+ }
+
+ if (!manager->getShardKeyPattern().isShardKey(minKey) ||
+ !manager->getShardKeyPattern().isShardKey(maxKey)) {
+ errmsg = stream() << "shard key bounds "
+ << "[" << minKey << "," << maxKey << ")"
+ << " are not valid for shard key pattern "
+ << manager->getShardKeyPattern().toBSON();
+ return false;
+ }
+
+ minKey = manager->getShardKeyPattern().normalizeShardKey(minKey);
+ maxKey = manager->getShardKeyPattern().normalizeShardKey(maxKey);
+
+ ChunkPtr firstChunk = manager->findIntersectingChunk(minKey);
+ verify(firstChunk);
+
+ BSONObjBuilder remoteCmdObjB;
+ remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::nsField()]);
+ remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::boundsField()]);
+ remoteCmdObjB.append(ClusterMergeChunksCommand::configField(),
+ grid.catalogManager()->connectionString().toString());
+ remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(), firstChunk->getShardId());
+
+ BSONObj remoteResult;
+
+ // Throws, but handled at level above. Don't want to rewrap to preserve exception
+ // formatting.
+ const auto shard = grid.shardRegistry()->getShard(firstChunk->getShardId());
+ if (!shard) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::ShardNotFound,
+ str::stream() << "Can't find shard for chunk: " << firstChunk->toString()));
+ }
+
+ ScopedDbConnection conn(shard->getConnString());
+ bool ok = conn->runCommand("admin", remoteCmdObjB.obj(), remoteResult);
+ conn.done();
+
+ result.appendElements(remoteResult);
+ return ok;
+ }
+
+} clusterMergeChunksCommand;
- BSONField<string> ClusterMergeChunksCommand::nsField( "mergeChunks" );
- BSONField<vector<BSONObj> > ClusterMergeChunksCommand::boundsField( "bounds" );
+BSONField<string> ClusterMergeChunksCommand::nsField("mergeChunks");
+BSONField<vector<BSONObj>> ClusterMergeChunksCommand::boundsField("bounds");
- BSONField<string> ClusterMergeChunksCommand::configField( "config" );
- BSONField<string> ClusterMergeChunksCommand::shardNameField( "shardName" );
+BSONField<string> ClusterMergeChunksCommand::configField("config");
+BSONField<string> ClusterMergeChunksCommand::shardNameField("shardName");
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index 602f539ec55..45b9c63cdec 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -50,230 +50,216 @@
namespace mongo {
- using std::shared_ptr;
- using std::unique_ptr;
- using std::string;
+using std::shared_ptr;
+using std::unique_ptr;
+using std::string;
namespace {
- class MoveChunkCmd : public Command {
- public:
- MoveChunkCmd() : Command("moveChunk", false, "movechunk") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
+class MoveChunkCmd : public Command {
+public:
+ MoveChunkCmd() : Command("moveChunk", false, "movechunk") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "Example: move chunk that contains the doc {num : 7} to shard001\n"
+ << " { movechunk : 'test.foo' , find : { num : 7 } , to : 'shard0001' }\n"
+ << "Example: move chunk with lower bound 0 and upper bound 10 to shard001\n"
+ << " { movechunk : 'test.foo' , bounds : [ { num : 0 } , { num : 10 } ] "
+ << " , to : 'shard001' }\n";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::moveChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "Example: move chunk that contains the doc {num : 7} to shard001\n"
- << " { movechunk : 'test.foo' , find : { num : 7 } , to : 'shard0001' }\n"
- << "Example: move chunk with lower bound 0 and upper bound 10 to shard001\n"
- << " { movechunk : 'test.foo' , bounds : [ { num : 0 } , { num : 10 } ] "
- << " , to : 'shard001' }\n";
- }
+ return Status::OK();
+ }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(parseNs(dbname,
- cmdObj))),
- ActionType::moveChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- return Status::OK();
- }
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
- }
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ShardConnection::sync();
- ShardConnection::sync();
+ Timer t;
- Timer t;
+ const NamespaceString nss(parseNs(dbname, cmdObj));
- const NamespaceString nss(parseNs(dbname, cmdObj));
+ std::shared_ptr<DBConfig> config;
- std::shared_ptr<DBConfig> config;
-
- {
- if (nss.size() == 0) {
- return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace,
- "no namespace specified"));
- }
-
- auto status = grid.catalogCache()->getDatabase(nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- config = status.getValue();
+ {
+ if (nss.size() == 0) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::InvalidNamespace, "no namespace specified"));
}
- if (!config->isSharded(nss.ns())) {
- config->reload();
-
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
+ auto status = grid.catalogCache()->getDatabase(nss.db().toString());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
}
- string toString = cmdObj["to"].valuestrsafe();
- if (!toString.size()) {
- errmsg = "you have to specify where you want to move the chunk";
- return false;
- }
+ config = status.getValue();
+ }
+
+ if (!config->isSharded(nss.ns())) {
+ config->reload();
- const auto to = grid.shardRegistry()->getShard(toString);
- if (!to) {
- string msg(str::stream() <<
- "Could not move chunk in '" << nss.ns() <<
- "' to shard '" << toString <<
- "' because that shard does not exist");
- log() << msg;
+ if (!config->isSharded(nss.ns())) {
return appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound, msg));
+ Status(ErrorCodes::NamespaceNotSharded,
+ "ns [" + nss.ns() + " is not sharded."));
}
+ }
- // so far, chunk size serves test purposes; it may or may not become a supported parameter
- long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
- if (maxChunkSizeBytes == 0) {
- maxChunkSizeBytes = Chunk::MaxChunkSize;
- }
+ string toString = cmdObj["to"].valuestrsafe();
+ if (!toString.size()) {
+ errmsg = "you have to specify where you want to move the chunk";
+ return false;
+ }
- BSONObj find = cmdObj.getObjectField("find");
- BSONObj bounds = cmdObj.getObjectField("bounds");
+ const auto to = grid.shardRegistry()->getShard(toString);
+ if (!to) {
+ string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
+ << toString << "' because that shard does not exist");
+ log() << msg;
+ return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
+ }
- // check that only one of the two chunk specification methods is used
- if (find.isEmpty() == bounds.isEmpty()) {
- errmsg = "need to specify either a find query, or both lower and upper bounds.";
- return false;
- }
+ // so far, chunk size serves test purposes; it may or may not become a supported parameter
+ long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
+ if (maxChunkSizeBytes == 0) {
+ maxChunkSizeBytes = Chunk::MaxChunkSize;
+ }
- // This refreshes the chunk metadata if stale.
- ChunkManagerPtr info = config->getChunkManager(nss.ns(), true);
- ChunkPtr chunk;
+ BSONObj find = cmdObj.getObjectField("find");
+ BSONObj bounds = cmdObj.getObjectField("bounds");
- if (!find.isEmpty()) {
+ // check that only one of the two chunk specification methods is used
+ if (find.isEmpty() == bounds.isEmpty()) {
+ errmsg = "need to specify either a find query, or both lower and upper bounds.";
+ return false;
+ }
- StatusWith<BSONObj> status =
- info->getShardKeyPattern().extractShardKeyFromQuery(find);
+ // This refreshes the chunk metadata if stale.
+ ChunkManagerPtr info = config->getChunkManager(nss.ns(), true);
+ ChunkPtr chunk;
- // Bad query
- if (!status.isOK())
- return appendCommandStatus(result, status.getStatus());
+ if (!find.isEmpty()) {
+ StatusWith<BSONObj> status = info->getShardKeyPattern().extractShardKeyFromQuery(find);
- BSONObj shardKey = status.getValue();
+ // Bad query
+ if (!status.isOK())
+ return appendCommandStatus(result, status.getStatus());
- if (shardKey.isEmpty()) {
- errmsg = str::stream() << "no shard key found in chunk query " << find;
- return false;
- }
+ BSONObj shardKey = status.getValue();
- chunk = info->findIntersectingChunk(shardKey);
- verify(chunk.get());
- }
- else {
-
- // Bounds
- if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj())
- || !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream() << "shard key bounds " << "[" << bounds[0].Obj() << ","
- << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
- return false;
- }
-
- BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
-
- chunk = info->findIntersectingChunk(minKey);
- verify(chunk.get());
-
- if (chunk->getMin().woCompare(minKey) != 0
- || chunk->getMax().woCompare(maxKey) != 0) {
-
- errmsg = str::stream() << "no chunk found with the shard key bounds " << "["
- << minKey << "," << maxKey << ")";
- return false;
- }
+ if (shardKey.isEmpty()) {
+ errmsg = str::stream() << "no shard key found in chunk query " << find;
+ return false;
}
- {
- const auto from = grid.shardRegistry()->getShard(chunk->getShardId());
- if (from->getId() == to->getId()) {
- errmsg = "that chunk is already on that shard";
- return false;
- }
+ chunk = info->findIntersectingChunk(shardKey);
+ verify(chunk.get());
+ } else {
+ // Bounds
+ if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
+ !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
+ errmsg = str::stream() << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern "
+ << info->getShardKeyPattern().toBSON();
+ return false;
}
- LOG(0) << "CMD: movechunk: " << cmdObj;
+ BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
+ BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- StatusWith<int> maxTimeMS = LiteParsedQuery::parseMaxTimeMSCommand(cmdObj);
+ chunk = info->findIntersectingChunk(minKey);
+ verify(chunk.get());
- if (!maxTimeMS.isOK()) {
- errmsg = maxTimeMS.getStatus().reason();
+ if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
+ errmsg = str::stream() << "no chunk found with the shard key bounds "
+ << "[" << minKey << "," << maxKey << ")";
return false;
}
+ }
- unique_ptr<WriteConcernOptions> writeConcern(new WriteConcernOptions());
-
- Status status = writeConcern->parseSecondaryThrottle(cmdObj, NULL);
- if (!status.isOK()){
- if (status.code() != ErrorCodes::WriteConcernNotDefined) {
- errmsg = status.toString();
- return false;
- }
-
- // Let the shard decide what write concern to use.
- writeConcern.reset();
+ {
+ const auto from = grid.shardRegistry()->getShard(chunk->getShardId());
+ if (from->getId() == to->getId()) {
+ errmsg = "that chunk is already on that shard";
+ return false;
}
+ }
- BSONObj res;
- if (!chunk->moveAndCommit(to->getId(),
- maxChunkSizeBytes,
- writeConcern.get(),
- cmdObj["_waitForDelete"].trueValue(),
- maxTimeMS.getValue(),
- res)) {
+ LOG(0) << "CMD: movechunk: " << cmdObj;
- errmsg = "move failed";
- result.append("cause", res);
+ StatusWith<int> maxTimeMS = LiteParsedQuery::parseMaxTimeMSCommand(cmdObj);
+
+ if (!maxTimeMS.isOK()) {
+ errmsg = maxTimeMS.getStatus().reason();
+ return false;
+ }
- if (!res["code"].eoo()) {
- result.append(res["code"]);
- }
+ unique_ptr<WriteConcernOptions> writeConcern(new WriteConcernOptions());
+ Status status = writeConcern->parseSecondaryThrottle(cmdObj, NULL);
+ if (!status.isOK()) {
+ if (status.code() != ErrorCodes::WriteConcernNotDefined) {
+ errmsg = status.toString();
return false;
}
- result.append("millis", t.millis());
+ // Let the shard decide what write concern to use.
+ writeConcern.reset();
+ }
+
+ BSONObj res;
+ if (!chunk->moveAndCommit(to->getId(),
+ maxChunkSizeBytes,
+ writeConcern.get(),
+ cmdObj["_waitForDelete"].trueValue(),
+ maxTimeMS.getValue(),
+ res)) {
+ errmsg = "move failed";
+ result.append("cause", res);
+
+ if (!res["code"].eoo()) {
+ result.append(res["code"]);
+ }
- return true;
+ return false;
}
- } moveChunk;
+ result.append("millis", t.millis());
+
+ return true;
+ }
+
+} moveChunk;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 227830f410d..f9d401a26b6 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -51,242 +51,225 @@
namespace mongo {
- using std::shared_ptr;
- using std::set;
- using std::string;
+using std::shared_ptr;
+using std::set;
+using std::string;
namespace {
- class MoveDatabasePrimaryCommand : public Command {
- public:
- MoveDatabasePrimaryCommand() : Command("movePrimary", false, "moveprimary") { }
+class MoveDatabasePrimaryCommand : public Command {
+public:
+ MoveDatabasePrimaryCommand() : Command("movePrimary", false, "moveprimary") {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool adminOnly() const {
- return true;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
+ }
- virtual void help(std::stringstream& help) const {
- help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(parseNs(dbname, cmdObj)), ActionType::moveChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ return Status::OK();
+ }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(
- parseNs(dbname, cmdObj)),
- ActionType::moveChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return cmdObj.firstElement().str();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname_unused,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const string dbname = parseNs("", cmdObj);
- return Status::OK();
+ if (dbname.empty() || !nsIsDbOnly(dbname)) {
+ errmsg = "invalid db name specified: " + dbname;
+ return false;
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return cmdObj.firstElement().str();
+ if (dbname == "admin" || dbname == "config" || dbname == "local") {
+ errmsg = "can't move primary for " + dbname + " database";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname_unused,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ // Flush all cached information. This can't be perfect, but it's better than nothing.
+ grid.catalogCache()->invalidate(dbname);
- const string dbname = parseNs("", cmdObj);
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
- if (dbname.empty() || !nsIsDbOnly(dbname)) {
- errmsg = "invalid db name specified: " + dbname;
- return false;
- }
+ shared_ptr<DBConfig> config = status.getValue();
- if (dbname == "admin" || dbname == "config" || dbname == "local") {
- errmsg = "can't move primary for " + dbname + " database";
- return false;
- }
+ const string to = cmdObj["to"].valuestrsafe();
+ if (!to.size()) {
+ errmsg = "you have to specify where you want to move it";
+ return false;
+ }
- // Flush all cached information. This can't be perfect, but it's better than nothing.
- grid.catalogCache()->invalidate(dbname);
+ shared_ptr<Shard> toShard = grid.shardRegistry()->getShard(to);
+ if (!toShard) {
+ string msg(str::stream() << "Could not move database '" << dbname << "' to shard '"
+ << to << "' because the shard does not exist");
+ log() << msg;
+ return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
+ }
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
+ shared_ptr<Shard> fromShard = grid.shardRegistry()->getShard(config->getPrimaryId());
+ invariant(fromShard);
- shared_ptr<DBConfig> config = status.getValue();
+ if (fromShard->getConnString().sameLogicalEndpoint(toShard->getConnString())) {
+ errmsg = "it is already the primary";
+ return false;
+ }
- const string to = cmdObj["to"].valuestrsafe();
- if (!to.size()) {
- errmsg = "you have to specify where you want to move it";
- return false;
- }
+ if (!grid.catalogManager()->isShardHost(toShard->getConnString())) {
+ errmsg = "that server isn't known to me";
+ return false;
+ }
- shared_ptr<Shard> toShard = grid.shardRegistry()->getShard(to);
- if (!toShard) {
- string msg(str::stream() << "Could not move database '" << dbname
- << "' to shard '" << to
- << "' because the shard does not exist");
- log() << msg;
- return appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound, msg));
- }
+ log() << "Moving " << dbname << " primary from: " << fromShard->toString()
+ << " to: " << toShard->toString();
- shared_ptr<Shard> fromShard =
- grid.shardRegistry()->getShard(config->getPrimaryId());
- invariant(fromShard);
+ string whyMessage(str::stream() << "Moving primary shard of " << dbname);
+ auto scopedDistLock =
+ grid.catalogManager()->getDistLockManager()->lock(dbname + "-movePrimary", whyMessage);
- if (fromShard->getConnString().sameLogicalEndpoint(toShard->getConnString())) {
- errmsg = "it is already the primary";
- return false;
- }
+ if (!scopedDistLock.isOK()) {
+ return appendCommandStatus(result, scopedDistLock.getStatus());
+ }
- if (!grid.catalogManager()->isShardHost(toShard->getConnString())) {
- errmsg = "that server isn't known to me";
- return false;
- }
+ set<string> shardedColls;
+ config->getAllShardedCollections(shardedColls);
- log() << "Moving " << dbname << " primary from: "
- << fromShard->toString() << " to: " << toShard->toString();
+ // Record start in changelog
+ BSONObj moveStartDetails =
+ _buildMoveEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls);
- string whyMessage(str::stream() << "Moving primary shard of " << dbname);
- auto scopedDistLock = grid.catalogManager()->getDistLockManager()->lock(
- dbname + "-movePrimary", whyMessage);
+ grid.catalogManager()->logChange(txn, "movePrimary.start", dbname, moveStartDetails);
- if (!scopedDistLock.isOK()) {
- return appendCommandStatus(result, scopedDistLock.getStatus());
- }
+ BSONArrayBuilder barr;
+ barr.append(shardedColls);
- set<string> shardedColls;
- config->getAllShardedCollections(shardedColls);
-
- // Record start in changelog
- BSONObj moveStartDetails = _buildMoveEntry(dbname,
- fromShard->toString(),
- toShard->toString(),
- shardedColls);
-
- grid.catalogManager()->logChange(txn, "movePrimary.start", dbname, moveStartDetails);
-
- BSONArrayBuilder barr;
- barr.append(shardedColls);
-
- ScopedDbConnection toconn(toShard->getConnString());
-
- // TODO ERH - we need a clone command which replays operations from clone start to now
- // can just use local.oplog.$main
- BSONObj cloneRes;
- bool worked = toconn->runCommand(
- dbname.c_str(),
- BSON("clone" << fromShard->getConnString().toString()
- << "collsToIgnore" << barr.arr()
- << bypassDocumentValidationCommandOption() << true),
- cloneRes);
- toconn.done();
-
- if (!worked) {
- log() << "clone failed" << cloneRes;
- errmsg = "clone failed";
- return false;
- }
+ ScopedDbConnection toconn(toShard->getConnString());
- const string oldPrimary = fromShard->getConnString().toString();
+ // TODO ERH - we need a clone command which replays operations from clone start to now
+ // can just use local.oplog.$main
+ BSONObj cloneRes;
+ bool worked = toconn->runCommand(
+ dbname.c_str(),
+ BSON("clone" << fromShard->getConnString().toString() << "collsToIgnore" << barr.arr()
+ << bypassDocumentValidationCommandOption() << true),
+ cloneRes);
+ toconn.done();
- ScopedDbConnection fromconn(fromShard->getConnString());
+ if (!worked) {
+ log() << "clone failed" << cloneRes;
+ errmsg = "clone failed";
+ return false;
+ }
- config->setPrimary(toShard->getConnString().toString());
+ const string oldPrimary = fromShard->getConnString().toString();
- if (shardedColls.empty()){
+ ScopedDbConnection fromconn(fromShard->getConnString());
- // TODO: Collections can be created in the meantime, and we should handle in the future.
- log() << "movePrimary dropping database on " << oldPrimary
- << ", no sharded collections in " << dbname;
+ config->setPrimary(toShard->getConnString().toString());
- try {
- fromconn->dropDatabase(dbname.c_str());
- }
- catch (DBException& e){
- e.addContext(str::stream() << "movePrimary could not drop the database "
- << dbname << " on " << oldPrimary);
- throw;
- }
+ if (shardedColls.empty()) {
+ // TODO: Collections can be created in the meantime, and we should handle in the future.
+ log() << "movePrimary dropping database on " << oldPrimary
+ << ", no sharded collections in " << dbname;
+ try {
+ fromconn->dropDatabase(dbname.c_str());
+ } catch (DBException& e) {
+ e.addContext(str::stream() << "movePrimary could not drop the database " << dbname
+ << " on " << oldPrimary);
+ throw;
}
- else if (cloneRes["clonedColls"].type() != Array) {
- // Legacy behavior from old mongod with sharded collections, *do not* delete
- // database, but inform user they can drop manually (or ignore).
- warning() << "movePrimary legacy mongod behavior detected. "
- << "User must manually remove unsharded collections in database "
- << dbname << " on " << oldPrimary;
- }
- else {
- // We moved some unsharded collections, but not all
- BSONObjIterator it(cloneRes["clonedColls"].Obj());
-
- while (it.more()){
- BSONElement el = it.next();
- if (el.type() == String){
- try {
- log() << "movePrimary dropping cloned collection " << el.String()
- << " on " << oldPrimary;
- fromconn->dropCollection(el.String());
- }
- catch (DBException& e){
- e.addContext(str::stream() << "movePrimary could not drop the cloned collection "
- << el.String() << " on " << oldPrimary);
- throw;
- }
+ } else if (cloneRes["clonedColls"].type() != Array) {
+ // Legacy behavior from old mongod with sharded collections, *do not* delete
+ // database, but inform user they can drop manually (or ignore).
+ warning() << "movePrimary legacy mongod behavior detected. "
+ << "User must manually remove unsharded collections in database " << dbname
+ << " on " << oldPrimary;
+
+ } else {
+ // We moved some unsharded collections, but not all
+ BSONObjIterator it(cloneRes["clonedColls"].Obj());
+
+ while (it.more()) {
+ BSONElement el = it.next();
+ if (el.type() == String) {
+ try {
+ log() << "movePrimary dropping cloned collection " << el.String() << " on "
+ << oldPrimary;
+ fromconn->dropCollection(el.String());
+ } catch (DBException& e) {
+ e.addContext(str::stream()
+ << "movePrimary could not drop the cloned collection "
+ << el.String() << " on " << oldPrimary);
+ throw;
}
}
}
-
- fromconn.done();
-
- result << "primary" << toShard->toString();
-
- // Record finish in changelog
- BSONObj moveFinishDetails = _buildMoveEntry(dbname,
- oldPrimary,
- toShard->toString(),
- shardedColls);
-
- grid.catalogManager()->logChange(txn, "movePrimary", dbname, moveFinishDetails);
- return true;
}
- private:
- static BSONObj _buildMoveEntry(const string db,
- const string from,
- const string to,
- set<string> shardedColls) {
-
- BSONObjBuilder details;
- details.append("database", db);
- details.append("from", from);
- details.append("to", to);
-
- BSONArrayBuilder collB(details.subarrayStart("shardedCollections"));
- {
- set<string>::iterator it;
- for (it = shardedColls.begin(); it != shardedColls.end(); ++it) {
- collB.append(*it);
- }
+ fromconn.done();
+
+ result << "primary" << toShard->toString();
+
+ // Record finish in changelog
+ BSONObj moveFinishDetails =
+ _buildMoveEntry(dbname, oldPrimary, toShard->toString(), shardedColls);
+
+ grid.catalogManager()->logChange(txn, "movePrimary", dbname, moveFinishDetails);
+ return true;
+ }
+
+private:
+ static BSONObj _buildMoveEntry(const string db,
+ const string from,
+ const string to,
+ set<string> shardedColls) {
+ BSONObjBuilder details;
+ details.append("database", db);
+ details.append("from", from);
+ details.append("to", to);
+
+ BSONArrayBuilder collB(details.subarrayStart("shardedCollections"));
+ {
+ set<string>::iterator it;
+ for (it = shardedColls.begin(); it != shardedColls.end(); ++it) {
+ collB.append(*it);
}
- collB.done();
-
- return details.obj();
}
+ collB.done();
+
+ return details.obj();
+ }
- } movePrimary;
+} movePrimary;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp
index ca1bf2fd9af..df4c158a8b7 100644
--- a/src/mongo/s/commands/cluster_netstat_cmd.cpp
+++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp
@@ -35,47 +35,46 @@
namespace mongo {
namespace {
- class NetStatCmd : public Command {
- public:
- NetStatCmd() : Command("netstat", false, "netstat") { }
+class NetStatCmd : public Command {
+public:
+ NetStatCmd() : Command("netstat", false, "netstat") {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool adminOnly() const {
- return true;
- }
+ virtual bool adminOnly() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual void help(std::stringstream& help) const {
- help << " shows status/reachability of servers in the cluster";
- }
+ virtual void help(std::stringstream& help) const {
+ help << " shows status/reachability of servers in the cluster";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::netstat);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::netstat);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ result.append("configserver", grid.catalogManager()->connectionString().toString());
+ result.append("isdbgrid", 1);
+ return true;
+ }
- result.append("configserver", grid.catalogManager()->connectionString().toString());
- result.append("isdbgrid", 1);
- return true;
- }
+} netstat;
- } netstat;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index 60f7512d172..1d1fe933410 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -53,372 +53,345 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::unique_ptr;
- using std::shared_ptr;
- using std::string;
- using std::vector;
+using boost::intrusive_ptr;
+using std::unique_ptr;
+using std::shared_ptr;
+using std::string;
+using std::vector;
namespace {
- /**
- * Implements the aggregation (pipeline command for sharding).
- */
- class PipelineCommand : public Command {
- public:
- PipelineCommand() : Command(Pipeline::commandName, false) { }
+/**
+ * Implements the aggregation (pipeline command for sharding).
+ */
+class PipelineCommand : public Command {
+public:
+ PipelineCommand() : Command(Pipeline::commandName, false) {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool adminOnly() const {
- return false;
- }
+ virtual bool adminOnly() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual void help(std::stringstream& help) const {
- help << "Runs the sharded aggregation command";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "Runs the sharded aggregation command";
+ }
- // virtuals from Command
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ // virtuals from Command
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ }
- Pipeline::addRequiredPrivileges(this, dbname, cmdObj, out);
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbname, cmdObj);
+
+ auto status = grid.catalogCache()->getDatabase(dbname);
+ if (!status.isOK()) {
+ return appendEmptyResultSet(result, status.getStatus(), fullns);
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const string fullns = parseNs(dbname, cmdObj);
+ shared_ptr<DBConfig> conf = status.getValue();
- auto status = grid.catalogCache()->getDatabase(dbname);
- if (!status.isOK()) {
- return appendEmptyResultSet(result, status.getStatus(), fullns);
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
-
- // If the system isn't running sharded, or the target collection isn't sharded, pass
- // this on to a mongod.
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- return aggPassthrough(conf, cmdObj, result, options);
- }
+ // If the system isn't running sharded, or the target collection isn't sharded, pass
+ // this on to a mongod.
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ return aggPassthrough(conf, cmdObj, result, options);
+ }
- intrusive_ptr<ExpressionContext> mergeCtx =
- new ExpressionContext(txn, NamespaceString(fullns));
- mergeCtx->inRouter = true;
- // explicitly *not* setting mergeCtx->tempDir
+ intrusive_ptr<ExpressionContext> mergeCtx =
+ new ExpressionContext(txn, NamespaceString(fullns));
+ mergeCtx->inRouter = true;
+ // explicitly *not* setting mergeCtx->tempDir
- // Parse the pipeline specification
- intrusive_ptr<Pipeline> pipeline(Pipeline::parseCommand(errmsg, cmdObj, mergeCtx));
- if (!pipeline.get()) {
- // There was some parsing error
- return false;
- }
+ // Parse the pipeline specification
+ intrusive_ptr<Pipeline> pipeline(Pipeline::parseCommand(errmsg, cmdObj, mergeCtx));
+ if (!pipeline.get()) {
+ // There was some parsing error
+ return false;
+ }
- // If the first $match stage is an exact match on the shard key, we only have to send it
- // to one shard, so send the command to that shard.
- BSONObj firstMatchQuery = pipeline->getInitialQuery();
- ChunkManagerPtr chunkMgr = conf->getChunkManager(fullns);
- BSONObj shardKeyMatches = uassertStatusOK(
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(firstMatchQuery));
-
- // Don't need to split pipeline if the first $match is an exact match on shard key, but
- // we can't send the entire pipeline to one shard if there is a $out stage, since that
- // shard may not be the primary shard for the database.
- bool needSplit = shardKeyMatches.isEmpty() || pipeline->hasOutStage();
-
- // Split the pipeline into pieces for mongod(s) and this mongos. If needSplit is true,
- // 'pipeline' will become the merger side.
- intrusive_ptr<Pipeline> shardPipeline(needSplit ? pipeline->splitForSharded()
- : pipeline);
-
- // Create the command for the shards. The 'fromRouter' field means produce output to
- // be merged.
- MutableDocument commandBuilder(shardPipeline->serialize());
- if (needSplit) {
- commandBuilder.setField("fromRouter", Value(true));
- commandBuilder.setField("cursor", Value(DOC("batchSize" << 0)));
- }
- else {
- commandBuilder.setField("cursor", Value(cmdObj["cursor"]));
- }
+ // If the first $match stage is an exact match on the shard key, we only have to send it
+ // to one shard, so send the command to that shard.
+ BSONObj firstMatchQuery = pipeline->getInitialQuery();
+ ChunkManagerPtr chunkMgr = conf->getChunkManager(fullns);
+ BSONObj shardKeyMatches = uassertStatusOK(
+ chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(firstMatchQuery));
+
+ // Don't need to split pipeline if the first $match is an exact match on shard key, but
+ // we can't send the entire pipeline to one shard if there is a $out stage, since that
+ // shard may not be the primary shard for the database.
+ bool needSplit = shardKeyMatches.isEmpty() || pipeline->hasOutStage();
+
+ // Split the pipeline into pieces for mongod(s) and this mongos. If needSplit is true,
+ // 'pipeline' will become the merger side.
+ intrusive_ptr<Pipeline> shardPipeline(needSplit ? pipeline->splitForSharded() : pipeline);
+
+ // Create the command for the shards. The 'fromRouter' field means produce output to
+ // be merged.
+ MutableDocument commandBuilder(shardPipeline->serialize());
+ if (needSplit) {
+ commandBuilder.setField("fromRouter", Value(true));
+ commandBuilder.setField("cursor", Value(DOC("batchSize" << 0)));
+ } else {
+ commandBuilder.setField("cursor", Value(cmdObj["cursor"]));
+ }
- if (cmdObj.hasField("$queryOptions")) {
- commandBuilder.setField("$queryOptions", Value(cmdObj["$queryOptions"]));
- }
+ if (cmdObj.hasField("$queryOptions")) {
+ commandBuilder.setField("$queryOptions", Value(cmdObj["$queryOptions"]));
+ }
- if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
- commandBuilder.setField(LiteParsedQuery::cmdOptionMaxTimeMS,
- Value(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
- }
+ if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
+ commandBuilder.setField(LiteParsedQuery::cmdOptionMaxTimeMS,
+ Value(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
+ }
- BSONObj shardedCommand = commandBuilder.freeze().toBson();
- BSONObj shardQuery = shardPipeline->getInitialQuery();
-
- // Run the command on the shards
- // TODO need to make sure cursors are killed if a retry is needed
- vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(dbname,
- shardedCommand,
- options,
- fullns,
- shardQuery,
- &shardResults);
-
- if (pipeline->isExplain()) {
- // This must be checked before we start modifying result.
- uassertAllShardsSupportExplain(shardResults);
-
- if (needSplit) {
- result << "splitPipeline"
- << DOC("shardsPart" << shardPipeline->writeExplainOps()
- << "mergerPart" << pipeline->writeExplainOps());
- }
- else {
- result << "splitPipeline" << BSONNULL;
- }
+ BSONObj shardedCommand = commandBuilder.freeze().toBson();
+ BSONObj shardQuery = shardPipeline->getInitialQuery();
- BSONObjBuilder shardExplains(result.subobjStart("shards"));
- for (size_t i = 0; i < shardResults.size(); i++) {
- shardExplains.append(shardResults[i].shardTargetId,
- BSON("host" << shardResults[i].target.toString() <<
- "stages" << shardResults[i].result["stages"]));
- }
+ // Run the command on the shards
+ // TODO need to make sure cursors are killed if a retry is needed
+ vector<Strategy::CommandResult> shardResults;
+ Strategy::commandOp(dbname, shardedCommand, options, fullns, shardQuery, &shardResults);
- return true;
- }
+ if (pipeline->isExplain()) {
+ // This must be checked before we start modifying result.
+ uassertAllShardsSupportExplain(shardResults);
- if (!needSplit) {
- invariant(shardResults.size() == 1);
- const auto& reply = shardResults[0].result;
- storePossibleCursor(shardResults[0].target.toString(), reply);
- result.appendElements(reply);
- return reply["ok"].trueValue();
+ if (needSplit) {
+ result << "splitPipeline"
+ << DOC("shardsPart" << shardPipeline->writeExplainOps() << "mergerPart"
+ << pipeline->writeExplainOps());
+ } else {
+ result << "splitPipeline" << BSONNULL;
}
- DocumentSourceMergeCursors::CursorIds cursorIds = parseCursors(shardResults, fullns);
- pipeline->addInitialSource(DocumentSourceMergeCursors::create(cursorIds, mergeCtx));
-
- MutableDocument mergeCmd(pipeline->serialize());
- mergeCmd["cursor"] = Value(cmdObj["cursor"]);
-
- if (cmdObj.hasField("$queryOptions")) {
- mergeCmd["$queryOptions"] = Value(cmdObj["$queryOptions"]);
+ BSONObjBuilder shardExplains(result.subobjStart("shards"));
+ for (size_t i = 0; i < shardResults.size(); i++) {
+ shardExplains.append(shardResults[i].shardTargetId,
+ BSON("host" << shardResults[i].target.toString() << "stages"
+ << shardResults[i].result["stages"]));
}
- if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
- mergeCmd[LiteParsedQuery::cmdOptionMaxTimeMS]
- = Value(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
- }
+ return true;
+ }
- string outputNsOrEmpty;
- if (DocumentSourceOut* out = dynamic_cast<DocumentSourceOut*>(pipeline->output())) {
- outputNsOrEmpty = out->getOutputNs().ns();
- }
+ if (!needSplit) {
+ invariant(shardResults.size() == 1);
+ const auto& reply = shardResults[0].result;
+ storePossibleCursor(shardResults[0].target.toString(), reply);
+ result.appendElements(reply);
+ return reply["ok"].trueValue();
+ }
- // Run merging command on primary shard of database. Need to use ShardConnection so
- // that the merging mongod is sent the config servers on connection init.
- const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- ShardConnection conn(shard->getConnString(), outputNsOrEmpty);
- BSONObj mergedResults = aggRunCommand(conn.get(),
- dbname,
- mergeCmd.freeze().toBson(),
- options);
- conn.done();
+ DocumentSourceMergeCursors::CursorIds cursorIds = parseCursors(shardResults, fullns);
+ pipeline->addInitialSource(DocumentSourceMergeCursors::create(cursorIds, mergeCtx));
- // Copy output from merging (primary) shard to the output object from our command.
- // Also, propagates errmsg and code if ok == false.
- result.appendElements(mergedResults);
+ MutableDocument mergeCmd(pipeline->serialize());
+ mergeCmd["cursor"] = Value(cmdObj["cursor"]);
- return mergedResults["ok"].trueValue();
+ if (cmdObj.hasField("$queryOptions")) {
+ mergeCmd["$queryOptions"] = Value(cmdObj["$queryOptions"]);
}
- private:
- DocumentSourceMergeCursors::CursorIds parseCursors(
- const vector<Strategy::CommandResult>& shardResults,
- const string& fullns);
-
- void killAllCursors(const vector<Strategy::CommandResult>& shardResults);
- void uassertAllShardsSupportExplain(const vector<Strategy::CommandResult>& shardResults);
-
- // These are temporary hacks because the runCommand method doesn't report the exact
- // host the command was run on which is necessary for cursor support. The exact host
- // could be different from conn->getServerAddress() for connections that map to
- // multiple servers such as for replica sets. These also take care of registering
- // returned cursors with mongos's cursorCache.
- BSONObj aggRunCommand(DBClientBase* conn,
- const string& db,
- BSONObj cmd,
- int queryOptions);
-
- bool aggPassthrough(DBConfigPtr conf,
- BSONObj cmd,
- BSONObjBuilder& result,
- int queryOptions);
- } clusterPipelineCmd;
-
- DocumentSourceMergeCursors::CursorIds PipelineCommand::parseCursors(
- const vector<Strategy::CommandResult>& shardResults,
- const string& fullns) {
- try {
- DocumentSourceMergeCursors::CursorIds cursors;
-
- for (size_t i = 0; i < shardResults.size(); i++) {
- BSONObj result = shardResults[i].result;
-
- if (!result["ok"].trueValue()) {
- // If the failure of the sharded command can be accounted to a single error,
- // throw a UserException with that error code; otherwise, throw with a
- // location uassert code.
- int errCode = getUniqueCodeFromCommandResults(shardResults);
- if (errCode == 0) {
- errCode = 17022;
- }
-
- invariant(errCode == result["code"].numberInt() || errCode == 17022);
- uasserted(errCode, str::stream()
- << "sharded pipeline failed on shard "
- << shardResults[i].shardTargetId << ": "
- << result.toString());
- }
+ if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
+ mergeCmd[LiteParsedQuery::cmdOptionMaxTimeMS] =
+ Value(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
+ }
- BSONObj cursor = result["cursor"].Obj();
+ string outputNsOrEmpty;
+ if (DocumentSourceOut* out = dynamic_cast<DocumentSourceOut*>(pipeline->output())) {
+ outputNsOrEmpty = out->getOutputNs().ns();
+ }
- massert(17023,
- str::stream() << "shard " << shardResults[i].shardTargetId
- << " returned non-empty first batch",
- cursor["firstBatch"].Obj().isEmpty());
+ // Run merging command on primary shard of database. Need to use ShardConnection so
+ // that the merging mongod is sent the config servers on connection init.
+ const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ ShardConnection conn(shard->getConnString(), outputNsOrEmpty);
+ BSONObj mergedResults =
+ aggRunCommand(conn.get(), dbname, mergeCmd.freeze().toBson(), options);
+ conn.done();
- massert(17024,
- str::stream() << "shard " << shardResults[i].shardTargetId
- << " returned cursorId 0",
- cursor["id"].Long() != 0);
+ // Copy output from merging (primary) shard to the output object from our command.
+ // Also, propagates errmsg and code if ok == false.
+ result.appendElements(mergedResults);
- massert(17025,
- str::stream() << "shard " << shardResults[i].shardTargetId
- << " returned different ns: " << cursor["ns"],
- cursor["ns"].String() == fullns);
+ return mergedResults["ok"].trueValue();
+ }
- cursors.push_back(std::make_pair(shardResults[i].target, cursor["id"].Long()));
- }
+private:
+ DocumentSourceMergeCursors::CursorIds parseCursors(
+ const vector<Strategy::CommandResult>& shardResults, const string& fullns);
- return cursors;
- }
- catch (...) {
- // Need to clean up any cursors we successfully created on the shards
- killAllCursors(shardResults);
- throw;
- }
- }
+ void killAllCursors(const vector<Strategy::CommandResult>& shardResults);
+ void uassertAllShardsSupportExplain(const vector<Strategy::CommandResult>& shardResults);
- void PipelineCommand::uassertAllShardsSupportExplain(
- const vector<Strategy::CommandResult>& shardResults) {
+ // These are temporary hacks because the runCommand method doesn't report the exact
+ // host the command was run on which is necessary for cursor support. The exact host
+ // could be different from conn->getServerAddress() for connections that map to
+ // multiple servers such as for replica sets. These also take care of registering
+ // returned cursors with mongos's cursorCache.
+ BSONObj aggRunCommand(DBClientBase* conn, const string& db, BSONObj cmd, int queryOptions);
- for (size_t i = 0; i < shardResults.size(); i++) {
- uassert(17403,
- str::stream() << "Shard " << shardResults[i].target.toString()
- << " failed: " << shardResults[i].result,
- shardResults[i].result["ok"].trueValue());
-
- uassert(17404,
- str::stream() << "Shard " << shardResults[i].target.toString()
- << " does not support $explain",
- shardResults[i].result.hasField("stages"));
- }
- }
+ bool aggPassthrough(DBConfigPtr conf, BSONObj cmd, BSONObjBuilder& result, int queryOptions);
+} clusterPipelineCmd;
- void PipelineCommand::killAllCursors(const vector<Strategy::CommandResult>& shardResults) {
- // This function must ignore and log all errors. Callers expect a best-effort attempt at
- // cleanup without exceptions. If any cursors aren't cleaned up here, they will be cleaned
- // up automatically on the shard after 10 minutes anyway.
+DocumentSourceMergeCursors::CursorIds PipelineCommand::parseCursors(
+ const vector<Strategy::CommandResult>& shardResults, const string& fullns) {
+ try {
+ DocumentSourceMergeCursors::CursorIds cursors;
for (size_t i = 0; i < shardResults.size(); i++) {
- try {
- BSONObj result = shardResults[i].result;
- if (!result["ok"].trueValue()) {
- continue;
- }
-
- const long long cursor = result["cursor"]["id"].Long();
- if (!cursor) {
- continue;
+ BSONObj result = shardResults[i].result;
+
+ if (!result["ok"].trueValue()) {
+ // If the failure of the sharded command can be accounted to a single error,
+ // throw a UserException with that error code; otherwise, throw with a
+ // location uassert code.
+ int errCode = getUniqueCodeFromCommandResults(shardResults);
+ if (errCode == 0) {
+ errCode = 17022;
}
- ScopedDbConnection conn(shardResults[i].target);
- conn->killCursor(cursor);
- conn.done();
- }
- catch (const DBException& e) {
- log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
- << " due to DBException: " << e.toString();
- }
- catch (const std::exception& e) {
- log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
- << " due to std::exception: " << e.what();
+ invariant(errCode == result["code"].numberInt() || errCode == 17022);
+ uasserted(errCode,
+ str::stream() << "sharded pipeline failed on shard "
+ << shardResults[i].shardTargetId << ": "
+ << result.toString());
}
- catch (...) {
- log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
- << " due to non-exception";
- }
- }
- }
- BSONObj PipelineCommand::aggRunCommand(DBClientBase* conn,
- const string& db,
- BSONObj cmd,
- int queryOptions) {
+ BSONObj cursor = result["cursor"].Obj();
- // Temporary hack. See comment on declaration for details.
+ massert(17023,
+ str::stream() << "shard " << shardResults[i].shardTargetId
+ << " returned non-empty first batch",
+ cursor["firstBatch"].Obj().isEmpty());
- massert(17016,
- "should only be running an aggregate command here",
- str::equals(cmd.firstElementFieldName(), "aggregate"));
+ massert(17024,
+ str::stream() << "shard " << shardResults[i].shardTargetId
+ << " returned cursorId 0",
+ cursor["id"].Long() != 0);
- auto cursor = conn->query(db + ".$cmd",
- cmd,
- -1, // nToReturn
- 0, // nToSkip
- NULL, // fieldsToReturn
- queryOptions);
- massert(17014,
- str::stream() << "aggregate command didn't return results on host: "
- << conn->toString(),
- cursor && cursor->more());
+ massert(17025,
+ str::stream() << "shard " << shardResults[i].shardTargetId
+ << " returned different ns: " << cursor["ns"],
+ cursor["ns"].String() == fullns);
- BSONObj result = cursor->nextSafe().getOwned();
-
- if (ErrorCodes::SendStaleConfig == getStatusFromCommandResult(result)) {
- throw RecvStaleConfigException("command failed because of stale config", result);
+ cursors.push_back(std::make_pair(shardResults[i].target, cursor["id"].Long()));
}
- uassertStatusOK(storePossibleCursor(cursor->originalHost(), result));
- return result;
+ return cursors;
+ } catch (...) {
+ // Need to clean up any cursors we successfully created on the shards
+ killAllCursors(shardResults);
+ throw;
+ }
+}
+
+void PipelineCommand::uassertAllShardsSupportExplain(
+ const vector<Strategy::CommandResult>& shardResults) {
+ for (size_t i = 0; i < shardResults.size(); i++) {
+ uassert(17403,
+ str::stream() << "Shard " << shardResults[i].target.toString()
+ << " failed: " << shardResults[i].result,
+ shardResults[i].result["ok"].trueValue());
+
+ uassert(17404,
+ str::stream() << "Shard " << shardResults[i].target.toString()
+ << " does not support $explain",
+ shardResults[i].result.hasField("stages"));
}
+}
- bool PipelineCommand::aggPassthrough(DBConfigPtr conf,
- BSONObj cmd,
- BSONObjBuilder& out,
- int queryOptions) {
+void PipelineCommand::killAllCursors(const vector<Strategy::CommandResult>& shardResults) {
+ // This function must ignore and log all errors. Callers expect a best-effort attempt at
+ // cleanup without exceptions. If any cursors aren't cleaned up here, they will be cleaned
+ // up automatically on the shard after 10 minutes anyway.
- // Temporary hack. See comment on declaration for details.
- const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- ShardConnection conn(shard->getConnString(), "");
- BSONObj result = aggRunCommand(conn.get(), conf->name(), cmd, queryOptions);
- conn.done();
- out.appendElements(result);
- return result["ok"].trueValue();
+ for (size_t i = 0; i < shardResults.size(); i++) {
+ try {
+ BSONObj result = shardResults[i].result;
+ if (!result["ok"].trueValue()) {
+ continue;
+ }
+
+ const long long cursor = result["cursor"]["id"].Long();
+ if (!cursor) {
+ continue;
+ }
+
+ ScopedDbConnection conn(shardResults[i].target);
+ conn->killCursor(cursor);
+ conn.done();
+ } catch (const DBException& e) {
+ log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
+ << " due to DBException: " << e.toString();
+ } catch (const std::exception& e) {
+ log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
+ << " due to std::exception: " << e.what();
+ } catch (...) {
+ log() << "Couldn't kill aggregation cursor on shard: " << shardResults[i].target
+ << " due to non-exception";
+ }
+ }
+}
+
+BSONObj PipelineCommand::aggRunCommand(DBClientBase* conn,
+ const string& db,
+ BSONObj cmd,
+ int queryOptions) {
+ // Temporary hack. See comment on declaration for details.
+
+ massert(17016,
+ "should only be running an aggregate command here",
+ str::equals(cmd.firstElementFieldName(), "aggregate"));
+
+ auto cursor = conn->query(db + ".$cmd",
+ cmd,
+ -1, // nToReturn
+ 0, // nToSkip
+ NULL, // fieldsToReturn
+ queryOptions);
+ massert(
+ 17014,
+ str::stream() << "aggregate command didn't return results on host: " << conn->toString(),
+ cursor && cursor->more());
+
+ BSONObj result = cursor->nextSafe().getOwned();
+
+ if (ErrorCodes::SendStaleConfig == getStatusFromCommandResult(result)) {
+ throw RecvStaleConfigException("command failed because of stale config", result);
}
-} // namespace
-} // namespace mongo
+ uassertStatusOK(storePossibleCursor(cursor->originalHost(), result));
+ return result;
+}
+
+bool PipelineCommand::aggPassthrough(DBConfigPtr conf,
+ BSONObj cmd,
+ BSONObjBuilder& out,
+ int queryOptions) {
+ // Temporary hack. See comment on declaration for details.
+ const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ ShardConnection conn(shard->getConnString(), "");
+ BSONObj result = aggRunCommand(conn.get(), conf->name(), cmd, queryOptions);
+ conn.done();
+ out.appendElements(result);
+ return result["ok"].trueValue();
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index bb66ce55998..1411f5bc3b1 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -38,141 +38,137 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::string;
+using std::stringstream;
+using std::vector;
- /**
- * Base class for mongos plan cache commands.
- * Cluster plan cache commands don't do much more than
- * forwarding the commands to all shards and combining the results.
- */
- class ClusterPlanCacheCmd : public Command {
+/**
+ * Base class for mongos plan cache commands.
+ * Cluster plan cache commands don't do much more than
+ * forwarding the commands to all shards and combining the results.
+ */
+class ClusterPlanCacheCmd : public Command {
MONGO_DISALLOW_COPYING(ClusterPlanCacheCmd);
- public:
- virtual ~ClusterPlanCacheCmd() {
- }
+public:
+ virtual ~ClusterPlanCacheCmd() {}
- bool slaveOk() const {
- return false;
- }
-
- bool slaveOverrideOk() const {
- return true;
- }
+ bool slaveOk() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ bool slaveOverrideOk() const {
+ return true;
+ }
- void help(stringstream& ss) const {
- ss << _helpText;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+ void help(stringstream& ss) const {
+ ss << _helpText;
+ }
- if (authzSession->isAuthorizedForActionsOnResource(pattern, _actionType)) {
- return Status::OK();
- }
+ Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, _actionType)) {
+ return Status::OK();
}
- // Cluster plan cache command entry point.
- bool run(OperationContext* txn, const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result);
-
- public:
-
- /**
- * Instantiates a command that can be invoked by "name", which will be described by
- * "helpText", and will require privilege "actionType" to run.
- */
- ClusterPlanCacheCmd( const std::string& name, const std::string& helpText,
- ActionType actionType ) :
- Command( name ), _helpText( helpText ), _actionType( actionType ) {
- }
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+
+ // Cluster plan cache command entry point.
+ bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result);
- private:
-
- std::string _helpText;
- ActionType _actionType;
- };
-
- //
- // Cluster plan cache command implementation(s) below
- //
-
- bool ClusterPlanCacheCmd::run(OperationContext* txn, const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errMsg,
- BSONObjBuilder& result) {
- const std::string fullns = parseNs(dbName, cmdObj);
- NamespaceString nss(fullns);
-
- // Dispatch command to all the shards.
- // Targeted shard commands are generally data-dependent but plan cache
- // commands are tied to query shape (data has no effect on query shape).
- vector<Strategy::CommandResult> results;
- Strategy::commandOp(dbName, cmdObj, options, nss.ns(), BSONObj(), &results);
-
- // Set value of first shard result's "ok" field.
- bool clusterCmdResult = true;
-
- for (vector<Strategy::CommandResult>::const_iterator i = results.begin();
- i != results.end(); ++i) {
- const Strategy::CommandResult& cmdResult = *i;
-
- // XXX: In absence of sensible aggregation strategy,
- // promote first shard's result to top level.
- if (i == results.begin()) {
- result.appendElements(cmdResult.result);
- clusterCmdResult = cmdResult.result["ok"].trueValue();
- }
-
- // Append shard result as a sub object.
- // Name the field after the shard.
- string shardName = cmdResult.shardTargetId;
- result.append(shardName, cmdResult.result);
+public:
+ /**
+ * Instantiates a command that can be invoked by "name", which will be described by
+ * "helpText", and will require privilege "actionType" to run.
+ */
+ ClusterPlanCacheCmd(const std::string& name, const std::string& helpText, ActionType actionType)
+ : Command(name), _helpText(helpText), _actionType(actionType) {}
+
+private:
+ std::string _helpText;
+ ActionType _actionType;
+};
+
+//
+// Cluster plan cache command implementation(s) below
+//
+
+bool ClusterPlanCacheCmd::run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errMsg,
+ BSONObjBuilder& result) {
+ const std::string fullns = parseNs(dbName, cmdObj);
+ NamespaceString nss(fullns);
+
+ // Dispatch command to all the shards.
+ // Targeted shard commands are generally data-dependent but plan cache
+ // commands are tied to query shape (data has no effect on query shape).
+ vector<Strategy::CommandResult> results;
+ Strategy::commandOp(dbName, cmdObj, options, nss.ns(), BSONObj(), &results);
+
+ // Set value of first shard result's "ok" field.
+ bool clusterCmdResult = true;
+
+ for (vector<Strategy::CommandResult>::const_iterator i = results.begin(); i != results.end();
+ ++i) {
+ const Strategy::CommandResult& cmdResult = *i;
+
+ // XXX: In absence of sensible aggregation strategy,
+ // promote first shard's result to top level.
+ if (i == results.begin()) {
+ result.appendElements(cmdResult.result);
+ clusterCmdResult = cmdResult.result["ok"].trueValue();
}
- return clusterCmdResult;
+ // Append shard result as a sub object.
+ // Name the field after the shard.
+ string shardName = cmdResult.shardTargetId;
+ result.append(shardName, cmdResult.result);
}
- //
- // Register plan cache commands at startup
- //
+ return clusterCmdResult;
+}
- namespace {
+//
+// Register plan cache commands at startup
+//
- MONGO_INITIALIZER(RegisterPlanCacheCommands)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
+namespace {
- new ClusterPlanCacheCmd(
- "planCacheListQueryShapes",
- "Displays all query shapes in a collection.",
- ActionType::planCacheRead );
+MONGO_INITIALIZER(RegisterPlanCacheCommands)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
- new ClusterPlanCacheCmd(
- "planCacheClear",
- "Drops one or all cached queries in a collection.",
- ActionType::planCacheWrite );
+ new ClusterPlanCacheCmd("planCacheListQueryShapes",
+ "Displays all query shapes in a collection.",
+ ActionType::planCacheRead);
- new ClusterPlanCacheCmd(
- "planCacheListPlans",
- "Displays the cached plans for a query shape.",
- ActionType::planCacheRead );
+ new ClusterPlanCacheCmd("planCacheClear",
+ "Drops one or all cached queries in a collection.",
+ ActionType::planCacheWrite);
- return Status::OK();
- }
+ new ClusterPlanCacheCmd("planCacheListPlans",
+ "Displays the cached plans for a query shape.",
+ ActionType::planCacheRead);
+
+ return Status::OK();
+}
- } // namespace
+} // namespace
-} // namespace mongo
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_profile_cmd.cpp b/src/mongo/s/commands/cluster_profile_cmd.cpp
index e4084def78b..ca619d80fbe 100644
--- a/src/mongo/s/commands/cluster_profile_cmd.cpp
+++ b/src/mongo/s/commands/cluster_profile_cmd.cpp
@@ -33,43 +33,41 @@
namespace mongo {
namespace {
- class ProfileCmd : public Command {
- public:
- ProfileCmd() : Command("profile", false) { }
+class ProfileCmd : public Command {
+public:
+ ProfileCmd() : Command("profile", false) {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool adminOnly() const {
- return false;
- }
+ virtual bool adminOnly() const {
+ return false;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::enableProfiler);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
- ActionSet actions;
- actions.addAction(ActionType::enableProfiler);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ errmsg = "profile currently not supported via mongos";
+ return false;
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+} profileCmd;
- errmsg = "profile currently not supported via mongos";
- return false;
- }
-
- } profileCmd;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index e936e926d3b..14aeaf6bf42 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -45,90 +45,84 @@
namespace mongo {
- using std::string;
- using std::vector;
+using std::string;
+using std::vector;
namespace {
- class RemoveShardCmd : public Command {
- public:
- RemoveShardCmd() : Command("removeShard", false, "removeshard") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "remove a shard from the system.";
+class RemoveShardCmd : public Command {
+public:
+ RemoveShardCmd() : Command("removeShard", false, "removeshard") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "remove a shard from the system.";
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::removeShard);
+ out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const string target = cmdObj.firstElement().valuestrsafe();
+
+ const auto s = grid.shardRegistry()->getShard(target);
+ if (!s) {
+ string msg(str::stream() << "Could not drop shard '" << target
+ << "' because it does not exist");
+ log() << msg;
+ return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
-
- ActionSet actions;
- actions.addAction(ActionType::removeShard);
- out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
+ StatusWith<ShardDrainingStatus> removeShardResult =
+ grid.catalogManager()->removeShard(txn, s->getId());
+ if (!removeShardResult.isOK()) {
+ return appendCommandStatus(result, removeShardResult.getStatus());
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const string target = cmdObj.firstElement().valuestrsafe();
-
- const auto s = grid.shardRegistry()->getShard(target);
- if (!s) {
- string msg(str::stream() <<
- "Could not drop shard '" << target <<
- "' because it does not exist");
- log() << msg;
- return appendCommandStatus(result,
- Status(ErrorCodes::ShardNotFound, msg));
- }
-
- StatusWith<ShardDrainingStatus> removeShardResult =
- grid.catalogManager()->removeShard(txn, s->getId());
- if (!removeShardResult.isOK()) {
- return appendCommandStatus(result, removeShardResult.getStatus());
- }
-
- vector<string> databases;
- grid.catalogManager()->getDatabasesForShard(s->getId(), &databases);
-
- // Get BSONObj containing:
- // 1) note about moving or dropping databases in a shard
- // 2) list of databases (excluding 'local' database) that need to be moved
- BSONObj dbInfo;
- {
- BSONObjBuilder dbInfoBuilder;
- dbInfoBuilder.append("note",
- "you need to drop or movePrimary these databases");
- BSONArrayBuilder dbs(dbInfoBuilder.subarrayStart("dbsToMove"));
- for (vector<string>::const_iterator it = databases.begin();
- it != databases.end();
- it++) {
- if (*it != "local") {
- dbs.append(*it);
- }
+ vector<string> databases;
+ grid.catalogManager()->getDatabasesForShard(s->getId(), &databases);
+
+ // Get BSONObj containing:
+ // 1) note about moving or dropping databases in a shard
+ // 2) list of databases (excluding 'local' database) that need to be moved
+ BSONObj dbInfo;
+ {
+ BSONObjBuilder dbInfoBuilder;
+ dbInfoBuilder.append("note", "you need to drop or movePrimary these databases");
+ BSONArrayBuilder dbs(dbInfoBuilder.subarrayStart("dbsToMove"));
+ for (vector<string>::const_iterator it = databases.begin(); it != databases.end();
+ it++) {
+ if (*it != "local") {
+ dbs.append(*it);
}
- dbs.doneFast();
- dbInfo = dbInfoBuilder.obj();
}
+ dbs.doneFast();
+ dbInfo = dbInfoBuilder.obj();
+ }
- // TODO: Standardize/Seperate how we append to the result object
- switch (removeShardResult.getValue()) {
+ // TODO: Standardize/Seperate how we append to the result object
+ switch (removeShardResult.getValue()) {
case ShardDrainingStatus::STARTED:
result.append("msg", "draining started successfully");
result.append("state", "started");
@@ -137,10 +131,10 @@ namespace {
break;
case ShardDrainingStatus::ONGOING: {
vector<ChunkType> chunks;
- Status status = grid.catalogManager()->getChunks(
- Query(BSON(ChunkType::shard(s->getId()))),
- 0, // return all
- &chunks);
+ Status status =
+ grid.catalogManager()->getChunks(Query(BSON(ChunkType::shard(s->getId()))),
+ 0, // return all
+ &chunks);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -161,12 +155,12 @@ namespace {
result.append("msg", "removeshard completed successfully");
result.append("state", "completed");
result.append("shard", s->getId());
- }
-
- return true;
}
- } removeShardCmd;
+ return true;
+ }
+
+} removeShardCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_repair_database_cmd.cpp b/src/mongo/s/commands/cluster_repair_database_cmd.cpp
index 33c849ecde6..5383b5688bf 100644
--- a/src/mongo/s/commands/cluster_repair_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_repair_database_cmd.cpp
@@ -33,19 +33,19 @@
namespace mongo {
namespace {
- class ClusterRepairDatabaseCmd : public RunOnAllShardsCommand {
- public:
- ClusterRepairDatabaseCmd() : RunOnAllShardsCommand("repairDatabase") {}
+class ClusterRepairDatabaseCmd : public RunOnAllShardsCommand {
+public:
+ ClusterRepairDatabaseCmd() : RunOnAllShardsCommand("repairDatabase") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::repairDatabase);
- out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
- }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::repairDatabase);
+ out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
+ }
- } clusterRepairDatabaseCmd;
+} clusterRepairDatabaseCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
index 0164fe3cc15..bd8c0332bb1 100644
--- a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
+++ b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
@@ -36,53 +36,51 @@
namespace mongo {
namespace {
- class CmdReplSetGetStatus : public Command {
- public:
- CmdReplSetGetStatus() : Command("replSetGetStatus") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual void help(std::stringstream& help) const {
- help << "Not supported through mongos";
+class CmdReplSetGetStatus : public Command {
+public:
+ CmdReplSetGetStatus() : Command("replSetGetStatus") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "Not supported through mongos";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ // Require no auth since this command isn't supported in mongos
+ return Status::OK();
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ if (cmdObj["forShell"].trueValue()) {
+ LastError::get(cc()).disable();
+ ClusterLastErrorInfo::get(cc()).disableForCommand();
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ errmsg = "replSetGetStatus is not supported through mongos";
+ result.append("info", "mongos");
- // Require no auth since this command isn't supported in mongos
- return Status::OK();
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- if (cmdObj["forShell"].trueValue()) {
- LastError::get(cc()).disable();
- ClusterLastErrorInfo::get(cc()).disableForCommand();
- }
-
- errmsg = "replSetGetStatus is not supported through mongos";
- result.append("info", "mongos");
-
- return false;
- }
+ return false;
+ }
- } cmdReplSetGetStatus;
+} cmdReplSetGetStatus;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_reset_error_cmd.cpp b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
index efa56f524b3..e82b43db1ec 100644
--- a/src/mongo/s/commands/cluster_reset_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
@@ -40,56 +40,50 @@
namespace mongo {
namespace {
- class CmdShardingResetError : public Command {
- public:
- CmdShardingResetError() : Command("resetError", false, "reseterror") { }
+class CmdShardingResetError : public Command {
+public:
+ CmdShardingResetError() : Command("resetError", false, "reseterror") {}
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- // No auth required
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ virtual bool slaveOk() const {
+ return true;
+ }
- LastError::get(cc()).reset();
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
+ }
- const std::set<std::string>* shards =
- ClusterLastErrorInfo::get(cc()).getPrevShardHosts();
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ LastError::get(cc()).reset();
- for (std::set<std::string>::const_iterator i = shards->begin();
- i != shards->end();
- i++) {
+ const std::set<std::string>* shards = ClusterLastErrorInfo::get(cc()).getPrevShardHosts();
- const std::string shardName = *i;
+ for (std::set<std::string>::const_iterator i = shards->begin(); i != shards->end(); i++) {
+ const std::string shardName = *i;
- ShardConnection conn(ConnectionString(shardName, ConnectionString::SET), "");
+ ShardConnection conn(ConnectionString(shardName, ConnectionString::SET), "");
- BSONObj res;
+ BSONObj res;
- // Don't care about result from shards.
- conn->runCommand(dbname, cmdObj, res);
- conn.done();
- }
-
- return true;
+ // Don't care about result from shards.
+ conn->runCommand(dbname, cmdObj, res);
+ conn.done();
}
- } cmdShardingResetError;
+ return true;
+ }
+
+} cmdShardingResetError;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index ccc0edd67f8..6e7c6f2cb55 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -55,464 +55,436 @@
namespace mongo {
- using std::shared_ptr;
- using std::list;
- using std::set;
- using std::string;
- using std::vector;
+using std::shared_ptr;
+using std::list;
+using std::set;
+using std::string;
+using std::vector;
namespace {
- class ShardCollectionCmd : public Command {
- public:
- ShardCollectionCmd() : Command("shardCollection", false, "shardcollection") { }
-
- virtual bool slaveOk() const {
- return true;
- }
-
- virtual bool adminOnly() const {
- return true;
+class ShardCollectionCmd : public Command {
+public:
+ ShardCollectionCmd() : Command("shardCollection", false, "shardcollection") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << "Shard a collection. Requires key. Optional unique."
+ << " Sharding must already be enabled for the database.\n"
+ << " { enablesharding : \"<dbname>\" }\n";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::enableSharding)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- virtual bool isWriteCommandForConfigServer() const {
+ return Status::OK();
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ const string ns = parseNs(dbname, cmdObj);
+ if (ns.size() == 0) {
+ errmsg = "no ns";
return false;
}
- virtual void help(std::stringstream& help) const {
- help << "Shard a collection. Requires key. Optional unique."
- << " Sharding must already be enabled for the database.\n"
- << " { enablesharding : \"<dbname>\" }\n";
+ const NamespaceString nsStr(ns);
+ if (!nsStr.isValid()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::InvalidNamespace, "invalid collection namespace [" + ns + "]"));
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
-
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(parseNs(dbname,
- cmdObj))),
- ActionType::enableSharding)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- return Status::OK();
+ auto config = uassertStatusOK(grid.catalogCache()->getDatabase(nsStr.db().toString()));
+ if (!config->isShardingEnabled()) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ str::stream() << "sharding not enabled for db " << nsStr.db()));
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ if (config->isSharded(ns)) {
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ str::stream() << "sharding already enabled for collection " << ns));
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- const string ns = parseNs(dbname, cmdObj);
- if (ns.size() == 0) {
- errmsg = "no ns";
- return false;
- }
-
- const NamespaceString nsStr(ns);
- if (!nsStr.isValid()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::InvalidNamespace,
- "invalid collection namespace [" + ns + "]"));
- }
-
- auto config = uassertStatusOK(grid.catalogCache()->getDatabase(nsStr.db().toString()));
- if (!config->isShardingEnabled()) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- str::stream() << "sharding not enabled for db " << nsStr.db()));
- }
-
- if (config->isSharded(ns)) {
- return appendCommandStatus(
- result,
- Status(ErrorCodes::IllegalOperation,
- str::stream() << "sharding already enabled for collection " << ns));
- }
+ // NOTE: We *must* take ownership of the key here - otherwise the shared BSONObj
+ // becomes corrupt as soon as the command ends.
+ BSONObj proposedKey = cmdObj.getObjectField("key").getOwned();
+ if (proposedKey.isEmpty()) {
+ errmsg = "no shard key";
+ return false;
+ }
- // NOTE: We *must* take ownership of the key here - otherwise the shared BSONObj
- // becomes corrupt as soon as the command ends.
- BSONObj proposedKey = cmdObj.getObjectField("key").getOwned();
- if (proposedKey.isEmpty()) {
- errmsg = "no shard key";
- return false;
- }
+ ShardKeyPattern proposedKeyPattern(proposedKey);
+ if (!proposedKeyPattern.isValid()) {
+ errmsg = str::stream() << "Unsupported shard key pattern. Pattern must"
+ << " either be a single hashed field, or a list"
+ << " of ascending fields.";
+ return false;
+ }
- ShardKeyPattern proposedKeyPattern(proposedKey);
- if (!proposedKeyPattern.isValid()) {
- errmsg = str::stream() << "Unsupported shard key pattern. Pattern must"
- << " either be a single hashed field, or a list"
- << " of ascending fields.";
- return false;
- }
+ bool isHashedShardKey = proposedKeyPattern.isHashedPattern();
- bool isHashedShardKey = proposedKeyPattern.isHashedPattern();
+ if (isHashedShardKey && cmdObj["unique"].trueValue()) {
+ dassert(proposedKey.nFields() == 1);
- if (isHashedShardKey && cmdObj["unique"].trueValue()) {
- dassert(proposedKey.nFields() == 1);
+ // it's possible to ensure uniqueness on the hashed field by
+ // declaring an additional (non-hashed) unique index on the field,
+ // but the hashed shard key itself should not be declared unique
+ errmsg = "hashed shard keys cannot be declared unique.";
+ return false;
+ }
- // it's possible to ensure uniqueness on the hashed field by
- // declaring an additional (non-hashed) unique index on the field,
- // but the hashed shard key itself should not be declared unique
- errmsg = "hashed shard keys cannot be declared unique.";
- return false;
- }
+ if (ns.find(".system.") != string::npos) {
+ errmsg = "can't shard system namespaces";
+ return false;
+ }
- if (ns.find(".system.") != string::npos) {
- errmsg = "can't shard system namespaces";
- return false;
+ // The rest of the checks require a connection to the primary db
+ ConnectionString shardConnString;
+ {
+ const auto shard = grid.shardRegistry()->getShard(config->getPrimaryId());
+ shardConnString = shard->getConnString();
+ }
+ ScopedDbConnection conn(shardConnString);
+
+ // check that collection is not capped
+ BSONObj res;
+ {
+ list<BSONObj> all = conn->getCollectionInfos(
+ config->name(), BSON("name" << nsToCollectionSubstring(ns)));
+ if (!all.empty()) {
+ res = all.front().getOwned();
}
+ }
- // The rest of the checks require a connection to the primary db
- ConnectionString shardConnString;
- {
- const auto shard = grid.shardRegistry()->getShard(config->getPrimaryId());
- shardConnString = shard->getConnString();
- }
- ScopedDbConnection conn(shardConnString);
-
- //check that collection is not capped
- BSONObj res;
- {
- list<BSONObj> all = conn->getCollectionInfos(
- config->name(),
- BSON("name" << nsToCollectionSubstring(ns)));
- if (!all.empty()) {
- res = all.front().getOwned();
- }
- }
+ if (res["options"].type() == Object &&
+ res["options"].embeddedObject()["capped"].trueValue()) {
+ errmsg = "can't shard capped collection";
+ conn.done();
+ return false;
+ }
- if (res["options"].type() == Object &&
- res["options"].embeddedObject()["capped"].trueValue()) {
- errmsg = "can't shard capped collection";
+ // The proposed shard key must be validated against the set of existing indexes.
+ // In particular, we must ensure the following constraints
+ //
+ // 1. All existing unique indexes, except those which start with the _id index,
+ // must contain the proposed key as a prefix (uniqueness of the _id index is
+ // ensured by the _id generation process or guaranteed by the user).
+ //
+ // 2. If the collection is not empty, there must exist at least one index that
+ // is "useful" for the proposed key. A "useful" index is defined as follows
+ // Useful Index:
+ // i. contains proposedKey as a prefix
+ // ii. is not a sparse index or partial index
+ // iii. contains no null values
+ // iv. is not multikey (maybe lift this restriction later)
+ // v. if a hashed index, has default seed (lift this restriction later)
+ //
+ // 3. If the proposed shard key is specified as unique, there must exist a useful,
+ // unique index exactly equal to the proposedKey (not just a prefix).
+ //
+ // After validating these constraint:
+ //
+ // 4. If there is no useful index, and the collection is non-empty, we
+ // must fail.
+ //
+ // 5. If the collection is empty, and it's still possible to create an index
+ // on the proposed key, we go ahead and do so.
+
+ list<BSONObj> indexes = conn->getIndexSpecs(ns);
+
+ // 1. Verify consistency with existing unique indexes
+ ShardKeyPattern proposedShardKey(proposedKey);
+ for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
+ BSONObj idx = *it;
+ BSONObj currentKey = idx["key"].embeddedObject();
+ bool isUnique = idx["unique"].trueValue();
+
+ if (isUnique && !proposedShardKey.isUniqueIndexCompatible(currentKey)) {
+ errmsg = str::stream() << "can't shard collection '" << ns << "' "
+ << "with unique index on " << currentKey << " "
+ << "and proposed shard key " << proposedKey << ". "
+ << "Uniqueness can't be maintained unless "
+ << "shard key is a prefix";
conn.done();
return false;
}
+ }
- // The proposed shard key must be validated against the set of existing indexes.
- // In particular, we must ensure the following constraints
- //
- // 1. All existing unique indexes, except those which start with the _id index,
- // must contain the proposed key as a prefix (uniqueness of the _id index is
- // ensured by the _id generation process or guaranteed by the user).
- //
- // 2. If the collection is not empty, there must exist at least one index that
- // is "useful" for the proposed key. A "useful" index is defined as follows
- // Useful Index:
- // i. contains proposedKey as a prefix
- // ii. is not a sparse index or partial index
- // iii. contains no null values
- // iv. is not multikey (maybe lift this restriction later)
- // v. if a hashed index, has default seed (lift this restriction later)
- //
- // 3. If the proposed shard key is specified as unique, there must exist a useful,
- // unique index exactly equal to the proposedKey (not just a prefix).
- //
- // After validating these constraint:
- //
- // 4. If there is no useful index, and the collection is non-empty, we
- // must fail.
- //
- // 5. If the collection is empty, and it's still possible to create an index
- // on the proposed key, we go ahead and do so.
-
- list<BSONObj> indexes = conn->getIndexSpecs(ns);
-
- // 1. Verify consistency with existing unique indexes
- ShardKeyPattern proposedShardKey(proposedKey);
- for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
- BSONObj idx = *it;
- BSONObj currentKey = idx["key"].embeddedObject();
- bool isUnique = idx["unique"].trueValue();
-
- if (isUnique && !proposedShardKey.isUniqueIndexCompatible(currentKey)) {
- errmsg = str::stream() << "can't shard collection '" << ns << "' "
- << "with unique index on " << currentKey << " "
- << "and proposed shard key " << proposedKey << ". "
- << "Uniqueness can't be maintained unless "
- << "shard key is a prefix";
+ // 2. Check for a useful index
+ bool hasUsefulIndexForKey = false;
+
+ for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
+ BSONObj idx = *it;
+ BSONObj currentKey = idx["key"].embeddedObject();
+ // Check 2.i. and 2.ii.
+ if (!idx["sparse"].trueValue() && idx["filter"].eoo() &&
+ proposedKey.isPrefixOf(currentKey)) {
+ // We can't currently use hashed indexes with a non-default hash seed
+ // Check v.
+ // Note that this means that, for sharding, we only support one hashed index
+ // per field per collection.
+ if (isHashedShardKey && !idx["seed"].eoo() &&
+ idx["seed"].numberInt() != BSONElementHasher::DEFAULT_HASH_SEED) {
+ errmsg = str::stream() << "can't shard collection " << ns
+ << " with hashed shard key " << proposedKey
+ << " because the hashed index uses a non-default"
+ << " seed of " << idx["seed"].numberInt();
conn.done();
return false;
}
+
+ hasUsefulIndexForKey = true;
}
+ }
- // 2. Check for a useful index
- bool hasUsefulIndexForKey = false;
+ // 3. If proposed key is required to be unique, additionally check for exact match.
+ bool careAboutUnique = cmdObj["unique"].trueValue();
+ if (hasUsefulIndexForKey && careAboutUnique) {
+ BSONObj eqQuery = BSON("ns" << ns << "key" << proposedKey);
+ BSONObj eqQueryResult;
for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
BSONObj idx = *it;
- BSONObj currentKey = idx["key"].embeddedObject();
- // Check 2.i. and 2.ii.
- if (!idx["sparse"].trueValue() &&
- idx["filter"].eoo() &&
- proposedKey.isPrefixOf(currentKey)) {
-
- // We can't currently use hashed indexes with a non-default hash seed
- // Check v.
- // Note that this means that, for sharding, we only support one hashed index
- // per field per collection.
- if (isHashedShardKey &&
- !idx["seed"].eoo() &&
- idx["seed"].numberInt() != BSONElementHasher::DEFAULT_HASH_SEED) {
-
- errmsg = str::stream() << "can't shard collection " << ns
- << " with hashed shard key " << proposedKey
- << " because the hashed index uses a non-default"
- << " seed of " << idx["seed"].numberInt();
- conn.done();
- return false;
- }
-
- hasUsefulIndexForKey = true;
- }
- }
-
- // 3. If proposed key is required to be unique, additionally check for exact match.
- bool careAboutUnique = cmdObj["unique"].trueValue();
- if (hasUsefulIndexForKey && careAboutUnique) {
- BSONObj eqQuery = BSON("ns" << ns << "key" << proposedKey);
- BSONObj eqQueryResult;
-
- for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
- BSONObj idx = *it;
- if (idx["key"].embeddedObject() == proposedKey) {
- eqQueryResult = idx;
- break;
- }
- }
-
- if (eqQueryResult.isEmpty()) {
- // If no exact match, index not useful, but still possible to create one later
- hasUsefulIndexForKey = false;
- }
- else {
- bool isExplicitlyUnique = eqQueryResult["unique"].trueValue();
- BSONObj currKey = eqQueryResult["key"].embeddedObject();
- bool isCurrentID = str::equals(currKey.firstElementFieldName(), "_id");
-
- if (!isExplicitlyUnique && !isCurrentID) {
- errmsg = str::stream() << "can't shard collection " << ns << ", "
- << proposedKey << " index not unique, "
- << "and unique index explicitly specified";
- conn.done();
- return false;
- }
+ if (idx["key"].embeddedObject() == proposedKey) {
+ eqQueryResult = idx;
+ break;
}
}
- if (hasUsefulIndexForKey) {
- // Check 2.iii and 2.iv. Make sure no null entries in the sharding index
- // and that there is a useful, non-multikey index available
- BSONObjBuilder checkShardingIndexCmd;
- checkShardingIndexCmd.append("checkShardingIndex", ns);
- checkShardingIndexCmd.append("keyPattern", proposedKey);
-
- if (!conn.get()->runCommand("admin", checkShardingIndexCmd.obj(), res)) {
- errmsg = res["errmsg"].str();
+ if (eqQueryResult.isEmpty()) {
+ // If no exact match, index not useful, but still possible to create one later
+ hasUsefulIndexForKey = false;
+ } else {
+ bool isExplicitlyUnique = eqQueryResult["unique"].trueValue();
+ BSONObj currKey = eqQueryResult["key"].embeddedObject();
+ bool isCurrentID = str::equals(currKey.firstElementFieldName(), "_id");
+
+ if (!isExplicitlyUnique && !isCurrentID) {
+ errmsg = str::stream() << "can't shard collection " << ns << ", " << proposedKey
+ << " index not unique, "
+ << "and unique index explicitly specified";
conn.done();
return false;
}
}
- else if (conn->count(ns) != 0) {
- // 4. if no useful index, and collection is non-empty, fail
- errmsg = str::stream() << "please create an index that starts with the "
- << "shard key before sharding.";
- result.append("proposedKey", proposedKey);
- result.append("curIndexes", indexes);
+ }
+
+ if (hasUsefulIndexForKey) {
+ // Check 2.iii and 2.iv. Make sure no null entries in the sharding index
+ // and that there is a useful, non-multikey index available
+ BSONObjBuilder checkShardingIndexCmd;
+ checkShardingIndexCmd.append("checkShardingIndex", ns);
+ checkShardingIndexCmd.append("keyPattern", proposedKey);
+
+ if (!conn.get()->runCommand("admin", checkShardingIndexCmd.obj(), res)) {
+ errmsg = res["errmsg"].str();
conn.done();
return false;
}
- else {
- // 5. If no useful index exists, and collection empty, create one on proposedKey.
- // Only need to call ensureIndex on primary shard, since indexes get copied to
- // receiving shard whenever a migrate occurs.
- Status status = clusterCreateIndex(ns, proposedKey, careAboutUnique, NULL);
- if (!status.isOK()) {
- errmsg = str::stream() << "ensureIndex failed to create index on "
- << "primary shard: " << status.reason();
- conn.done();
- return false;
- }
+ } else if (conn->count(ns) != 0) {
+ // 4. if no useful index, and collection is non-empty, fail
+ errmsg = str::stream() << "please create an index that starts with the "
+ << "shard key before sharding.";
+ result.append("proposedKey", proposedKey);
+ result.append("curIndexes", indexes);
+ conn.done();
+ return false;
+ } else {
+ // 5. If no useful index exists, and collection empty, create one on proposedKey.
+ // Only need to call ensureIndex on primary shard, since indexes get copied to
+ // receiving shard whenever a migrate occurs.
+ Status status = clusterCreateIndex(ns, proposedKey, careAboutUnique, NULL);
+ if (!status.isOK()) {
+ errmsg = str::stream() << "ensureIndex failed to create index on "
+ << "primary shard: " << status.reason();
+ conn.done();
+ return false;
}
+ }
- bool isEmpty = (conn->count(ns) == 0);
-
- conn.done();
+ bool isEmpty = (conn->count(ns) == 0);
- // Pre-splitting:
- // For new collections which use hashed shard keys, we can can pre-split the
- // range of possible hashes into a large number of chunks, and distribute them
- // evenly at creation time. Until we design a better initialization scheme, the
- // safest way to pre-split is to
- // 1. make one big chunk for each shard
- // 2. move them one at a time
- // 3. split the big chunks to achieve the desired total number of initial chunks
-
- vector<ShardId> shardIds;
- grid.shardRegistry()->getAllShardIds(&shardIds);
- int numShards = shardIds.size();
-
- vector<BSONObj> initSplits; // there will be at most numShards-1 of these
- vector<BSONObj> allSplits; // all of the initial desired split points
-
- // only pre-split when using a hashed shard key and collection is still empty
- if (isHashedShardKey && isEmpty){
- int numChunks = cmdObj["numInitialChunks"].numberInt();
- if (numChunks <= 0) {
- // default number of initial chunks
- numChunks = 2 * numShards;
- }
+ conn.done();
- // hashes are signed, 64-bit ints. So we divide the range (-MIN long, +MAX long)
- // into intervals of size (2^64/numChunks) and create split points at the
- // boundaries. The logic below ensures that initial chunks are all
- // symmetric around 0.
- long long intervalSize = (std::numeric_limits<long long>::max() / numChunks) * 2;
- long long current = 0;
+ // Pre-splitting:
+    // For new collections which use hashed shard keys, we can pre-split the
+ // range of possible hashes into a large number of chunks, and distribute them
+ // evenly at creation time. Until we design a better initialization scheme, the
+ // safest way to pre-split is to
+ // 1. make one big chunk for each shard
+ // 2. move them one at a time
+ // 3. split the big chunks to achieve the desired total number of initial chunks
- if (numChunks % 2 == 0){
- allSplits.push_back(BSON(proposedKey.firstElementFieldName() << current));
- current += intervalSize;
- }
- else {
- current += intervalSize / 2;
- }
+ vector<ShardId> shardIds;
+ grid.shardRegistry()->getAllShardIds(&shardIds);
+ int numShards = shardIds.size();
- for (int i = 0; i < (numChunks - 1) / 2; i++){
- allSplits.push_back(BSON(proposedKey.firstElementFieldName() << current));
- allSplits.push_back(BSON(proposedKey.firstElementFieldName() << -current));
- current += intervalSize;
- }
+ vector<BSONObj> initSplits; // there will be at most numShards-1 of these
+ vector<BSONObj> allSplits; // all of the initial desired split points
- sort(allSplits.begin(), allSplits.end());
+ // only pre-split when using a hashed shard key and collection is still empty
+ if (isHashedShardKey && isEmpty) {
+ int numChunks = cmdObj["numInitialChunks"].numberInt();
+ if (numChunks <= 0) {
+ // default number of initial chunks
+ numChunks = 2 * numShards;
+ }
- // 1. the initial splits define the "big chunks" that we will subdivide later
- int lastIndex = -1;
- for (int i = 1; i < numShards; i++) {
- if (lastIndex < (i*numChunks) / numShards - 1) {
- lastIndex = (i*numChunks) / numShards - 1;
- initSplits.push_back(allSplits[lastIndex]);
- }
- }
+ // hashes are signed, 64-bit ints. So we divide the range (-MIN long, +MAX long)
+ // into intervals of size (2^64/numChunks) and create split points at the
+ // boundaries. The logic below ensures that initial chunks are all
+ // symmetric around 0.
+ long long intervalSize = (std::numeric_limits<long long>::max() / numChunks) * 2;
+ long long current = 0;
+
+ if (numChunks % 2 == 0) {
+ allSplits.push_back(BSON(proposedKey.firstElementFieldName() << current));
+ current += intervalSize;
+ } else {
+ current += intervalSize / 2;
}
- LOG(0) << "CMD: shardcollection: " << cmdObj;
+ for (int i = 0; i < (numChunks - 1) / 2; i++) {
+ allSplits.push_back(BSON(proposedKey.firstElementFieldName() << current));
+ allSplits.push_back(BSON(proposedKey.firstElementFieldName() << -current));
+ current += intervalSize;
+ }
- audit::logShardCollection(ClientBasic::getCurrent(),
- ns,
- proposedKey,
- careAboutUnique);
+ sort(allSplits.begin(), allSplits.end());
- Status status = grid.catalogManager()->shardCollection(ns,
- proposedShardKey,
- careAboutUnique,
- &initSplits);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
+ // 1. the initial splits define the "big chunks" that we will subdivide later
+ int lastIndex = -1;
+ for (int i = 1; i < numShards; i++) {
+ if (lastIndex < (i * numChunks) / numShards - 1) {
+ lastIndex = (i * numChunks) / numShards - 1;
+ initSplits.push_back(allSplits[lastIndex]);
+ }
}
+ }
- result << "collectionsharded" << ns;
-
- // Only initially move chunks when using a hashed shard key
- if (isHashedShardKey && isEmpty) {
- // Reload the new config info. If we created more than one initial chunk, then
- // we need to move them around to balance.
- ChunkManagerPtr chunkManager = config->getChunkManager(ns, true);
- ChunkMap chunkMap = chunkManager->getChunkMap();
-
- // 2. Move and commit each "big chunk" to a different shard.
- int i = 0;
- for (ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c, ++i){
- const ShardId& shardId = shardIds[i % numShards];
- const auto to = grid.shardRegistry()->getShard(shardId);
- if (!to) {
- continue;
- }
+ LOG(0) << "CMD: shardcollection: " << cmdObj;
- ChunkPtr chunk = c->second;
+ audit::logShardCollection(ClientBasic::getCurrent(), ns, proposedKey, careAboutUnique);
- // can't move chunk to shard it's already on
- if (to->getId() == chunk->getShardId()) {
- continue;
- }
+ Status status = grid.catalogManager()->shardCollection(
+ ns, proposedShardKey, careAboutUnique, &initSplits);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
- BSONObj moveResult;
- WriteConcernOptions noThrottle;
- if (!chunk->moveAndCommit(to->getId(),
- Chunk::MaxChunkSize,
- &noThrottle,
- true,
- 0,
- moveResult)) {
-
- warning() << "couldn't move chunk " << chunk->toString()
- << " to shard " << *to
- << " while sharding collection " << ns << "."
- << " Reason: " << moveResult;
- }
+ result << "collectionsharded" << ns;
+
+ // Only initially move chunks when using a hashed shard key
+ if (isHashedShardKey && isEmpty) {
+ // Reload the new config info. If we created more than one initial chunk, then
+ // we need to move them around to balance.
+ ChunkManagerPtr chunkManager = config->getChunkManager(ns, true);
+ ChunkMap chunkMap = chunkManager->getChunkMap();
+
+ // 2. Move and commit each "big chunk" to a different shard.
+ int i = 0;
+ for (ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c, ++i) {
+ const ShardId& shardId = shardIds[i % numShards];
+ const auto to = grid.shardRegistry()->getShard(shardId);
+ if (!to) {
+ continue;
+ }
+
+ ChunkPtr chunk = c->second;
+
+ // can't move chunk to shard it's already on
+ if (to->getId() == chunk->getShardId()) {
+ continue;
}
- if (allSplits.empty()) {
- return true;
+ BSONObj moveResult;
+ WriteConcernOptions noThrottle;
+ if (!chunk->moveAndCommit(
+ to->getId(), Chunk::MaxChunkSize, &noThrottle, true, 0, moveResult)) {
+ warning() << "couldn't move chunk " << chunk->toString() << " to shard " << *to
+ << " while sharding collection " << ns << "."
+ << " Reason: " << moveResult;
}
+ }
- // Reload the config info, after all the migrations
- chunkManager = config->getChunkManager(ns, true);
-
- // 3. Subdivide the big chunks by splitting at each of the points in "allSplits"
- // that we haven't already split by.
- ChunkPtr currentChunk = chunkManager->findIntersectingChunk(allSplits[0]);
-
- vector<BSONObj> subSplits;
- for (unsigned i = 0; i <= allSplits.size(); i++){
- if (i == allSplits.size() || !currentChunk->containsKey(allSplits[i])) {
- if (!subSplits.empty()){
- Status status = currentChunk->multiSplit(subSplits, NULL);
- if (!status.isOK()){
- warning() << "couldn't split chunk "
- << currentChunk->toString()
- << " while sharding collection " << ns
- << causedBy(status);
- }
-
- subSplits.clear();
- }
+ if (allSplits.empty()) {
+ return true;
+ }
- if (i < allSplits.size()) {
- currentChunk = chunkManager->findIntersectingChunk(allSplits[i]);
+ // Reload the config info, after all the migrations
+ chunkManager = config->getChunkManager(ns, true);
+
+ // 3. Subdivide the big chunks by splitting at each of the points in "allSplits"
+ // that we haven't already split by.
+ ChunkPtr currentChunk = chunkManager->findIntersectingChunk(allSplits[0]);
+
+ vector<BSONObj> subSplits;
+ for (unsigned i = 0; i <= allSplits.size(); i++) {
+ if (i == allSplits.size() || !currentChunk->containsKey(allSplits[i])) {
+ if (!subSplits.empty()) {
+ Status status = currentChunk->multiSplit(subSplits, NULL);
+ if (!status.isOK()) {
+ warning() << "couldn't split chunk " << currentChunk->toString()
+ << " while sharding collection " << ns << causedBy(status);
}
+
+ subSplits.clear();
}
- else {
- BSONObj splitPoint(allSplits[i]);
- // Do not split on the boundaries
- if (currentChunk->getMin().woCompare(splitPoint) == 0) {
- continue;
- }
+ if (i < allSplits.size()) {
+ currentChunk = chunkManager->findIntersectingChunk(allSplits[i]);
+ }
+ } else {
+ BSONObj splitPoint(allSplits[i]);
- subSplits.push_back(splitPoint);
+ // Do not split on the boundaries
+ if (currentChunk->getMin().woCompare(splitPoint) == 0) {
+ continue;
}
- }
- // Proactively refresh the chunk manager. Not really necessary, but this way it's
- // immediately up-to-date the next time it's used.
- config->getChunkManager(ns, true);
+ subSplits.push_back(splitPoint);
+ }
}
- return true;
+ // Proactively refresh the chunk manager. Not really necessary, but this way it's
+ // immediately up-to-date the next time it's used.
+ config->getChunkManager(ns, true);
}
- } shardCollectionCmd;
+ return true;
+ }
+
+} shardCollectionCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_shutdown_cmd.cpp b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
index 58fe1d47483..72f0fd71e6f 100644
--- a/src/mongo/s/commands/cluster_shutdown_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
@@ -34,26 +34,25 @@
namespace mongo {
namespace {
- class ClusterShutdownCmd : public CmdShutdown {
- public:
- virtual void help(std::stringstream& help) const {
- help << "shutdown the database. must be ran against admin db and "
- << "either (1) ran from localhost or (2) authenticated.";
- }
-
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
-
- // Never returns
- shutdownHelper();
- return true;
- }
-
- } clusterShutdownCmd;
-
-} // namespace
-} // namespace mongo
+class ClusterShutdownCmd : public CmdShutdown {
+public:
+ virtual void help(std::stringstream& help) const {
+ help << "shutdown the database. must be ran against admin db and "
+ << "either (1) ran from localhost or (2) authenticated.";
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ // Never returns
+ shutdownHelper();
+ return true;
+ }
+
+} clusterShutdownCmd;
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_split_collection_cmd.cpp b/src/mongo/s/commands/cluster_split_collection_cmd.cpp
index acd99d32c7b..fd5f462518c 100644
--- a/src/mongo/s/commands/cluster_split_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_collection_cmd.cpp
@@ -49,244 +49,231 @@
namespace mongo {
- using std::shared_ptr;
- using std::string;
- using std::vector;
+using std::shared_ptr;
+using std::string;
+using std::vector;
namespace {
- class SplitCollectionCmd : public Command {
- public:
- SplitCollectionCmd() : Command("split", false, "split") { }
+class SplitCollectionCmd : public Command {
+public:
+ SplitCollectionCmd() : Command("split", false, "split") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(std::stringstream& help) const {
+ help << " example: - split the shard that contains give key\n"
+ << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
+ << " example: - split the shard that contains the key with this as the middle\n"
+ << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
+ << " NOTE: this does not move the chunks, it just creates a logical separation.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::splitChunk)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ ShardConnection::sync();
+
+ const NamespaceString nss(parseNs(dbname, cmdObj));
+ if (nss.size() == 0) {
+ return appendCommandStatus(
+ result, Status(ErrorCodes::InvalidNamespace, "no namespace specified"));
+ }
- virtual bool slaveOk() const {
- return true;
+ auto status = grid.catalogCache()->getDatabase(nss.db().toString());
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
}
- virtual bool adminOnly() const {
- return true;
+ std::shared_ptr<DBConfig> config = status.getValue();
+ if (!config->isSharded(nss.ns())) {
+ config->reload();
+
+ if (!config->isSharded(nss.ns())) {
+ return appendCommandStatus(result,
+ Status(ErrorCodes::NamespaceNotSharded,
+ "ns [" + nss.ns() + " is not sharded."));
+ }
}
- virtual bool isWriteCommandForConfigServer() const {
+ const BSONField<BSONObj> findField("find", BSONObj());
+ const BSONField<BSONArray> boundsField("bounds", BSONArray());
+ const BSONField<BSONObj> middleField("middle", BSONObj());
+
+ BSONObj find;
+ if (FieldParser::extract(cmdObj, findField, &find, &errmsg) == FieldParser::FIELD_INVALID) {
return false;
}
- virtual void help(std::stringstream& help) const {
- help << " example: - split the shard that contains give key\n"
- << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
- << " example: - split the shard that contains the key with this as the middle\n"
- << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
- << " NOTE: this does not move the chunks, it just creates a logical separation.";
+ BSONArray bounds;
+ if (FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg) ==
+ FieldParser::FIELD_INVALID) {
+ return false;
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ if (!bounds.isEmpty()) {
+ if (!bounds.hasField("0")) {
+ errmsg = "lower bound not specified";
+ return false;
+ }
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(parseNs(dbname,
- cmdObj))),
- ActionType::splitChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ if (!bounds.hasField("1")) {
+ errmsg = "upper bound not specified";
+ return false;
}
- return Status::OK();
}
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ if (!find.isEmpty() && !bounds.isEmpty()) {
+ errmsg = "cannot specify bounds and find at the same time";
+ return false;
}
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+ BSONObj middle;
+ if (FieldParser::extract(cmdObj, middleField, &middle, &errmsg) ==
+ FieldParser::FIELD_INVALID) {
+ return false;
+ }
- ShardConnection::sync();
+ if (find.isEmpty() && bounds.isEmpty() && middle.isEmpty()) {
+ errmsg = "need to specify find/bounds or middle";
+ return false;
+ }
- const NamespaceString nss(parseNs(dbname, cmdObj));
- if (nss.size() == 0) {
- return appendCommandStatus(result, Status(ErrorCodes::InvalidNamespace,
- "no namespace specified"));
- }
+ if (!find.isEmpty() && !middle.isEmpty()) {
+ errmsg = "cannot specify find and middle together";
+ return false;
+ }
- auto status = grid.catalogCache()->getDatabase(nss.db().toString());
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
+ if (!bounds.isEmpty() && !middle.isEmpty()) {
+ errmsg = "cannot specify bounds and middle together";
+ return false;
+ }
- std::shared_ptr<DBConfig> config = status.getValue();
- if (!config->isSharded(nss.ns())) {
- config->reload();
+ // This refreshes the chunk metadata if stale.
+ ChunkManagerPtr info = config->getChunkManager(nss.ns(), true);
+ ChunkPtr chunk;
- if (!config->isSharded(nss.ns())) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NamespaceNotSharded,
- "ns [" + nss.ns() + " is not sharded."));
- }
- }
+ if (!find.isEmpty()) {
+ StatusWith<BSONObj> status = info->getShardKeyPattern().extractShardKeyFromQuery(find);
- const BSONField<BSONObj> findField("find", BSONObj());
- const BSONField<BSONArray> boundsField("bounds", BSONArray());
- const BSONField<BSONObj> middleField("middle", BSONObj());
+ // Bad query
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
- BSONObj find;
- if (FieldParser::extract(cmdObj, findField, &find, &errmsg) ==
- FieldParser::FIELD_INVALID) {
+ BSONObj shardKey = status.getValue();
+ if (shardKey.isEmpty()) {
+ errmsg = stream() << "no shard key found in chunk query " << find;
return false;
}
- BSONArray bounds;
- if (FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg) ==
- FieldParser::FIELD_INVALID) {
+ chunk = info->findIntersectingChunk(shardKey);
+ invariant(chunk.get());
+ } else if (!bounds.isEmpty()) {
+ if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
+ !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
+ errmsg = stream() << "shard key bounds "
+ << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
+ << " are not valid for shard key pattern "
+ << info->getShardKeyPattern().toBSON();
return false;
}
- if (!bounds.isEmpty()) {
- if (!bounds.hasField("0")) {
- errmsg = "lower bound not specified";
- return false;
- }
+ BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
+ BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- if (!bounds.hasField("1")) {
- errmsg = "upper bound not specified";
- return false;
- }
- }
+ chunk = info->findIntersectingChunk(minKey);
+ invariant(chunk.get());
- if (!find.isEmpty() && !bounds.isEmpty()) {
- errmsg = "cannot specify bounds and find at the same time";
+ if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
+ errmsg = stream() << "no chunk found with the shard key bounds "
+ << "[" << minKey << "," << maxKey << ")";
return false;
}
-
- BSONObj middle;
- if (FieldParser::extract(cmdObj, middleField, &middle, &errmsg) ==
- FieldParser::FIELD_INVALID) {
+ } else {
+ // Middle
+ if (!info->getShardKeyPattern().isShardKey(middle)) {
+ errmsg = stream() << "new split key " << middle
+ << " is not valid for shard key pattern "
+ << info->getShardKeyPattern().toBSON();
return false;
}
- if (find.isEmpty() && bounds.isEmpty() && middle.isEmpty()) {
- errmsg = "need to specify find/bounds or middle";
- return false;
- }
+ middle = info->getShardKeyPattern().normalizeShardKey(middle);
- if (!find.isEmpty() && !middle.isEmpty()) {
- errmsg = "cannot specify find and middle together";
- return false;
+ // Check shard key size when manually provided
+ Status status = ShardKeyPattern::checkShardKeySize(middle);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
}
- if (!bounds.isEmpty() && !middle.isEmpty()) {
- errmsg = "cannot specify bounds and middle together";
+ chunk = info->findIntersectingChunk(middle);
+ invariant(chunk.get());
+
+ if (chunk->getMin().woCompare(middle) == 0 || chunk->getMax().woCompare(middle) == 0) {
+ errmsg = stream() << "new split key " << middle
+ << " is a boundary key of existing chunk "
+ << "[" << chunk->getMin() << "," << chunk->getMax() << ")";
return false;
}
+ }
- // This refreshes the chunk metadata if stale.
- ChunkManagerPtr info = config->getChunkManager(nss.ns(), true);
- ChunkPtr chunk;
-
- if (!find.isEmpty()) {
- StatusWith<BSONObj> status =
- info->getShardKeyPattern().extractShardKeyFromQuery(find);
-
- // Bad query
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- BSONObj shardKey = status.getValue();
- if (shardKey.isEmpty()) {
- errmsg = stream() << "no shard key found in chunk query " << find;
- return false;
- }
+ invariant(chunk.get());
+ log() << "splitting chunk [" << chunk->getMin() << "," << chunk->getMax() << ")"
+ << " in collection " << nss.ns() << " on shard " << chunk->getShardId();
- chunk = info->findIntersectingChunk(shardKey);
- invariant(chunk.get());
- }
- else if (!bounds.isEmpty()) {
-
- if (!info->getShardKeyPattern().isShardKey(bounds[0].Obj())
- || !info->getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = stream() << "shard key bounds " << "[" << bounds[0].Obj() << ","
- << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
- return false;
- }
-
- BSONObj minKey = info->getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = info->getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
-
- chunk = info->findIntersectingChunk(minKey);
- invariant(chunk.get());
-
- if (chunk->getMin().woCompare(minKey) != 0
- || chunk->getMax().woCompare(maxKey) != 0) {
- errmsg = stream() << "no chunk found with the shard key bounds " << "["
- << minKey << "," << maxKey << ")";
- return false;
- }
- }
- else {
- // Middle
- if (!info->getShardKeyPattern().isShardKey(middle)) {
- errmsg = stream() << "new split key " << middle
- << " is not valid for shard key pattern "
- << info->getShardKeyPattern().toBSON();
- return false;
- }
-
- middle = info->getShardKeyPattern().normalizeShardKey(middle);
-
- // Check shard key size when manually provided
- Status status = ShardKeyPattern::checkShardKeySize(middle);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
-
- chunk = info->findIntersectingChunk(middle);
- invariant(chunk.get());
-
- if (chunk->getMin().woCompare(middle) == 0
- || chunk->getMax().woCompare(middle) == 0) {
- errmsg = stream() << "new split key " << middle
- << " is a boundary key of existing chunk " << "["
- << chunk->getMin() << "," << chunk->getMax() << ")";
- return false;
- }
+ BSONObj res;
+ if (middle.isEmpty()) {
+ Status status = chunk->split(Chunk::atMedian, NULL, NULL);
+ if (!status.isOK()) {
+ errmsg = "split failed";
+ result.append("cause", status.toString());
+ return false;
}
+ } else {
+ vector<BSONObj> splitPoints;
+ splitPoints.push_back(middle);
- invariant(chunk.get());
- log() << "splitting chunk [" << chunk->getMin() << "," << chunk->getMax() << ")"
- << " in collection " << nss.ns()
- << " on shard " << chunk->getShardId();
-
- BSONObj res;
- if (middle.isEmpty()) {
- Status status = chunk->split(Chunk::atMedian, NULL, NULL);
- if (!status.isOK()) {
- errmsg = "split failed";
- result.append("cause", status.toString());
- return false;
- }
- }
- else {
- vector<BSONObj> splitPoints;
- splitPoints.push_back(middle);
-
- Status status = chunk->multiSplit(splitPoints, NULL);
- if (!status.isOK()) {
- errmsg = "split failed";
- result.append("cause", status.toString());
- return false;
- }
+ Status status = chunk->multiSplit(splitPoints, NULL);
+ if (!status.isOK()) {
+ errmsg = "split failed";
+ result.append("cause", status.toString());
+ return false;
}
-
- return true;
}
- } splitCollectionCmd;
+ return true;
+ }
+
+} splitCollectionCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 2c781ac0e91..d2a6ab667d4 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -46,746 +46,761 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+class CmdCreateUser : public Command {
+public:
+ CmdCreateUser() : Command("createUser") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a user to the system";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+ }
+
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
+
+} cmdCreateUser;
+
+class CmdUpdateUser : public Command {
+public:
+ CmdUpdateUser() : Command("updateUser") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a user, for example to change its password";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auth::CreateOrUpdateUserArgs args;
+ Status status = auth::parseCreateOrUpdateUserCommands(cmdObj, this->name, dbname, &args);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserByName(args.userName);
+
+ return ok;
+ }
+
+ virtual void redactForLogging(mutablebson::Document* cmdObj) {
+ auth::redactPasswordData(cmdObj->root());
+ }
+
+} cmdUpdateUser;
+
+class CmdDropUser : public Command {
+public:
+ CmdDropUser() : Command("dropUser") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single user.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ UserName userName;
+ BSONObj unusedWriteConcern;
+ Status status =
+ auth::parseAndValidateDropUserCommand(cmdObj, dbname, &userName, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserByName(userName);
+
+ return ok;
+ }
+
+} cmdDropUser;
+
+class CmdDropAllUsersFromDatabase : public Command {
+public:
+ CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all users for a single database.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUsersFromDB(dbname);
+
+ return ok;
+ }
+
+} cmdDropAllUsersFromDatabase;
+
+class CmdGrantRolesToUser : public Command {
+public:
+ CmdGrantRolesToUser() : Command("grantRolesToUser") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to a user.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string userNameString;
+ vector<RoleName> roles;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, this->name, dbname, &userNameString, &roles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserByName(UserName(userNameString, dbname));
+
+ return ok;
+ }
+
+} cmdGrantRolesToUser;
+
+class CmdRevokeRolesFromUser : public Command {
+public:
+ CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from a user.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string userNameString;
+ vector<RoleName> unusedRoles;
+ BSONObj unusedWriteConcern;
+ Status status = auth::parseRolePossessionManipulationCommands(
+ cmdObj, this->name, dbname, &userNameString, &unusedRoles, &unusedWriteConcern);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status);
+ }
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserByName(UserName(userNameString, dbname));
+
+ return ok;
+ }
+
+} cmdRevokeRolesFromUser;
+
+class CmdUsersInfo : public Command {
+public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ CmdUsersInfo() : Command("usersInfo") {}
+
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about users.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return grid.catalogManager()->runUserManagementReadCommand(dbname, cmdObj, &result);
+ }
+
+} cmdUsersInfo;
+
+class CmdCreateRole : public Command {
+public:
+ CmdCreateRole() : Command("createRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Adds a role to the system";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+ }
+
+} cmdCreateRole;
+
+class CmdUpdateRole : public Command {
+public:
+ CmdUpdateRole() : Command("updateRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Used to update a role";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdUpdateRole;
+
+class CmdGrantPrivilegesToRole : public Command {
+public:
+ CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Grants privileges to a role";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdGrantPrivilegesToRole;
+
+class CmdRevokePrivilegesFromRole : public Command {
+public:
+ CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes privileges from a role";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdRevokePrivilegesFromRole;
+
+class CmdGrantRolesToRole : public Command {
+public:
+ CmdGrantRolesToRole() : Command("grantRolesToRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Grants roles to another role.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdGrantRolesToRole;
+
+class CmdRevokeRolesFromRole : public Command {
+public:
+ CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Revokes roles from another role.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdRevokeRolesFromRole;
+
+class CmdDropRole : public Command {
+public:
+ CmdDropRole() : Command("dropRole") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Drops a single role. Before deleting the role completely it must remove it "
+ "from any users or roles that reference it. If any errors occur in the middle "
+ "of that process it's possible to be left in a state where the role has been "
+ "removed from some user/roles but otherwise still exists.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdDropRole;
+
+class CmdDropAllRolesFromDatabase : public Command {
+public:
+ CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Drops all roles from the given database. Before deleting the roles completely "
+ "it must remove them from any users or other roles that reference them. If any "
+ "errors occur in the middle of that process it's possible to be left in a state "
+ "where the roles have been removed from some user/roles but otherwise still "
+ "exist.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const bool ok = grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+
+ return ok;
+ }
+
+} cmdDropAllRolesFromDatabase;
+
+class CmdRolesInfo : public Command {
+public:
+ CmdRolesInfo() : Command("rolesInfo") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool slaveOverrideOk() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Returns information about roles.";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return grid.catalogManager()->runUserManagementReadCommand(dbname, cmdObj, &result);
+ }
+
+} cmdRolesInfo;
+
+class CmdInvalidateUserCache : public Command {
+public:
+ CmdInvalidateUserCache() : Command("invalidateUserCache") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Invalidates the in-memory cache of user information";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForInvalidateUserCacheCommand(client);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ AuthorizationManager* authzManager = getGlobalAuthorizationManager();
+ invariant(authzManager);
+ authzManager->invalidateUserCache();
+ return true;
+ }
+
+} cmdInvalidateUserCache;
- class CmdCreateUser : public Command {
- public:
-
- CmdCreateUser() : Command("createUser") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Adds a user to the system";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- return grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
- }
-
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
-
- } cmdCreateUser;
-
- class CmdUpdateUser : public Command {
- public:
-
- CmdUpdateUser() : Command("updateUser") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Used to update a user, for example to change its password";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- auth::CreateOrUpdateUserArgs args;
- Status status = auth::parseCreateOrUpdateUserCommands(cmdObj,
- this->name,
- dbname,
- &args);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserByName(args.userName);
-
- return ok;
- }
-
- virtual void redactForLogging(mutablebson::Document* cmdObj) {
- auth::redactPasswordData(cmdObj->root());
- }
-
- } cmdUpdateUser;
-
- class CmdDropUser : public Command {
- public:
-
- CmdDropUser() : Command("dropUser") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops a single user.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- UserName userName;
- BSONObj unusedWriteConcern;
- Status status = auth::parseAndValidateDropUserCommand(cmdObj,
- dbname,
- &userName,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserByName(userName);
-
- return ok;
- }
-
- } cmdDropUser;
-
- class CmdDropAllUsersFromDatabase : public Command {
- public:
-
- CmdDropAllUsersFromDatabase() : Command("dropAllUsersFromDatabase") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops all users for a single database.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUsersFromDB(dbname);
-
- return ok;
- }
-
- } cmdDropAllUsersFromDatabase;
-
- class CmdGrantRolesToUser: public Command {
- public:
-
- CmdGrantRolesToUser() : Command("grantRolesToUser") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to a user.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string userNameString;
- vector<RoleName> roles;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- this->name,
- dbname,
- &userNameString,
- &roles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserByName(UserName(userNameString, dbname));
-
- return ok;
- }
-
- } cmdGrantRolesToUser;
-
- class CmdRevokeRolesFromUser: public Command {
- public:
-
- CmdRevokeRolesFromUser() : Command("revokeRolesFromUser") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from a user.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string userNameString;
- vector<RoleName> unusedRoles;
- BSONObj unusedWriteConcern;
- Status status = auth::parseRolePossessionManipulationCommands(cmdObj,
- this->name,
- dbname,
- &userNameString,
- &unusedRoles,
- &unusedWriteConcern);
- if (!status.isOK()) {
- return appendCommandStatus(result, status);
- }
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserByName(UserName(userNameString, dbname));
-
- return ok;
- }
-
- } cmdRevokeRolesFromUser;
-
- class CmdUsersInfo: public Command {
- public:
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool slaveOverrideOk() const { return true; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- CmdUsersInfo() : Command("usersInfo") {}
-
- virtual void help(stringstream& ss) const {
- ss << "Returns information about users.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- return grid.catalogManager()->runUserManagementReadCommand(dbname,
- cmdObj,
- &result);
- }
-
- } cmdUsersInfo;
-
- class CmdCreateRole: public Command {
- public:
-
- CmdCreateRole() : Command("createRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Adds a role to the system";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- return grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
- }
-
- } cmdCreateRole;
-
- class CmdUpdateRole: public Command {
- public:
-
- CmdUpdateRole() : Command("updateRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Used to update a role";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdUpdateRole;
-
- class CmdGrantPrivilegesToRole: public Command {
- public:
-
- CmdGrantPrivilegesToRole() : Command("grantPrivilegesToRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Grants privileges to a role";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdGrantPrivilegesToRole;
-
- class CmdRevokePrivilegesFromRole: public Command {
- public:
-
- CmdRevokePrivilegesFromRole() : Command("revokePrivilegesFromRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Revokes privileges from a role";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdRevokePrivilegesFromRole;
-
- class CmdGrantRolesToRole: public Command {
- public:
-
- CmdGrantRolesToRole() : Command("grantRolesToRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Grants roles to another role.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdGrantRolesToRole;
-
- class CmdRevokeRolesFromRole: public Command {
- public:
-
- CmdRevokeRolesFromRole() : Command("revokeRolesFromRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Revokes roles from another role.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdRevokeRolesFromRole;
-
- class CmdDropRole: public Command {
- public:
-
- CmdDropRole() : Command("dropRole") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops a single role. Before deleting the role completely it must remove it "
- "from any users or roles that reference it. If any errors occur in the middle "
- "of that process it's possible to be left in a state where the role has been "
- "removed from some user/roles but otherwise still exists.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdDropRole;
-
- class CmdDropAllRolesFromDatabase: public Command {
- public:
-
- CmdDropAllRolesFromDatabase() : Command("dropAllRolesFromDatabase") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Drops all roles from the given database. Before deleting the roles completely "
- "it must remove them from any users or other roles that reference them. If any "
- "errors occur in the middle of that process it's possible to be left in a state "
- "where the roles have been removed from some user/roles but otherwise still "
- "exist.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const bool ok = grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
-
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
-
- return ok;
- }
-
- } cmdDropAllRolesFromDatabase;
-
- class CmdRolesInfo: public Command {
- public:
-
- CmdRolesInfo() : Command("rolesInfo") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool slaveOverrideOk() const { return true; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Returns information about roles.";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- return grid.catalogManager()->runUserManagementReadCommand(dbname,
- cmdObj,
- &result);
- }
-
- } cmdRolesInfo;
-
- class CmdInvalidateUserCache: public Command {
- public:
-
- CmdInvalidateUserCache() : Command("invalidateUserCache") {}
-
- virtual bool slaveOk() const { return true; }
-
- virtual bool adminOnly() const { return true; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual void help(stringstream& ss) const {
- ss << "Invalidates the in-memory cache of user information";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForInvalidateUserCacheCommand(client);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- AuthorizationManager* authzManager = getGlobalAuthorizationManager();
- invariant(authzManager);
- authzManager->invalidateUserCache();
- return true;
- }
-
- } cmdInvalidateUserCache;
-
- /**
- * This command is used only by mongorestore to handle restoring users/roles. We do this so
- * that mongorestore doesn't do direct inserts into the admin.system.users and
- * admin.system.roles, which would bypass the authzUpdateLock and allow multiple concurrent
- * modifications to users/roles. What mongorestore now does instead is it inserts all user/role
- * definitions it wants to restore into temporary collections, then this command moves those
- * user/role definitions into their proper place in admin.system.users and admin.system.roles.
- * It either adds the users/roles to the existing ones or replaces the existing ones, depending
- * on whether the "drop" argument is true or false.
- */
- class CmdMergeAuthzCollections : public Command {
- public:
-
- CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
-
- virtual bool slaveOk() const { return false; }
-
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- virtual bool adminOnly() const { return true; }
-
- virtual void help(stringstream& ss) const {
- ss << "Internal command used by mongorestore for updating user/role data";
- }
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- return grid.catalogManager()->runUserManagementWriteCommand(this->name,
- dbname,
- cmdObj,
- &result);
- }
-
- } cmdMergeAuthzCollections;
-
-} // namespace mongo
+/**
+ * This command is used only by mongorestore to handle restoring users/roles. We do this so
+ * that mongorestore doesn't do direct inserts into the admin.system.users and
+ * admin.system.roles, which would bypass the authzUpdateLock and allow multiple concurrent
+ * modifications to users/roles. What mongorestore now does instead is it inserts all user/role
+ * definitions it wants to restore into temporary collections, then this command moves those
+ * user/role definitions into their proper place in admin.system.users and admin.system.roles.
+ * It either adds the users/roles to the existing ones or replaces the existing ones, depending
+ * on whether the "drop" argument is true or false.
+ */
+class CmdMergeAuthzCollections : public Command {
+public:
+ CmdMergeAuthzCollections() : Command("_mergeAuthzCollections") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ virtual void help(stringstream& ss) const {
+ ss << "Internal command used by mongorestore for updating user/role data";
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ return grid.catalogManager()->runUserManagementWriteCommand(
+ this->name, dbname, cmdObj, &result);
+ }
+
+} cmdMergeAuthzCollections;
+
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
index ad4c957e598..6b10eb476e6 100644
--- a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
+++ b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
@@ -35,41 +35,39 @@
namespace mongo {
namespace {
- class WhatsMyUriCmd : public Command {
- public:
- WhatsMyUriCmd() : Command("whatsmyuri") { }
+class WhatsMyUriCmd : public Command {
+public:
+ WhatsMyUriCmd() : Command("whatsmyuri") {}
- virtual bool slaveOk() const {
- return true;
- }
+ virtual bool slaveOk() const {
+ return true;
+ }
- virtual bool isWriteCommandForConfigServer() const {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- virtual void help(std::stringstream &help) const {
- help << "{whatsmyuri:1}";
- }
+ virtual void help(std::stringstream& help) const {
+ help << "{whatsmyuri:1}";
+ }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // No auth required
+ }
- // No auth required
- }
+ virtual bool run(OperationContext* txn,
+ const std::string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& result) {
+ result << "you" << cc().getRemote().toString();
+ return true;
+ }
- virtual bool run(OperationContext* txn,
- const std::string& dbname,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& result) {
+} whatsMyUriCmd;
- result << "you" << cc().getRemote().toString();
- return true;
- }
-
- } whatsMyUriCmd;
-
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index a22dd2c7dc7..e499d708ec3 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -27,7 +27,7 @@
*/
#include "mongo/platform/basic.h"
-
+
#include "mongo/base/error_codes.h"
#include "mongo/db/client.h"
#include "mongo/db/client_basic.h"
@@ -46,234 +46,209 @@
namespace mongo {
- using std::string;
- using std::stringstream;
- using std::vector;
+using std::string;
+using std::stringstream;
+using std::vector;
namespace {
- /**
- * Base class for mongos write commands. Cluster write commands support batch writes and write
- * concern, and return per-item error information. All cluster write commands use the entry
- * point ClusterWriteCmd::run().
- *
- * Batch execution (targeting and dispatching) is performed by the BatchWriteExec class.
- */
- class ClusterWriteCmd : public Command {
- public:
- virtual ~ClusterWriteCmd() {
-
- }
-
- virtual bool slaveOk() const {
- return false;
- }
-
- virtual bool isWriteCommandForConfigServer() const {
- return false;
+/**
+ * Base class for mongos write commands. Cluster write commands support batch writes and write
+ * concern, and return per-item error information. All cluster write commands use the entry
+ * point ClusterWriteCmd::run().
+ *
+ * Batch execution (targeting and dispatching) is performed by the BatchWriteExec class.
+ */
+class ClusterWriteCmd : public Command {
+public:
+ virtual ~ClusterWriteCmd() {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ Status status = auth::checkAuthForWriteCommand(AuthorizationSession::get(client),
+ _writeType,
+ NamespaceString(parseNs(dbname, cmdObj)),
+ cmdObj);
+
+ // TODO: Remove this when we standardize GLE reporting from commands
+ if (!status.isOK()) {
+ LastError::get(client).setLastError(status.code(), status.reason());
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
+ return status;
+ }
- Status status = auth::checkAuthForWriteCommand(AuthorizationSession::get(client),
- _writeType,
- NamespaceString(parseNs(dbname,
- cmdObj)),
- cmdObj);
+ virtual Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ BatchedCommandRequest request(_writeType);
- // TODO: Remove this when we standardize GLE reporting from commands
- if (!status.isOK()) {
- LastError::get(client).setLastError(status.code(), status.reason());
- }
-
- return status;
+ string errMsg;
+ if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
+ return Status(ErrorCodes::FailedToParse, errMsg);
}
- virtual Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
+ // Fixup the namespace to be a full ns internally
+ const NamespaceString nss(dbname, request.getNS());
+ request.setNS(nss.ns());
- BatchedCommandRequest request(_writeType);
+ // We can only explain write batches of size 1.
+ if (request.sizeWriteOps() != 1U) {
+ return Status(ErrorCodes::InvalidLength, "explained write batches must be of size 1");
+ }
- string errMsg;
- if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
- return Status(ErrorCodes::FailedToParse, errMsg);
- }
+ BSONObjBuilder explainCmdBob;
+ ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
- // Fixup the namespace to be a full ns internally
- const NamespaceString nss(dbname, request.getNS());
- request.setNS(nss.ns());
+ // We will time how long it takes to run the commands on the shards.
+ Timer timer;
- // We can only explain write batches of size 1.
- if (request.sizeWriteOps() != 1U) {
- return Status(ErrorCodes::InvalidLength,
- "explained write batches must be of size 1");
- }
+ // Target the command to the shards based on the singleton batch item.
+ BatchItemRef targetingBatchItem(&request, 0);
+ vector<Strategy::CommandResult> shardResults;
+ Status status = Strategy::commandOpWrite(
+ dbname, explainCmdBob.obj(), targetingBatchItem, &shardResults);
+ if (!status.isOK()) {
+ return status;
+ }
- BSONObjBuilder explainCmdBob;
- ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
-
- // We will time how long it takes to run the commands on the shards.
- Timer timer;
-
- // Target the command to the shards based on the singleton batch item.
- BatchItemRef targetingBatchItem(&request, 0);
- vector<Strategy::CommandResult> shardResults;
- Status status = Strategy::commandOpWrite(dbname,
- explainCmdBob.obj(),
- targetingBatchItem,
- &shardResults);
- if (!status.isOK()) {
- return status;
+ return ClusterExplain::buildExplainResult(
+ shardResults, ClusterExplain::kWriteOnShards, timer.millis(), out);
+ }
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ BatchedCommandRequest request(_writeType);
+ BatchedCommandResponse response;
+
+ ClusterWriter writer(true, 0);
+
+ LastError* cmdLastError = &LastError::get(cc());
+
+ {
+ // Disable the last error object for the duration of the write
+ LastError::Disabled disableLastError(cmdLastError);
+
+ // TODO: if we do namespace parsing, push this to the type
+ if (!request.parseBSON(cmdObj, &errmsg) || !request.isValid(&errmsg)) {
+ // Batch parse failure
+ response.setOk(false);
+ response.setErrCode(ErrorCodes::FailedToParse);
+ response.setErrMessage(errmsg);
+ } else {
+ // Fixup the namespace to be a full ns internally
+ const NamespaceString nss(dbname, request.getNS());
+ request.setNSS(nss);
+
+ writer.write(request, &response);
}
- return ClusterExplain::buildExplainResult(shardResults,
- ClusterExplain::kWriteOnShards,
- timer.millis(),
- out);
+ dassert(response.isValid(NULL));
}
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- BatchedCommandRequest request(_writeType);
- BatchedCommandResponse response;
-
- ClusterWriter writer(true, 0);
-
- LastError* cmdLastError = &LastError::get(cc());
-
- {
- // Disable the last error object for the duration of the write
- LastError::Disabled disableLastError(cmdLastError);
+ // Populate the lastError object based on the write response
+ cmdLastError->reset();
+ batchErrorToLastError(request, response, cmdLastError);
- // TODO: if we do namespace parsing, push this to the type
- if (!request.parseBSON(cmdObj, &errmsg) || !request.isValid(&errmsg)) {
- // Batch parse failure
- response.setOk(false);
- response.setErrCode(ErrorCodes::FailedToParse);
- response.setErrMessage(errmsg);
- }
- else {
- // Fixup the namespace to be a full ns internally
- const NamespaceString nss(dbname, request.getNS());
- request.setNSS(nss);
+ size_t numAttempts;
- writer.write(request, &response);
- }
-
- dassert(response.isValid(NULL));
- }
-
- // Populate the lastError object based on the write response
- cmdLastError->reset();
- batchErrorToLastError(request, response, cmdLastError);
-
- size_t numAttempts;
-
- if (!response.getOk()) {
- numAttempts = 0;
- }
- else if (request.getOrdered() && response.isErrDetailsSet()) {
- // Add one failed attempt
- numAttempts = response.getErrDetailsAt(0)->getIndex() + 1;
- }
- else {
- numAttempts = request.sizeWriteOps();
- }
+ if (!response.getOk()) {
+ numAttempts = 0;
+ } else if (request.getOrdered() && response.isErrDetailsSet()) {
+ // Add one failed attempt
+ numAttempts = response.getErrDetailsAt(0)->getIndex() + 1;
+ } else {
+ numAttempts = request.sizeWriteOps();
+ }
- // TODO: increase opcounters by more than one
- if (_writeType == BatchedCommandRequest::BatchType_Insert) {
- for (size_t i = 0; i < numAttempts; ++i) {
- globalOpCounters.gotInsert();
- }
+ // TODO: increase opcounters by more than one
+ if (_writeType == BatchedCommandRequest::BatchType_Insert) {
+ for (size_t i = 0; i < numAttempts; ++i) {
+ globalOpCounters.gotInsert();
}
- else if (_writeType == BatchedCommandRequest::BatchType_Update) {
- for (size_t i = 0; i < numAttempts; ++i) {
- globalOpCounters.gotUpdate();
- }
+ } else if (_writeType == BatchedCommandRequest::BatchType_Update) {
+ for (size_t i = 0; i < numAttempts; ++i) {
+ globalOpCounters.gotUpdate();
}
- else if (_writeType == BatchedCommandRequest::BatchType_Delete) {
- for (size_t i = 0; i < numAttempts; ++i) {
- globalOpCounters.gotDelete();
- }
- }
-
- // Save the last opTimes written on each shard for this client, to allow GLE to work
- if (haveClient() && writer.getStats().hasShardStats()) {
- ClusterLastErrorInfo::get(cc()).addHostOpTimes(
- writer.getStats().getShardStats().getWriteOpTimes());
+ } else if (_writeType == BatchedCommandRequest::BatchType_Delete) {
+ for (size_t i = 0; i < numAttempts; ++i) {
+ globalOpCounters.gotDelete();
}
-
- // TODO
- // There's a pending issue about how to report response here. If we use
- // the command infra-structure, we should reuse the 'errmsg' field. But
- // we have already filed that message inside the BatchCommandResponse.
- // return response.getOk();
- result.appendElements(response.toBSON());
- return true;
}
- protected:
- /**
- * Instantiates a command that can be invoked by "name", which will be capable of issuing
- * write batches of type "writeType", and will require privilege "action" to run.
- */
- ClusterWriteCmd(StringData name, BatchedCommandRequest::BatchType writeType)
- : Command(name),
- _writeType(writeType) {
-
+ // Save the last opTimes written on each shard for this client, to allow GLE to work
+ if (haveClient() && writer.getStats().hasShardStats()) {
+ ClusterLastErrorInfo::get(cc())
+ .addHostOpTimes(writer.getStats().getShardStats().getWriteOpTimes());
}
- private:
- // Type of batch (e.g. insert, update).
- const BatchedCommandRequest::BatchType _writeType;
- };
+ // TODO
+ // There's a pending issue about how to report response here. If we use
+ // the command infra-structure, we should reuse the 'errmsg' field. But
+ // we have already filed that message inside the BatchCommandResponse.
+ // return response.getOk();
+ result.appendElements(response.toBSON());
+ return true;
+ }
+protected:
+ /**
+ * Instantiates a command that can be invoked by "name", which will be capable of issuing
+ * write batches of type "writeType", and will require privilege "action" to run.
+ */
+ ClusterWriteCmd(StringData name, BatchedCommandRequest::BatchType writeType)
+ : Command(name), _writeType(writeType) {}
- class ClusterCmdInsert : public ClusterWriteCmd {
- public:
- ClusterCmdInsert() : ClusterWriteCmd("insert", BatchedCommandRequest::BatchType_Insert) {
+private:
+ // Type of batch (e.g. insert, update).
+ const BatchedCommandRequest::BatchType _writeType;
+};
- }
- void help(stringstream& help) const {
- help << "insert documents";
- }
+class ClusterCmdInsert : public ClusterWriteCmd {
+public:
+ ClusterCmdInsert() : ClusterWriteCmd("insert", BatchedCommandRequest::BatchType_Insert) {}
- } clusterInsertCmd;
+ void help(stringstream& help) const {
+ help << "insert documents";
+ }
- class ClusterCmdUpdate : public ClusterWriteCmd {
- public:
- ClusterCmdUpdate() : ClusterWriteCmd("update", BatchedCommandRequest::BatchType_Update) {
+} clusterInsertCmd;
- }
+class ClusterCmdUpdate : public ClusterWriteCmd {
+public:
+ ClusterCmdUpdate() : ClusterWriteCmd("update", BatchedCommandRequest::BatchType_Update) {}
- void help( stringstream& help ) const {
- help << "update documents";
- }
+ void help(stringstream& help) const {
+ help << "update documents";
+ }
- } clusterUpdateCmd;
+} clusterUpdateCmd;
- class ClusterCmdDelete : public ClusterWriteCmd {
- public:
- ClusterCmdDelete() : ClusterWriteCmd("delete", BatchedCommandRequest::BatchType_Delete) {
+class ClusterCmdDelete : public ClusterWriteCmd {
+public:
+ ClusterCmdDelete() : ClusterWriteCmd("delete", BatchedCommandRequest::BatchType_Delete) {}
- }
-
- void help(stringstream& help) const {
- help << "delete documents";
- }
+ void help(stringstream& help) const {
+ help << "delete documents";
+ }
- } clusterDeleteCmd;
+} clusterDeleteCmd;
-} // namespace
-} // namespace mongo
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 6f0d57bfdd8..395f84c8c7e 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -64,1394 +64,1369 @@
namespace mongo {
- using boost::intrusive_ptr;
- using std::unique_ptr;
- using std::shared_ptr;
- using std::list;
- using std::make_pair;
- using std::map;
- using std::multimap;
- using std::set;
- using std::string;
- using std::stringstream;
- using std::vector;
-
- namespace dbgrid_pub_cmds {
-
- class PublicGridCommand : public Command {
- public:
- PublicGridCommand( const char* n, const char* oldname=NULL ) : Command( n, false, oldname ) {
- }
- virtual bool slaveOk() const {
- return true;
- }
- virtual bool adminOnly() const {
- return false;
- }
-
- // Override if passthrough should also send query options
- // Safer as off by default, can slowly enable as we add more tests
- virtual bool passOptions() const { return false; }
-
- // all grid commands are designed not to lock
- virtual bool isWriteCommandForConfigServer() const { return false; }
-
- protected:
-
- bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
- return _passthrough(conf->name(), conf, cmdObj, 0, result);
- }
-
- bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
- return _passthrough("admin", conf, cmdObj, 0, result);
- }
-
- bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) {
- return _passthrough(conf->name(), conf, cmdObj, options, result);
- }
-
- private:
- bool _passthrough(const string& db,
- DBConfigPtr conf,
- const BSONObj& cmdObj,
- int options, BSONObjBuilder& result) {
- const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- ShardConnection conn(shard->getConnString(), "");
-
- BSONObj res;
- bool ok = conn->runCommand(db, cmdObj, res, passOptions() ? options : 0);
- conn.done();
-
- result.appendElements(res);
- return ok;
+using boost::intrusive_ptr;
+using std::unique_ptr;
+using std::shared_ptr;
+using std::list;
+using std::make_pair;
+using std::map;
+using std::multimap;
+using std::set;
+using std::string;
+using std::stringstream;
+using std::vector;
+
+namespace dbgrid_pub_cmds {
+
+class PublicGridCommand : public Command {
+public:
+ PublicGridCommand(const char* n, const char* oldname = NULL) : Command(n, false, oldname) {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ // Override if passthrough should also send query options
+ // Safer as off by default, can slowly enable as we add more tests
+ virtual bool passOptions() const {
+ return false;
+ }
+
+ // all grid commands are designed not to lock
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+
+protected:
+ bool passthrough(DBConfigPtr conf, const BSONObj& cmdObj, BSONObjBuilder& result) {
+ return _passthrough(conf->name(), conf, cmdObj, 0, result);
+ }
+
+ bool adminPassthrough(DBConfigPtr conf, const BSONObj& cmdObj, BSONObjBuilder& result) {
+ return _passthrough("admin", conf, cmdObj, 0, result);
+ }
+
+ bool passthrough(DBConfigPtr conf, const BSONObj& cmdObj, int options, BSONObjBuilder& result) {
+ return _passthrough(conf->name(), conf, cmdObj, options, result);
+ }
+
+private:
+ bool _passthrough(const string& db,
+ DBConfigPtr conf,
+ const BSONObj& cmdObj,
+ int options,
+ BSONObjBuilder& result) {
+ const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ ShardConnection conn(shard->getConnString(), "");
+
+ BSONObj res;
+ bool ok = conn->runCommand(db, cmdObj, res, passOptions() ? options : 0);
+ conn.done();
+
+ result.appendElements(res);
+ return ok;
+ }
+};
+
+class AllShardsCollectionCommand : public RunOnAllShardsCommand {
+public:
+ AllShardsCollectionCommand(const char* n,
+ const char* oldname = NULL,
+ bool useShardConn = false,
+ bool implicitCreateDb = false)
+ : RunOnAllShardsCommand(n, oldname, useShardConn, implicitCreateDb) {}
+
+ virtual void getShardIds(const string& dbName, BSONObj& cmdObj, vector<ShardId>& shardIds) {
+ const string fullns = dbName + '.' + cmdObj.firstElement().valuestrsafe();
+
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ uassertStatusOK(status.getStatus());
+
+ shared_ptr<DBConfig> conf = status.getValue();
+
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ shardIds.push_back(conf->getShardId(fullns));
+ } else {
+ grid.shardRegistry()->getAllShardIds(&shardIds);
+ }
+ }
+};
+
+
+class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
+public:
+ NotAllowedOnShardedCollectionCmd(const char* n) : PublicGridCommand(n) {}
+
+ virtual bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ if (!conf->isSharded(fullns)) {
+ return passthrough(conf, cmdObj, options, result);
+ }
+
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::IllegalOperation,
+ str::stream() << "can't do command: " << name << " on sharded collection"));
+ }
+};
+
+// ----
+
+class DropIndexesCmd : public AllShardsCollectionCommand {
+public:
+ DropIndexesCmd() : AllShardsCollectionCommand("dropIndexes", "deleteIndexes") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+} dropIndexesCmd;
+
+class CreateIndexesCmd : public AllShardsCollectionCommand {
+public:
+ CreateIndexesCmd()
+ : AllShardsCollectionCommand("createIndexes",
+ NULL, /* oldName */
+ true /* use ShardConnection */,
+ true /* implicit create db */) {
+ // createIndexes command should use ShardConnection so the getLastError would
+ // be able to properly enforce the write concern (via the saveGLEStats callback).
+ }
+
+ /**
+ * the createIndexes command doesn't require the 'ns' field to be populated
+ * so we make sure its here as its needed for the system.indexes insert
+ */
+ BSONObj fixSpec(const NamespaceString& ns, const BSONObj& original) const {
+ if (original["ns"].type() == String)
+ return original;
+ BSONObjBuilder bb;
+ bb.appendElements(original);
+ bb.append("ns", ns.toString());
+ return bb.obj();
+ }
+
+ /**
+ * @return equivalent of gle
+ */
+ BSONObj createIndexLegacy(const string& server,
+ const NamespaceString& nss,
+ const BSONObj& spec) const {
+ try {
+ ScopedDbConnection conn(server);
+ conn->insert(nss.getSystemIndexesCollection(), spec);
+ BSONObj gle = conn->getLastErrorDetailed(nss.db().toString());
+ conn.done();
+ return gle;
+ } catch (DBException& e) {
+ BSONObjBuilder b;
+ b.append("errmsg", e.toString());
+ b.append("code", e.getCode());
+ return b.obj();
+ }
+ }
+
+ virtual BSONObj specialErrorHandler(const string& server,
+ const string& dbName,
+ const BSONObj& cmdObj,
+ const BSONObj& originalResult) const {
+ string errmsg = originalResult["errmsg"];
+ if (errmsg.find("no such cmd") == string::npos) {
+ // cannot use codes as 2.4 didn't have a code for this
+ return originalResult;
+ }
+
+ // we need to down convert
+
+ NamespaceString nss(dbName, cmdObj["createIndexes"].String());
+
+ if (cmdObj["indexes"].type() != Array)
+ return originalResult;
+
+ BSONObjBuilder newResult;
+ newResult.append("note", "downgraded");
+ newResult.append("sentTo", server);
+
+ BSONArrayBuilder individualResults;
+
+ bool ok = true;
+
+ BSONObjIterator indexIterator(cmdObj["indexes"].Obj());
+ while (indexIterator.more()) {
+ BSONObj spec = indexIterator.next().Obj();
+ spec = fixSpec(nss, spec);
+
+ BSONObj gle = createIndexLegacy(server, nss, spec);
+
+ individualResults.append(BSON("spec" << spec << "gle" << gle));
+
+ BSONElement e = gle["errmsg"];
+ if (e.type() == String && e.String().size() > 0) {
+ ok = false;
+ newResult.appendAs(e, "errmsg");
+ break;
}
- };
-
- class AllShardsCollectionCommand : public RunOnAllShardsCommand {
- public:
- AllShardsCollectionCommand(const char* n,
- const char* oldname = NULL,
- bool useShardConn = false,
- bool implicitCreateDb = false)
- : RunOnAllShardsCommand(n, oldname, useShardConn, implicitCreateDb) {
+ e = gle["err"];
+ if (e.type() == String && e.String().size() > 0) {
+ ok = false;
+ newResult.appendAs(e, "errmsg");
+ break;
}
-
- virtual void getShardIds(const string& dbName,
- BSONObj& cmdObj,
- vector<ShardId>& shardIds) {
- const string fullns = dbName + '.' + cmdObj.firstElement().valuestrsafe();
-
- auto status = grid.catalogCache()->getDatabase(dbName);
- uassertStatusOK(status.getStatus());
-
- shared_ptr<DBConfig> conf = status.getValue();
-
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- shardIds.push_back(conf->getShardId(fullns));
+ }
+
+ newResult.append("eachIndex", individualResults.arr());
+
+ newResult.append("ok", ok ? 1 : 0);
+ return newResult.obj();
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::createIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+} createIndexesCmd;
+
+class ReIndexCmd : public AllShardsCollectionCommand {
+public:
+ ReIndexCmd() : AllShardsCollectionCommand("reIndex") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::reIndex);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+} reIndexCmd;
+
+class CollectionModCmd : public AllShardsCollectionCommand {
+public:
+ CollectionModCmd() : AllShardsCollectionCommand("collMod") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::collMod);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+} collectionModCmd;
+
+
+class ValidateCmd : public AllShardsCollectionCommand {
+public:
+ ValidateCmd() : AllShardsCollectionCommand("validate") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::validate);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ virtual void aggregateResults(const vector<ShardAndReply>& results, BSONObjBuilder& output) {
+ for (vector<ShardAndReply>::const_iterator it(results.begin()), end(results.end());
+ it != end;
+ it++) {
+ const BSONObj& result = std::get<1>(*it);
+ const BSONElement valid = result["valid"];
+ if (!valid.eoo()) {
+ if (!valid.trueValue()) {
+ output.appendBool("valid", false);
+ return;
}
- else {
- grid.shardRegistry()->getAllShardIds(&shardIds);
+ } else {
+ // Support pre-1.9.0 output with everything in a big string
+ const char* s = result["result"].valuestrsafe();
+ if (strstr(s, "exception") || strstr(s, "corrupt")) {
+ output.appendBool("valid", false);
+ return;
}
}
- };
-
-
- class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
- public:
- NotAllowedOnShardedCollectionCmd( const char * n ) : PublicGridCommand( n ) {}
-
- virtual bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
-
- const string fullns = parseNs(dbName, cmdObj);
-
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- if (!conf->isSharded(fullns)) {
- return passthrough( conf , cmdObj , options, result );
- }
-
- return appendCommandStatus(result,
- Status(ErrorCodes::IllegalOperation,
- str::stream() << "can't do command: " << name
- << " on sharded collection"));
- }
-
- };
-
- // ----
-
- class DropIndexesCmd : public AllShardsCollectionCommand {
- public:
- DropIndexesCmd() : AllShardsCollectionCommand("dropIndexes", "deleteIndexes") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- } dropIndexesCmd;
-
- class CreateIndexesCmd : public AllShardsCollectionCommand {
- public:
- CreateIndexesCmd():
- AllShardsCollectionCommand("createIndexes",
- NULL, /* oldName */
- true /* use ShardConnection */,
- true /* implicit create db */) {
- // createIndexes command should use ShardConnection so the getLastError would
- // be able to properly enforce the write concern (via the saveGLEStats callback).
+ }
+
+ output.appendBool("valid", true);
+ }
+} validateCmd;
+
+class CreateCmd : public PublicGridCommand {
+public:
+ CreateCmd() : PublicGridCommand("create") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ if (cmdObj["capped"].trueValue()) {
+ if (!authzSession->isAuthorizedForActionsOnResource(
+ parseResourcePattern(dbname, cmdObj), ActionType::convertToCapped)) {
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
}
-
- /**
- * the createIndexes command doesn't require the 'ns' field to be populated
- * so we make sure its here as its needed for the system.indexes insert
- */
- BSONObj fixSpec( const NamespaceString& ns, const BSONObj& original ) const {
- if ( original["ns"].type() == String )
- return original;
- BSONObjBuilder bb;
- bb.appendElements( original );
- bb.append( "ns", ns.toString() );
- return bb.obj();
+ }
+
+ // ActionType::createCollection or ActionType::insert are both acceptable
+ if (authzSession->isAuthorizedForActionsOnResource(parseResourcePattern(dbname, cmdObj),
+ ActionType::createCollection) ||
+ authzSession->isAuthorizedForActionsOnResource(parseResourcePattern(dbname, cmdObj),
+ ActionType::insert)) {
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+ }
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto status = grid.implicitCreateDb(dbName);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+
+ return passthrough(conf, cmdObj, result);
+ }
+
+} createCmd;
+
+class DropCmd : public PublicGridCommand {
+public:
+ DropCmd() : PublicGridCommand("drop") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::dropCollection);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ if (!status.isOK()) {
+ if (status == ErrorCodes::DatabaseNotFound) {
+ return true;
}
- /**
- * @return equivalent of gle
- */
- BSONObj createIndexLegacy( const string& server,
- const NamespaceString& nss,
- const BSONObj& spec ) const {
- try {
- ScopedDbConnection conn( server );
- conn->insert( nss.getSystemIndexesCollection(), spec );
- BSONObj gle = conn->getLastErrorDetailed( nss.db().toString() );
- conn.done();
- return gle;
- }
- catch ( DBException& e ) {
- BSONObjBuilder b;
- b.append( "errmsg", e.toString() );
- b.append( "code", e.getCode() );
- return b.obj();
+ return appendCommandStatus(result, status.getStatus());
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+
+ const string fullns = dbName + "." + cmdObj.firstElement().valuestrsafe();
+ log() << "DROP: " << fullns;
+
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ log() << "\tdrop going to do passthrough";
+ return passthrough(conf, cmdObj, result);
+ }
+
+ //
+ // TODO: There will be problems if we simultaneously shard and drop a collection
+ //
+
+ ChunkManagerPtr cm;
+ ShardPtr primary;
+ conf->getChunkManagerOrPrimary(fullns, cm, primary);
+
+ if (!cm) {
+ log() << "\tdrop going to do passthrough after re-check";
+ return passthrough(conf, cmdObj, result);
+ }
+
+ uassertStatusOK(grid.catalogManager()->dropCollection(fullns));
+
+ if (!conf->removeSharding(fullns)) {
+ warning() << "collection " << fullns
+ << " was reloaded as unsharded before drop completed"
+ << " during single drop";
+ }
+
+ return 1;
+ }
+} dropCmd;
+
+class RenameCollectionCmd : public PublicGridCommand {
+public:
+ RenameCollectionCmd() : PublicGridCommand("renameCollection") {}
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return rename_collection::checkAuthForRenameCollectionCommand(client, dbname, cmdObj);
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullnsFrom = cmdObj.firstElement().valuestrsafe();
+ const string dbNameFrom = nsToDatabase(fullnsFrom);
+ auto confFrom = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameFrom));
+
+ const string fullnsTo = cmdObj["to"].valuestrsafe();
+ const string dbNameTo = nsToDatabase(fullnsTo);
+ auto confTo = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameTo));
+
+ uassert(13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom));
+ uassert(13139, "You can't rename to a sharded collection", !confTo->isSharded(fullnsTo));
+
+ const ShardId& shardTo = confTo->getShardId(fullnsTo);
+ const ShardId& shardFrom = confFrom->getShardId(fullnsFrom);
+
+ uassert(13137,
+ "Source and destination collections must be on same shard",
+ shardFrom == shardTo);
+
+ return adminPassthrough(confFrom, cmdObj, result);
+ }
+} renameCollectionCmd;
+
+class CopyDBCmd : public PublicGridCommand {
+public:
+ CopyDBCmd() : PublicGridCommand("copydb") {}
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string todb = cmdObj.getStringField("todb");
+ uassert(ErrorCodes::EmptyFieldName, "missing todb argument", !todb.empty());
+ uassert(ErrorCodes::InvalidNamespace, "invalid todb argument", nsIsDbOnly(todb));
+
+ auto confTo = uassertStatusOK(grid.implicitCreateDb(todb));
+ uassert(ErrorCodes::IllegalOperation,
+ "cannot copy to a sharded database",
+ !confTo->isShardingEnabled());
+
+ const string fromhost = cmdObj.getStringField("fromhost");
+ if (!fromhost.empty()) {
+ return adminPassthrough(confTo, cmdObj, result);
+ } else {
+ const string fromdb = cmdObj.getStringField("fromdb");
+ uassert(13399, "need a fromdb argument", !fromdb.empty());
+
+ shared_ptr<DBConfig> confFrom =
+ uassertStatusOK(grid.catalogCache()->getDatabase(fromdb));
+
+ uassert(13400, "don't know where source DB is", confFrom);
+ uassert(13401, "cant copy from sharded DB", !confFrom->isShardingEnabled());
+
+ BSONObjBuilder b;
+ BSONForEach(e, cmdObj) {
+ if (strcmp(e.fieldName(), "fromhost") != 0) {
+ b.append(e);
}
}
- virtual BSONObj specialErrorHandler( const string& server,
- const string& dbName,
- const BSONObj& cmdObj,
- const BSONObj& originalResult ) const {
- string errmsg = originalResult["errmsg"];
- if ( errmsg.find( "no such cmd" ) == string::npos ) {
- // cannot use codes as 2.4 didn't have a code for this
- return originalResult;
- }
-
- // we need to down convert
-
- NamespaceString nss( dbName, cmdObj["createIndexes"].String() );
-
- if ( cmdObj["indexes"].type() != Array )
- return originalResult;
-
- BSONObjBuilder newResult;
- newResult.append( "note", "downgraded" );
- newResult.append( "sentTo", server );
-
- BSONArrayBuilder individualResults;
-
- bool ok = true;
-
- BSONObjIterator indexIterator( cmdObj["indexes"].Obj() );
- while ( indexIterator.more() ) {
- BSONObj spec = indexIterator.next().Obj();
- spec = fixSpec( nss, spec );
-
- BSONObj gle = createIndexLegacy( server, nss, spec );
-
- individualResults.append( BSON( "spec" << spec <<
- "gle" << gle ) );
-
- BSONElement e = gle["errmsg"];
- if ( e.type() == String && e.String().size() > 0 ) {
- ok = false;
- newResult.appendAs( e, "errmsg" );
- break;
- }
+ {
+ const auto& shard = grid.shardRegistry()->getShard(confFrom->getPrimaryId());
+ b.append("fromhost", shard->getConnString().toString());
+ }
+ BSONObj fixed = b.obj();
+
+ return adminPassthrough(confTo, fixed, result);
+ }
+ }
+
+} clusterCopyDBCmd;
+
+class CollectionStats : public PublicGridCommand {
+public:
+ CollectionStats() : PublicGridCommand("collStats", "collstats") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::collStats);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ result.appendBool("sharded", false);
+ result.append("primary", conf->getPrimaryId());
+
+ return passthrough(conf, cmdObj, result);
+ }
+
+ result.appendBool("sharded", true);
+
+ ChunkManagerPtr cm = conf->getChunkManager(fullns);
+ massert(12594, "how could chunk manager be null!", cm);
+
+ BSONObjBuilder shardStats;
+ map<string, long long> counts;
+ map<string, long long> indexSizes;
+ /*
+ long long count=0;
+ long long size=0;
+ long long storageSize=0;
+ */
+ int nindexes = 0;
+ bool warnedAboutIndexes = false;
+
+ set<ShardId> shardIds;
+ cm->getAllShardIds(&shardIds);
+
+ for (const ShardId& shardId : shardIds) {
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ if (!shard) {
+ continue;
+ }
- e = gle["err"];
- if ( e.type() == String && e.String().size() > 0 ) {
- ok = false;
- newResult.appendAs( e, "errmsg" );
- break;
+ BSONObj res;
+ {
+ ScopedDbConnection conn(shard->getConnString());
+ if (!conn->runCommand(dbName, cmdObj, res)) {
+ if (!res["code"].eoo()) {
+ result.append(res["code"]);
}
-
+ errmsg = "failed on shard: " + res.toString();
+ return false;
}
-
- newResult.append( "eachIndex", individualResults.arr() );
-
- newResult.append( "ok", ok ? 1 : 0 );
- return newResult.obj();
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::createIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ conn.done();
}
- } createIndexesCmd;
-
- class ReIndexCmd : public AllShardsCollectionCommand {
- public:
- ReIndexCmd() : AllShardsCollectionCommand("reIndex") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::reIndex);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- } reIndexCmd;
-
- class CollectionModCmd : public AllShardsCollectionCommand {
- public:
- CollectionModCmd() : AllShardsCollectionCommand("collMod") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::collMod);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- } collectionModCmd;
-
-
- class ValidateCmd : public AllShardsCollectionCommand {
- public:
- ValidateCmd() : AllShardsCollectionCommand("validate") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::validate);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- virtual void aggregateResults(const vector<ShardAndReply>& results,
- BSONObjBuilder& output) {
-
- for (vector<ShardAndReply>::const_iterator it(results.begin()), end(results.end());
- it!=end; it++) {
- const BSONObj& result = std::get<1>(*it);
- const BSONElement valid = result["valid"];
- if (!valid.eoo()){
- if (!valid.trueValue()) {
- output.appendBool("valid", false);
- return;
- }
+ BSONObjIterator j(res);
+ while (j.more()) {
+ BSONElement e = j.next();
+
+ if (str::equals(e.fieldName(), "ns") || str::equals(e.fieldName(), "ok") ||
+ str::equals(e.fieldName(), "avgObjSize") ||
+ str::equals(e.fieldName(), "lastExtentSize") ||
+ str::equals(e.fieldName(), "paddingFactor")) {
+ continue;
+ } else if (str::equals(e.fieldName(), "count") ||
+ str::equals(e.fieldName(), "size") ||
+ str::equals(e.fieldName(), "storageSize") ||
+ str::equals(e.fieldName(), "numExtents") ||
+ str::equals(e.fieldName(), "totalIndexSize")) {
+ counts[e.fieldName()] += e.numberLong();
+ } else if (str::equals(e.fieldName(), "indexSizes")) {
+ BSONObjIterator k(e.Obj());
+ while (k.more()) {
+ BSONElement temp = k.next();
+ indexSizes[temp.fieldName()] += temp.numberLong();
}
- else {
- // Support pre-1.9.0 output with everything in a big string
- const char* s = result["result"].valuestrsafe();
- if (strstr(s, "exception") || strstr(s, "corrupt")){
- output.appendBool("valid", false);
- return;
+ }
+ // no longer used since 2.2
+ else if (str::equals(e.fieldName(), "flags")) {
+ if (!result.hasField(e.fieldName()))
+ result.append(e);
+ }
+ // flags broken out in 2.4+
+ else if (str::equals(e.fieldName(), "systemFlags")) {
+ if (!result.hasField(e.fieldName()))
+ result.append(e);
+ } else if (str::equals(e.fieldName(), "userFlags")) {
+ if (!result.hasField(e.fieldName()))
+ result.append(e);
+ } else if (str::equals(e.fieldName(), "capped")) {
+ if (!result.hasField(e.fieldName()))
+ result.append(e);
+ } else if (str::equals(e.fieldName(), "paddingFactorNote")) {
+ if (!result.hasField(e.fieldName()))
+ result.append(e);
+ } else if (str::equals(e.fieldName(), "indexDetails")) {
+ // skip this field in the rollup
+ } else if (str::equals(e.fieldName(), "wiredTiger")) {
+ // skip this field in the rollup
+ } else if (str::equals(e.fieldName(), "nindexes")) {
+ int myIndexes = e.numberInt();
+
+ if (nindexes == 0) {
+ nindexes = myIndexes;
+ } else if (nindexes == myIndexes) {
+ // no-op
+ } else {
+ // hopefully this means we're building an index
+
+ if (myIndexes > nindexes)
+ nindexes = myIndexes;
+
+ if (!warnedAboutIndexes) {
+ result.append("warning",
+ "indexes don't all match - ok if ensureIndex is running");
+ warnedAboutIndexes = true;
}
}
+ } else {
+ warning() << "mongos collstats doesn't know about: " << e.fieldName();
}
-
- output.appendBool("valid", true);
}
- } validateCmd;
-
- class CreateCmd : public PublicGridCommand {
- public:
- CreateCmd() : PublicGridCommand( "create" ) {}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- if (cmdObj["capped"].trueValue()) {
- if (!authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::convertToCapped)) {
- return Status(ErrorCodes::Unauthorized, "unauthorized");
- }
- }
-
- // ActionType::createCollection or ActionType::insert are both acceptable
- if (authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::createCollection) ||
- authzSession->isAuthorizedForActionsOnResource(
- parseResourcePattern(dbname, cmdObj), ActionType::insert)) {
- return Status::OK();
- }
-
- return Status(ErrorCodes::Unauthorized, "unauthorized");
+ shardStats.append(shardId, res);
+ }
+
+ result.append("ns", fullns);
+
+ for (map<string, long long>::iterator i = counts.begin(); i != counts.end(); ++i)
+ result.appendNumber(i->first, i->second);
+
+ {
+ BSONObjBuilder ib(result.subobjStart("indexSizes"));
+ for (map<string, long long>::iterator i = indexSizes.begin(); i != indexSizes.end();
+ ++i)
+ ib.appendNumber(i->first, i->second);
+ ib.done();
+ }
+
+ if (counts["count"] > 0)
+ result.append("avgObjSize", (double)counts["size"] / (double)counts["count"]);
+ else
+ result.append("avgObjSize", 0.0);
+
+ result.append("nindexes", nindexes);
+
+ result.append("nchunks", cm->numChunks());
+ result.append("shards", shardStats.obj());
+
+ return true;
+ }
+} collectionStatsCmd;
+
+class DataSizeCmd : public PublicGridCommand {
+public:
+ DataSizeCmd() : PublicGridCommand("dataSize", "datasize") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ return passthrough(conf, cmdObj, result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager(fullns);
+ massert(13407, "how could chunk manager be null!", cm);
+
+ BSONObj min = cmdObj.getObjectField("min");
+ BSONObj max = cmdObj.getObjectField("max");
+ BSONObj keyPattern = cmdObj.getObjectField("keyPattern");
+
+ uassert(13408,
+ "keyPattern must equal shard key",
+ cm->getShardKeyPattern().toBSON() == keyPattern);
+ uassert(13405,
+ str::stream() << "min value " << min << " does not have shard key",
+ cm->getShardKeyPattern().isShardKey(min));
+ uassert(13406,
+ str::stream() << "max value " << max << " does not have shard key",
+ cm->getShardKeyPattern().isShardKey(max));
+
+ min = cm->getShardKeyPattern().normalizeShardKey(min);
+ max = cm->getShardKeyPattern().normalizeShardKey(max);
+
+ // yes these are doubles...
+ double size = 0;
+ double numObjects = 0;
+ int millis = 0;
+
+ set<ShardId> shardIds;
+ cm->getShardIdsForRange(shardIds, min, max);
+ for (const ShardId& shardId : shardIds) {
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ if (!shard) {
+ continue;
}
- bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- auto status = grid.implicitCreateDb(dbName);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
+ ScopedDbConnection conn(shard->getConnString());
+ BSONObj res;
+ bool ok = conn->runCommand(conf->name(), cmdObj, res);
+ conn.done();
- return passthrough(conf, cmdObj, result);
- }
-
- } createCmd;
-
- class DropCmd : public PublicGridCommand {
- public:
- DropCmd() : PublicGridCommand( "drop" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::dropCollection);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ if (!ok) {
+ result.appendElements(res);
+ return false;
}
- bool run(OperationContext* txn,
+ size += res["size"].number();
+ numObjects += res["numObjects"].number();
+ millis += res["millis"].numberInt();
+ }
+
+ result.append("size", size);
+ result.append("numObjects", numObjects);
+ result.append("millis", millis);
+ return true;
+ }
+
+} dataSizeCmd;
+
+class ConvertToCappedCmd : public NotAllowedOnShardedCollectionCmd {
+public:
+ ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::convertToCapped);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+} convertToCappedCmd;
+
+
+class GroupCmd : public NotAllowedOnShardedCollectionCmd {
+public:
+ GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ virtual bool passOptions() const {
+ return true;
+ }
+
+ virtual std::string parseNs(const std::string& dbName, const BSONObj& cmdObj) const {
+ return dbName + "." + cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe();
+ }
+
+ Status explain(OperationContext* txn,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ ExplainCommon::Verbosity verbosity,
+ BSONObjBuilder* out) const {
+ const string fullns = parseNs(dbname, cmdObj);
+
+ BSONObjBuilder explainCmdBob;
+ ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
+
+ // We will time how long it takes to run the commands on the shards.
+ Timer timer;
+
+ Strategy::CommandResult singleResult;
+ Status commandStat =
+ Strategy::commandOpUnsharded(dbname, explainCmdBob.obj(), 0, fullns, &singleResult);
+ if (!commandStat.isOK()) {
+ return commandStat;
+ }
+
+ long long millisElapsed = timer.millis();
+
+ vector<Strategy::CommandResult> shardResults;
+ shardResults.push_back(singleResult);
+
+ return ClusterExplain::buildExplainResult(
+ shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
+ }
+
+} groupCmd;
+
+class SplitVectorCmd : public NotAllowedOnShardedCollectionCmd {
+public:
+ SplitVectorCmd() : NotAllowedOnShardedCollectionCmd("splitVector") {}
+ virtual bool passOptions() const {
+ return true;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
+ ActionType::splitVector)) {
+ return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ }
+ return Status::OK();
+ }
+ virtual bool run(OperationContext* txn,
const string& dbName,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
-
- auto status = grid.catalogCache()->getDatabase(dbName);
- if (!status.isOK()) {
- if (status == ErrorCodes::DatabaseNotFound) {
- return true;
- }
-
- return appendCommandStatus(result, status.getStatus());
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
-
- const string fullns = dbName + "." + cmdObj.firstElement().valuestrsafe();
- log() << "DROP: " << fullns;
-
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- log() << "\tdrop going to do passthrough";
- return passthrough( conf , cmdObj , result );
- }
-
- //
- // TODO: There will be problems if we simultaneously shard and drop a collection
- //
-
- ChunkManagerPtr cm;
- ShardPtr primary;
- conf->getChunkManagerOrPrimary( fullns, cm, primary );
-
- if( ! cm ) {
- log() << "\tdrop going to do passthrough after re-check";
- return passthrough( conf , cmdObj , result );
- }
-
- uassertStatusOK(grid.catalogManager()->dropCollection(fullns));
-
- if( ! conf->removeSharding( fullns ) ){
- warning() << "collection " << fullns
- << " was reloaded as unsharded before drop completed"
- << " during single drop";
- }
-
- return 1;
- }
- } dropCmd;
-
- class RenameCollectionCmd : public PublicGridCommand {
- public:
- RenameCollectionCmd() : PublicGridCommand( "renameCollection" ) {}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return rename_collection::checkAuthForRenameCollectionCommand(client,
- dbname,
- cmdObj);
+ string x = parseNs(dbName, cmdObj);
+ if (!str::startsWith(x, dbName)) {
+ errmsg = str::stream() << "doing a splitVector across dbs isn't supported via mongos";
+ return false;
+ }
+ return NotAllowedOnShardedCollectionCmd::run(txn, dbName, cmdObj, options, errmsg, result);
+ }
+ virtual std::string parseNs(const string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+
+} splitVectorCmd;
+
+
+class DistinctCmd : public PublicGridCommand {
+public:
+ DistinctCmd() : PublicGridCommand("distinct") {}
+ virtual void help(stringstream& help) const {
+ help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
+ }
+ virtual bool passOptions() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ if (!status.isOK()) {
+ return appendEmptyResultSet(result, status.getStatus(), fullns);
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ return passthrough(conf, cmdObj, options, result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager(fullns);
+ massert(10420, "how could chunk manager be null!", cm);
+
+ BSONObj query = getQuery(cmdObj);
+ set<ShardId> shardIds;
+ cm->getShardIdsForQuery(shardIds, query);
+
+ set<BSONObj, BSONObjCmp> all;
+ int size = 32;
+
+ for (const ShardId& shardId : shardIds) {
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ if (!shard) {
+ continue;
}
- virtual bool adminOnly() const {
- return true;
- }
- bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const string fullnsFrom = cmdObj.firstElement().valuestrsafe();
- const string dbNameFrom = nsToDatabase(fullnsFrom);
- auto confFrom = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameFrom));
-
- const string fullnsTo = cmdObj["to"].valuestrsafe();
- const string dbNameTo = nsToDatabase(fullnsTo);
- auto confTo = uassertStatusOK(grid.catalogCache()->getDatabase(dbNameTo));
-
- uassert(13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom));
- uassert(13139, "You can't rename to a sharded collection", !confTo->isSharded(fullnsTo));
-
- const ShardId& shardTo = confTo->getShardId(fullnsTo);
- const ShardId& shardFrom = confFrom->getShardId(fullnsFrom);
- uassert(13137,
- "Source and destination collections must be on same shard",
- shardFrom == shardTo);
+ ShardConnection conn(shard->getConnString(), fullns);
+ BSONObj res;
+ bool ok = conn->runCommand(conf->name(), cmdObj, res, options);
+ conn.done();
- return adminPassthrough( confFrom , cmdObj , result );
+ if (!ok) {
+ result.appendElements(res);
+ return false;
}
- } renameCollectionCmd;
- class CopyDBCmd : public PublicGridCommand {
- public:
- CopyDBCmd() : PublicGridCommand( "copydb" ) {}
- virtual bool adminOnly() const {
- return true;
+ BSONObjIterator it(res["values"].embeddedObject());
+ while (it.more()) {
+ BSONElement nxt = it.next();
+ BSONObjBuilder temp(32);
+ temp.appendAs(nxt, "");
+ all.insert(temp.obj());
}
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return copydb::checkAuthForCopydbCommand(client, dbname, cmdObj);
- }
-
- bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- const string todb = cmdObj.getStringField("todb");
- uassert(ErrorCodes::EmptyFieldName, "missing todb argument", !todb.empty());
- uassert(ErrorCodes::InvalidNamespace, "invalid todb argument", nsIsDbOnly(todb));
-
- auto confTo = uassertStatusOK(grid.implicitCreateDb(todb));
- uassert(ErrorCodes::IllegalOperation,
- "cannot copy to a sharded database",
- !confTo->isShardingEnabled());
-
- const string fromhost = cmdObj.getStringField("fromhost");
- if (!fromhost.empty()) {
- return adminPassthrough( confTo , cmdObj , result );
- }
- else {
- const string fromdb = cmdObj.getStringField("fromdb");
- uassert(13399, "need a fromdb argument", !fromdb.empty());
-
- shared_ptr<DBConfig> confFrom =
- uassertStatusOK(grid.catalogCache()->getDatabase(fromdb));
-
- uassert(13400, "don't know where source DB is", confFrom);
- uassert(13401, "cant copy from sharded DB", !confFrom->isShardingEnabled());
-
- BSONObjBuilder b;
- BSONForEach(e, cmdObj) {
- if (strcmp(e.fieldName(), "fromhost") != 0) {
- b.append(e);
- }
- }
-
- {
- const auto& shard =
- grid.shardRegistry()->getShard(confFrom->getPrimaryId());
- b.append("fromhost", shard->getConnString().toString());
- }
- BSONObj fixed = b.obj();
-
- return adminPassthrough( confTo , fixed , result );
+ }
+
+ BSONObjBuilder b(size);
+ int n = 0;
+ for (set<BSONObj, BSONObjCmp>::iterator i = all.begin(); i != all.end(); i++) {
+ b.appendAs(i->firstElement(), b.numStr(n++));
+ }
+
+ result.appendArray("values", b.obj());
+ return true;
+ }
+} distinctCmd;
+
+class FileMD5Cmd : public PublicGridCommand {
+public:
+ FileMD5Cmd() : PublicGridCommand("filemd5") {}
+ virtual void help(stringstream& help) const {
+ help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
+ }
+
+ virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
+ std::string collectionName = cmdObj.getStringField("root");
+ if (collectionName.empty())
+ collectionName = "fs";
+ collectionName += ".chunks";
+ return NamespaceString(dbname, collectionName).ns();
+ }
+
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ return passthrough(conf, cmdObj, result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager(fullns);
+ massert(13091, "how could chunk manager be null!", cm);
+ if (cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1)) {
+ BSONObj finder = BSON("files_id" << cmdObj.firstElement());
+
+ vector<Strategy::CommandResult> results;
+ Strategy::commandOp(dbName, cmdObj, 0, fullns, finder, &results);
+ verify(results.size() == 1); // querying on shard key so should only talk to one shard
+ BSONObj res = results.begin()->result;
+
+ result.appendElements(res);
+ return res["ok"].trueValue();
+ } else if (cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1 << "n" << 1)) {
+ int n = 0;
+ BSONObj lastResult;
+
+ while (true) {
+ // Theory of operation: Starting with n=0, send filemd5 command to shard
+ // with that chunk (gridfs chunk not sharding chunk). That shard will then
+ // compute a partial md5 state (passed in the "md5state" field) for all
+ // contiguous chunks that it has. When it runs out or hits a discontinuity
+ // (eg [1,2,7]) it returns what it has done so far. This is repeated as
+ // long as we keep getting more chunks. The end condition is when we go to
+ // look for chunk n and it doesn't exist. This means that the file's last
+ // chunk is n-1, so we return the computed md5 results.
+ BSONObjBuilder bb;
+ bb.appendElements(cmdObj);
+ bb.appendBool("partialOk", true);
+ bb.append("startAt", n);
+ if (!lastResult.isEmpty()) {
+ bb.append(lastResult["md5state"]);
}
+ BSONObj shardCmd = bb.obj();
- }
-
- } clusterCopyDBCmd;
-
- class CollectionStats : public PublicGridCommand {
- public:
- CollectionStats() : PublicGridCommand("collStats", "collstats") { }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::collStats);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
+ BSONObj finder = BSON("files_id" << cmdObj.firstElement() << "n" << n);
- bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const string fullns = parseNs(dbName, cmdObj);
-
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- result.appendBool("sharded", false);
- result.append("primary", conf->getPrimaryId());
-
- return passthrough( conf , cmdObj , result);
+ vector<Strategy::CommandResult> results;
+ try {
+ Strategy::commandOp(dbName, shardCmd, 0, fullns, finder, &results);
+ } catch (DBException& e) {
+ // This is handled below and logged
+ Strategy::CommandResult errResult;
+ errResult.shardTargetId = "";
+ errResult.result = BSON("errmsg" << e.what() << "ok" << 0);
+ results.push_back(errResult);
}
- result.appendBool("sharded", true);
-
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 12594 , "how could chunk manager be null!" , cm );
-
- BSONObjBuilder shardStats;
- map<string,long long> counts;
- map<string,long long> indexSizes;
- /*
- long long count=0;
- long long size=0;
- long long storageSize=0;
- */
- int nindexes=0;
- bool warnedAboutIndexes = false;
-
- set<ShardId> shardIds;
- cm->getAllShardIds(&shardIds);
-
- for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(shardId);
- if (!shard) {
- continue;
- }
-
- BSONObj res;
- {
- ScopedDbConnection conn(shard->getConnString());
- if ( ! conn->runCommand( dbName , cmdObj , res ) ) {
- if ( !res["code"].eoo() ) {
- result.append( res["code"] );
- }
- errmsg = "failed on shard: " + res.toString();
- return false;
- }
- conn.done();
- }
-
- BSONObjIterator j( res );
- while ( j.more() ) {
- BSONElement e = j.next();
-
- if ( str::equals( e.fieldName() , "ns" ) ||
- str::equals( e.fieldName() , "ok" ) ||
- str::equals( e.fieldName() , "avgObjSize" ) ||
- str::equals( e.fieldName() , "lastExtentSize" ) ||
- str::equals( e.fieldName() , "paddingFactor" ) ) {
- continue;
- }
- else if ( str::equals( e.fieldName() , "count" ) ||
- str::equals( e.fieldName() , "size" ) ||
- str::equals( e.fieldName() , "storageSize" ) ||
- str::equals( e.fieldName() , "numExtents" ) ||
- str::equals( e.fieldName() , "totalIndexSize" ) ) {
- counts[e.fieldName()] += e.numberLong();
- }
- else if ( str::equals( e.fieldName() , "indexSizes" ) ) {
- BSONObjIterator k( e.Obj() );
- while ( k.more() ) {
- BSONElement temp = k.next();
- indexSizes[temp.fieldName()] += temp.numberLong();
- }
- }
- // no longer used since 2.2
- else if ( str::equals( e.fieldName() , "flags" ) ) {
- if ( ! result.hasField( e.fieldName() ) )
- result.append( e );
- }
- // flags broken out in 2.4+
- else if ( str::equals( e.fieldName() , "systemFlags" ) ) {
- if ( ! result.hasField( e.fieldName() ) )
- result.append( e );
- }
- else if ( str::equals( e.fieldName() , "userFlags" ) ) {
- if ( ! result.hasField( e.fieldName() ) )
- result.append( e );
- }
- else if ( str::equals( e.fieldName() , "capped" ) ) {
- if ( ! result.hasField( e.fieldName() ) )
- result.append( e );
- }
- else if ( str::equals( e.fieldName() , "paddingFactorNote" ) ) {
- if ( ! result.hasField( e.fieldName() ) )
- result.append( e );
- }
- else if ( str::equals( e.fieldName() , "indexDetails" ) ) {
- //skip this field in the rollup
- }
- else if ( str::equals( e.fieldName() , "wiredTiger" ) ) {
- //skip this field in the rollup
- }
- else if ( str::equals( e.fieldName() , "nindexes" ) ) {
- int myIndexes = e.numberInt();
-
- if ( nindexes == 0 ) {
- nindexes = myIndexes;
- }
- else if ( nindexes == myIndexes ) {
- // no-op
- }
- else {
- // hopefully this means we're building an index
-
- if ( myIndexes > nindexes )
- nindexes = myIndexes;
-
- if ( ! warnedAboutIndexes ) {
- result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" );
- warnedAboutIndexes = true;
- }
- }
- }
- else {
- warning() << "mongos collstats doesn't know about: " << e.fieldName();
- }
-
+ verify(results.size() ==
+ 1); // querying on shard key so should only talk to one shard
+ BSONObj res = results.begin()->result;
+ bool ok = res["ok"].trueValue();
+
+ if (!ok) {
+ // Add extra info to make debugging easier
+ result.append("failedAt", n);
+ result.append("sentCommand", shardCmd);
+ BSONForEach(e, res) {
+ if (!str::equals(e.fieldName(), "errmsg"))
+ result.append(e);
}
- shardStats.append(shardId, res);
- }
- result.append("ns", fullns);
-
- for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); ++i )
- result.appendNumber( i->first , i->second );
-
- {
- BSONObjBuilder ib( result.subobjStart( "indexSizes" ) );
- for ( map<string,long long>::iterator i=indexSizes.begin(); i!=indexSizes.end(); ++i )
- ib.appendNumber( i->first , i->second );
- ib.done();
- }
+ log() << "Sharded filemd5 failed: " << result.asTempObj();
- if ( counts["count"] > 0 )
- result.append("avgObjSize", (double)counts["size"] / (double)counts["count"] );
- else
- result.append( "avgObjSize", 0.0 );
-
- result.append("nindexes", nindexes);
-
- result.append("nchunks", cm->numChunks());
- result.append("shards", shardStats.obj());
-
- return true;
- }
- } collectionStatsCmd;
-
- class DataSizeCmd : public PublicGridCommand {
- public:
- DataSizeCmd() : PublicGridCommand("dataSize", "datasize") { }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- const string fullns = parseNs(dbName, cmdObj);
-
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- return passthrough( conf , cmdObj , result);
+ errmsg =
+ string("sharded filemd5 failed because: ") + res["errmsg"].valuestrsafe();
+ return false;
}
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 13407 , "how could chunk manager be null!" , cm );
-
- BSONObj min = cmdObj.getObjectField( "min" );
- BSONObj max = cmdObj.getObjectField( "max" );
- BSONObj keyPattern = cmdObj.getObjectField( "keyPattern" );
-
- uassert( 13408, "keyPattern must equal shard key",
- cm->getShardKeyPattern().toBSON() == keyPattern );
- uassert( 13405, str::stream() << "min value " << min << " does not have shard key",
- cm->getShardKeyPattern().isShardKey(min) );
- uassert( 13406, str::stream() << "max value " << max << " does not have shard key",
- cm->getShardKeyPattern().isShardKey(max) );
-
- min = cm->getShardKeyPattern().normalizeShardKey(min);
- max = cm->getShardKeyPattern().normalizeShardKey(max);
-
- // yes these are doubles...
- double size = 0;
- double numObjects = 0;
- int millis = 0;
-
- set<ShardId> shardIds;
- cm->getShardIdsForRange(shardIds, min, max);
- for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(shardId);
- if (!shard) {
- continue;
- }
+ uassert(16246,
+ "Shard " + conf->name() +
+ " is too old to support GridFS sharded by {files_id:1, n:1}",
+ res.hasField("md5state"));
- ScopedDbConnection conn(shard->getConnString());
- BSONObj res;
- bool ok = conn->runCommand( conf->name() , cmdObj , res );
- conn.done();
-
- if ( ! ok ) {
- result.appendElements( res );
- return false;
- }
+ lastResult = res;
+ int nNext = res["numChunks"].numberInt();
- size += res["size"].number();
- numObjects += res["numObjects"].number();
- millis += res["millis"].numberInt();
+ if (n == nNext) {
+ // no new data means we've reached the end of the file
+ result.appendElements(res);
+ return true;
}
- result.append( "size", size );
- result.append( "numObjects" , numObjects );
- result.append( "millis" , millis );
- return true;
- }
-
- } DataSizeCmd;
-
- class ConvertToCappedCmd : public NotAllowedOnShardedCollectionCmd {
- public:
- ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::convertToCapped);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ verify(nNext > n);
+ n = nNext;
}
- } convertToCappedCmd;
-
-
- class GroupCmd : public NotAllowedOnShardedCollectionCmd {
- public:
- GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ verify(0);
+ }
+
+ // We could support arbitrary shard keys by sending commands to all shards but I don't think we should
+ errmsg =
+ "GridFS fs.chunks collection must be sharded on either {files_id:1} or {files_id:1, "
+ "n:1}";
+ return false;
+ }
+} fileMD5Cmd;
+
+class Geo2dFindNearCmd : public PublicGridCommand {
+public:
+ Geo2dFindNearCmd() : PublicGridCommand("geoNear") {}
+ void help(stringstream& h) const {
+ h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand";
+ }
+ virtual bool passOptions() const {
+ return true;
+ }
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::find);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ const string fullns = parseNs(dbName, cmdObj);
+
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
+ return passthrough(conf, cmdObj, options, result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager(fullns);
+ massert(13500, "how could chunk manager be null!", cm);
+
+ BSONObj query = getQuery(cmdObj);
+ set<ShardId> shardIds;
+ cm->getShardIdsForQuery(shardIds, query);
+
+ // We support both "num" and "limit" options to control limit
+ int limit = 100;
+ const char* limitName = cmdObj["num"].isNumber() ? "num" : "limit";
+ if (cmdObj[limitName].isNumber())
+ limit = cmdObj[limitName].numberInt();
+
+ list<shared_ptr<Future::CommandResult>> futures;
+ BSONArrayBuilder shardArray;
+ for (const ShardId& shardId : shardIds) {
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ if (!shard) {
+ continue;
}
- virtual bool passOptions() const { return true; }
-
- virtual std::string parseNs(const std::string& dbName, const BSONObj& cmdObj) const {
- return dbName + "." + cmdObj.firstElement()
- .embeddedObjectUserCheck()["ns"]
- .valuestrsafe();
- }
-
- Status explain(OperationContext* txn,
- const std::string& dbname,
- const BSONObj& cmdObj,
- ExplainCommon::Verbosity verbosity,
- BSONObjBuilder* out) const {
- const string fullns = parseNs(dbname, cmdObj);
-
- BSONObjBuilder explainCmdBob;
- ClusterExplain::wrapAsExplain(cmdObj, verbosity, &explainCmdBob);
-
- // We will time how long it takes to run the commands on the shards.
- Timer timer;
-
- Strategy::CommandResult singleResult;
- Status commandStat = Strategy::commandOpUnsharded(dbname,
- explainCmdBob.obj(),
- 0,
- fullns,
- &singleResult);
- if (!commandStat.isOK()) {
- return commandStat;
+ futures.push_back(
+ Future::spawnCommand(shard->getConnString().toString(), dbName, cmdObj, options));
+ shardArray.append(shardId);
+ }
+
+ multimap<double, BSONObj> results; // TODO: maybe use merge-sort instead
+ string nearStr;
+ double time = 0;
+ double btreelocs = 0;
+ double nscanned = 0;
+ double objectsLoaded = 0;
+ for (list<shared_ptr<Future::CommandResult>>::iterator i = futures.begin();
+ i != futures.end();
+ i++) {
+ shared_ptr<Future::CommandResult> res = *i;
+ if (!res->join()) {
+ errmsg = res->result()["errmsg"].String();
+ if (res->result().hasField("code")) {
+ result.append(res->result()["code"]);
}
-
- long long millisElapsed = timer.millis();
-
- vector<Strategy::CommandResult> shardResults;
- shardResults.push_back(singleResult);
-
- return ClusterExplain::buildExplainResult(shardResults,
- ClusterExplain::kSingleShard,
- millisElapsed,
- out);
+ return false;
}
- } groupCmd;
-
- class SplitVectorCmd : public NotAllowedOnShardedCollectionCmd {
- public:
- SplitVectorCmd() : NotAllowedOnShardedCollectionCmd("splitVector") {}
- virtual bool passOptions() const { return true; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname,
- cmdObj))),
- ActionType::splitVector)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
- return Status::OK();
+ if (res->result().hasField("near")) {
+ nearStr = res->result()["near"].String();
}
- virtual bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- string x = parseNs(dbName, cmdObj);
- if ( ! str::startsWith( x , dbName ) ) {
- errmsg = str::stream() << "doing a splitVector across dbs isn't supported via mongos";
- return false;
- }
- return NotAllowedOnShardedCollectionCmd::run(txn,
- dbName,
- cmdObj,
- options,
- errmsg,
- result);
+ time += res->result()["stats"]["time"].Number();
+ if (!res->result()["stats"]["btreelocs"].eoo()) {
+ btreelocs += res->result()["stats"]["btreelocs"].Number();
}
- virtual std::string parseNs(const string& dbname, const BSONObj& cmdObj) const {
- return parseNsFullyQualified(dbname, cmdObj);
+ nscanned += res->result()["stats"]["nscanned"].Number();
+ if (!res->result()["stats"]["objectsLoaded"].eoo()) {
+ objectsLoaded += res->result()["stats"]["objectsLoaded"].Number();
}
- } splitVectorCmd;
-
-
- class DistinctCmd : public PublicGridCommand {
- public:
- DistinctCmd() : PublicGridCommand("distinct") {}
- virtual void help( stringstream &help ) const {
- help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
- }
- virtual bool passOptions() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ BSONForEach(obj, res->result()["results"].embeddedObject()) {
+ results.insert(make_pair(obj["dis"].Number(), obj.embeddedObject().getOwned()));
}
- bool run(OperationContext* txn,
- const string& dbName ,
- BSONObj& cmdObj,
- int options,
- string& errmsg,
- BSONObjBuilder& result) {
- const string fullns = parseNs(dbName, cmdObj);
-
- auto status = grid.catalogCache()->getDatabase(dbName);
- if (!status.isOK()) {
- return appendEmptyResultSet(result, status.getStatus(), fullns);
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- return passthrough(conf, cmdObj, options, result);
- }
-
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 10420 , "how could chunk manager be null!" , cm );
-
- BSONObj query = getQuery(cmdObj);
- set<ShardId> shardIds;
- cm->getShardIdsForQuery(shardIds, query);
-
- set<BSONObj,BSONObjCmp> all;
- int size = 32;
-
- for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(shardId);
- if (!shard) {
- continue;
- }
+ // TODO: maybe shrink results if size() > limit
+ }
- ShardConnection conn(shard->getConnString(), fullns);
- BSONObj res;
- bool ok = conn->runCommand( conf->name() , cmdObj , res, options );
- conn.done();
+ result.append("ns", fullns);
+ result.append("near", nearStr);
- if ( ! ok ) {
- result.appendElements( res );
- return false;
- }
-
- BSONObjIterator it( res["values"].embeddedObject() );
- while ( it.more() ) {
- BSONElement nxt = it.next();
- BSONObjBuilder temp(32);
- temp.appendAs( nxt , "" );
- all.insert( temp.obj() );
- }
-
- }
-
- BSONObjBuilder b( size );
- int n=0;
- for ( set<BSONObj,BSONObjCmp>::iterator i = all.begin() ; i != all.end(); i++ ) {
- b.appendAs( i->firstElement() , b.numStr( n++ ) );
- }
-
- result.appendArray( "values" , b.obj() );
- return true;
- }
- } disinctCmd;
+ int outCount = 0;
+ double totalDistance = 0;
+ double maxDistance = 0;
+ {
+ BSONArrayBuilder sub(result.subarrayStart("results"));
+ for (multimap<double, BSONObj>::const_iterator it(results.begin()), end(results.end());
+ it != end && outCount < limit;
+ ++it, ++outCount) {
+ totalDistance += it->first;
+ maxDistance = it->first; // guaranteed to be highest so far
- class FileMD5Cmd : public PublicGridCommand {
- public:
- FileMD5Cmd() : PublicGridCommand("filemd5") {}
- virtual void help( stringstream &help ) const {
- help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
+ sub.append(it->second);
}
-
- virtual std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const {
- std::string collectionName = cmdObj.getStringField("root");
- if (collectionName.empty())
- collectionName = "fs";
- collectionName += ".chunks";
- return NamespaceString(dbname, collectionName).ns();
- }
-
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), ActionType::find));
- }
-
- bool run(OperationContext* txn,
+ sub.done();
+ }
+
+ {
+ BSONObjBuilder sub(result.subobjStart("stats"));
+ sub.append("time", time);
+ sub.append("btreelocs", btreelocs);
+ sub.append("nscanned", nscanned);
+ sub.append("objectsLoaded", objectsLoaded);
+ sub.append("avgDistance", (outCount == 0) ? 0 : (totalDistance / outCount));
+ sub.append("maxDistance", maxDistance);
+ sub.append("shards", shardArray.arr());
+ sub.done();
+ }
+
+ return true;
+ }
+} geo2dFindNearCmd;
+
+class ApplyOpsCmd : public PublicGridCommand {
+public:
+ ApplyOpsCmd() : PublicGridCommand("applyOps") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // applyOps can do pretty much anything, so require all privileges.
+ RoleGraph::generateUniversalPrivileges(out);
+ }
+ virtual bool run(OperationContext* txn,
const string& dbName,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- const string fullns = parseNs(dbName, cmdObj);
-
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- return passthrough( conf , cmdObj , result );
- }
-
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 13091 , "how could chunk manager be null!" , cm );
- if(cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1)) {
- BSONObj finder = BSON("files_id" << cmdObj.firstElement());
-
- vector<Strategy::CommandResult> results;
- Strategy::commandOp(dbName, cmdObj, 0, fullns, finder, &results);
- verify(results.size() == 1); // querying on shard key so should only talk to one shard
- BSONObj res = results.begin()->result;
-
- result.appendElements(res);
- return res["ok"].trueValue();
- }
- else if (cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1 << "n" << 1)) {
- int n = 0;
- BSONObj lastResult;
-
- while (true) {
- // Theory of operation: Starting with n=0, send filemd5 command to shard
- // with that chunk (gridfs chunk not sharding chunk). That shard will then
- // compute a partial md5 state (passed in the "md5state" field) for all
- // contiguous chunks that it has. When it runs out or hits a discontinuity
- // (eg [1,2,7]) it returns what it has done so far. This is repeated as
- // long as we keep getting more chunks. The end condition is when we go to
- // look for chunk n and it doesn't exist. This means that the file's last
- // chunk is n-1, so we return the computed md5 results.
- BSONObjBuilder bb;
- bb.appendElements(cmdObj);
- bb.appendBool("partialOk", true);
- bb.append("startAt", n);
- if (!lastResult.isEmpty()){
- bb.append(lastResult["md5state"]);
- }
- BSONObj shardCmd = bb.obj();
-
- BSONObj finder = BSON("files_id" << cmdObj.firstElement() << "n" << n);
-
- vector<Strategy::CommandResult> results;
- try {
- Strategy::commandOp(dbName, shardCmd, 0, fullns, finder, &results);
- }
- catch( DBException& e ){
- //This is handled below and logged
- Strategy::CommandResult errResult;
- errResult.shardTargetId = "";
- errResult.result = BSON("errmsg" << e.what() << "ok" << 0 );
- results.push_back( errResult );
- }
-
- verify(results.size() == 1); // querying on shard key so should only talk to one shard
- BSONObj res = results.begin()->result;
- bool ok = res["ok"].trueValue();
-
- if (!ok) {
- // Add extra info to make debugging easier
- result.append("failedAt", n);
- result.append("sentCommand", shardCmd);
- BSONForEach(e, res){
- if (!str::equals(e.fieldName(), "errmsg"))
- result.append(e);
- }
-
- log() << "Sharded filemd5 failed: " << result.asTempObj();
-
- errmsg = string("sharded filemd5 failed because: ") + res["errmsg"].valuestrsafe();
- return false;
- }
-
- uassert(16246, "Shard " + conf->name() + " is too old to support GridFS sharded by {files_id:1, n:1}",
- res.hasField("md5state"));
-
- lastResult = res;
- int nNext = res["numChunks"].numberInt();
-
- if (n == nNext){
- // no new data means we've reached the end of the file
- result.appendElements(res);
- return true;
- }
-
- verify(nNext > n);
- n = nNext;
- }
-
- verify(0);
- }
-
- // We could support arbitrary shard keys by sending commands to all shards but I don't think we should
- errmsg = "GridFS fs.chunks collection must be sharded on either {files_id:1} or {files_id:1, n:1}";
- return false;
- }
- } fileMD5Cmd;
-
- class Geo2dFindNearCmd : public PublicGridCommand {
- public:
- Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ) {}
- void help(stringstream& h) const { h << "http://dochub.mongodb.org/core/geo#GeospatialIndexing-geoNearCommand"; }
- virtual bool passOptions() const { return true; }
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::find);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
-
- bool run(OperationContext* txn,
+ errmsg = "applyOps not allowed through mongos";
+ return false;
+ }
+} applyOpsCmd;
+
+
+class CompactCmd : public PublicGridCommand {
+public:
+ CompactCmd() : PublicGridCommand("compact") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ ActionSet actions;
+ actions.addAction(ActionType::compact);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+ virtual bool run(OperationContext* txn,
const string& dbName,
BSONObj& cmdObj,
- int options,
+ int,
string& errmsg,
BSONObjBuilder& result) {
- const string fullns = parseNs(dbName, cmdObj);
-
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- if (!conf->isShardingEnabled() || !conf->isSharded(fullns)) {
- return passthrough( conf , cmdObj , options, result );
- }
-
- ChunkManagerPtr cm = conf->getChunkManager( fullns );
- massert( 13500 , "how could chunk manager be null!" , cm );
-
- BSONObj query = getQuery(cmdObj);
- set<ShardId> shardIds;
- cm->getShardIdsForQuery(shardIds, query);
-
- // We support both "num" and "limit" options to control limit
- int limit = 100;
- const char* limitName = cmdObj["num"].isNumber() ? "num" : "limit";
- if (cmdObj[limitName].isNumber())
- limit = cmdObj[limitName].numberInt();
-
- list< shared_ptr<Future::CommandResult> > futures;
- BSONArrayBuilder shardArray;
- for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(shardId);
- if (!shard) {
- continue;
- }
-
- futures.push_back(Future::spawnCommand(shard->getConnString().toString(),
- dbName,
- cmdObj,
- options));
- shardArray.append(shardId);
- }
-
- multimap<double, BSONObj> results; // TODO: maybe use merge-sort instead
- string nearStr;
- double time = 0;
- double btreelocs = 0;
- double nscanned = 0;
- double objectsLoaded = 0;
- for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
- shared_ptr<Future::CommandResult> res = *i;
- if ( ! res->join() ) {
- errmsg = res->result()["errmsg"].String();
- if (res->result().hasField("code")) {
- result.append(res->result()["code"]);
- }
- return false;
- }
-
- if (res->result().hasField("near")) {
- nearStr = res->result()["near"].String();
- }
- time += res->result()["stats"]["time"].Number();
- if (!res->result()["stats"]["btreelocs"].eoo()) {
- btreelocs += res->result()["stats"]["btreelocs"].Number();
- }
- nscanned += res->result()["stats"]["nscanned"].Number();
- if (!res->result()["stats"]["objectsLoaded"].eoo()) {
- objectsLoaded += res->result()["stats"]["objectsLoaded"].Number();
- }
-
- BSONForEach(obj, res->result()["results"].embeddedObject()) {
- results.insert(make_pair(obj["dis"].Number(), obj.embeddedObject().getOwned()));
- }
-
- // TODO: maybe shrink results if size() > limit
- }
-
- result.append("ns" , fullns);
- result.append("near", nearStr);
-
- int outCount = 0;
- double totalDistance = 0;
- double maxDistance = 0;
- {
- BSONArrayBuilder sub (result.subarrayStart("results"));
- for (multimap<double, BSONObj>::const_iterator it(results.begin()), end(results.end()); it!= end && outCount < limit; ++it, ++outCount) {
- totalDistance += it->first;
- maxDistance = it->first; // guaranteed to be highest so far
-
- sub.append(it->second);
- }
- sub.done();
- }
-
- {
- BSONObjBuilder sub (result.subobjStart("stats"));
- sub.append("time", time);
- sub.append("btreelocs", btreelocs);
- sub.append("nscanned", nscanned);
- sub.append("objectsLoaded", objectsLoaded);
- sub.append("avgDistance", (outCount == 0) ? 0: (totalDistance / outCount));
- sub.append("maxDistance", maxDistance);
- sub.append("shards", shardArray.arr());
- sub.done();
- }
-
- return true;
- }
- } geo2dFindNearCmd;
-
- class ApplyOpsCmd : public PublicGridCommand {
- public:
- ApplyOpsCmd() : PublicGridCommand( "applyOps" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // applyOps can do pretty much anything, so require all privileges.
- RoleGraph::generateUniversalPrivileges(out);
- }
- virtual bool run(OperationContext* txn, const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result) {
- errmsg = "applyOps not allowed through mongos";
- return false;
- }
- } applyOpsCmd;
-
-
- class CompactCmd : public PublicGridCommand {
- public:
- CompactCmd() : PublicGridCommand( "compact" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- ActionSet actions;
- actions.addAction(ActionType::compact);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
- virtual bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- errmsg = "compact not allowed through mongos";
- return false;
- }
- } compactCmd;
-
- class EvalCmd : public PublicGridCommand {
- public:
- EvalCmd() : PublicGridCommand( "eval", "$eval" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- // $eval can do pretty much anything, so require all privileges.
- RoleGraph::generateUniversalPrivileges(out);
- }
- virtual bool run(OperationContext* txn,
- const string& dbName,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
-
- RARELY {
- warning() << "the eval command is deprecated" << startupWarningsLog;
- }
-
- // $eval isn't allowed to access sharded collections, but we need to leave the
- // shard to detect that.
- auto status = grid.catalogCache()->getDatabase(dbName);
- if (!status.isOK()) {
- return appendCommandStatus(result, status.getStatus());
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
- return passthrough( conf , cmdObj , result );
- }
- } evalCmd;
-
- class CmdListCollections : public PublicGridCommand {
- public:
- CmdListCollections() : PublicGridCommand( "listCollections" ) {}
-
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
-
- // Check for the listCollections ActionType on the database
- // or find on system.namespaces for pre 3.0 systems.
- if (authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forDatabaseName(dbname),
- ActionType::listCollections) ||
- authzSession->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(
- NamespaceString(dbname, "system.namespaces")),
- ActionType::find)) {
- return Status::OK();
- }
-
- return Status(ErrorCodes::Unauthorized,
- str::stream() << "Not authorized to create users on db: " <<
- dbname);
- }
-
- bool run(OperationContext* txn,
+ errmsg = "compact not allowed through mongos";
+ return false;
+ }
+} compactCmd;
+
+class EvalCmd : public PublicGridCommand {
+public:
+ EvalCmd() : PublicGridCommand("eval", "$eval") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ // $eval can do pretty much anything, so require all privileges.
+ RoleGraph::generateUniversalPrivileges(out);
+ }
+ virtual bool run(OperationContext* txn,
const string& dbName,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
-
- auto status = grid.catalogCache()->getDatabase(dbName);
- if (!status.isOK()) {
- return appendEmptyResultSet(result,
- status.getStatus(),
- dbName + ".$cmd.listCollections");
- }
-
- shared_ptr<DBConfig> conf = status.getValue();
- bool retval = passthrough( conf, cmdObj, result );
-
- const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- Status storeCursorStatus = storePossibleCursor(shard->getConnString().toString(),
- result.asTempObj());
- if (!storeCursorStatus.isOK()) {
- return appendCommandStatus(result, storeCursorStatus);
- }
-
- return retval;
- }
- } cmdListCollections;
-
- class CmdListIndexes : public PublicGridCommand {
- public:
- CmdListIndexes() : PublicGridCommand( "listIndexes" ) {}
- virtual void addRequiredPrivileges(const std::string& dbname,
- const BSONObj& cmdObj,
- std::vector<Privilege>* out) {
- string ns = parseNs( dbname, cmdObj );
- ActionSet actions;
- actions.addAction(ActionType::listIndexes);
- out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
- }
-
- bool run(OperationContext* txn,
- const string& dbName,
+ RARELY {
+ warning() << "the eval command is deprecated" << startupWarningsLog;
+ }
+
+ // $eval isn't allowed to access sharded collections, but we need to leave the
+ // shard to detect that.
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ if (!status.isOK()) {
+ return appendCommandStatus(result, status.getStatus());
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+ return passthrough(conf, cmdObj, result);
+ }
+} evalCmd;
+
+class CmdListCollections : public PublicGridCommand {
+public:
+ CmdListCollections() : PublicGridCommand("listCollections") {}
+
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+
+ // Check for the listCollections ActionType on the database
+ // or find on system.namespaces for pre 3.0 systems.
+ if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname),
+ ActionType::listCollections) ||
+ authzSession->isAuthorizedForActionsOnResource(
+ ResourcePattern::forExactNamespace(NamespaceString(dbname, "system.namespaces")),
+ ActionType::find)) {
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::Unauthorized,
+ str::stream() << "Not authorized to create users on db: " << dbname);
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto status = grid.catalogCache()->getDatabase(dbName);
+ if (!status.isOK()) {
+ return appendEmptyResultSet(
+ result, status.getStatus(), dbName + ".$cmd.listCollections");
+ }
+
+ shared_ptr<DBConfig> conf = status.getValue();
+ bool retval = passthrough(conf, cmdObj, result);
+
+ const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ Status storeCursorStatus =
+ storePossibleCursor(shard->getConnString().toString(), result.asTempObj());
+ if (!storeCursorStatus.isOK()) {
+ return appendCommandStatus(result, storeCursorStatus);
+ }
+
+ return retval;
+ }
+} cmdListCollections;
+
+class CmdListIndexes : public PublicGridCommand {
+public:
+ CmdListIndexes() : PublicGridCommand("listIndexes") {}
+ virtual void addRequiredPrivileges(const std::string& dbname,
+ const BSONObj& cmdObj,
+ std::vector<Privilege>* out) {
+ string ns = parseNs(dbname, cmdObj);
+ ActionSet actions;
+ actions.addAction(ActionType::listIndexes);
+ out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
+ }
+
+ bool run(OperationContext* txn,
+ const string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
+ bool retval = passthrough(conf, cmdObj, result);
+
+ const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
+ Status storeCursorStatus =
+ storePossibleCursor(shard->getConnString().toString(), result.asTempObj());
+ if (!storeCursorStatus.isOK()) {
+ return appendCommandStatus(result, storeCursorStatus);
+ }
+
+ return retval;
+ }
+
+} cmdListIndexes;
+
+class AvailableQueryOptions : public Command {
+public:
+ AvailableQueryOptions() : Command("availableQueryOptions", false, "availablequeryoptions") {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ return Status::OK();
+ }
+
+
+ virtual bool run(OperationContext* txn,
+ const string& dbname,
BSONObj& cmdObj,
- int options,
+ int,
string& errmsg,
BSONObjBuilder& result) {
+ result << "options" << QueryOption_AllSupportedForSharding;
+ return true;
+ }
+} availableQueryOptionsCmd;
- auto conf = uassertStatusOK(grid.catalogCache()->getDatabase(dbName));
- bool retval = passthrough( conf, cmdObj, result );
-
- const auto shard = grid.shardRegistry()->getShard(conf->getPrimaryId());
- Status storeCursorStatus = storePossibleCursor(shard->getConnString().toString(),
- result.asTempObj());
- if (!storeCursorStatus.isOK()) {
- return appendCommandStatus(result, storeCursorStatus);
- }
-
- return retval;
- }
-
- } cmdListIndexes;
-
- class AvailableQueryOptions : public Command {
- public:
- AvailableQueryOptions(): Command("availableQueryOptions",
- false ,
- "availablequeryoptions") {
- }
-
- virtual bool slaveOk() const { return true; }
- virtual bool isWriteCommandForConfigServer() const { return false; }
- virtual Status checkAuthForCommand(ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj) {
- return Status::OK();
- }
-
-
- virtual bool run(OperationContext* txn,
- const string& dbname,
- BSONObj& cmdObj,
- int,
- string& errmsg,
- BSONObjBuilder& result) {
- result << "options" << QueryOption_AllSupportedForSharding;
- return true;
- }
- } availableQueryOptionsCmd;
-
- } // namespace pub_grid_cmds
-
-} // namespace mongo
+} // namespace pub_grid_cmds
+} // namespace mongo
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 68e9629227a..7c206b6a978 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -44,141 +44,128 @@
namespace mongo {
- RunOnAllShardsCommand::RunOnAllShardsCommand(const char* name,
- const char* oldName,
- bool useShardConn,
- bool implicitCreateDb)
- : Command(name, false, oldName),
- _useShardConn(useShardConn),
- _implicitCreateDb(implicitCreateDb) {
+RunOnAllShardsCommand::RunOnAllShardsCommand(const char* name,
+ const char* oldName,
+ bool useShardConn,
+ bool implicitCreateDb)
+ : Command(name, false, oldName),
+ _useShardConn(useShardConn),
+ _implicitCreateDb(implicitCreateDb) {}
+
+void RunOnAllShardsCommand::aggregateResults(const std::vector<ShardAndReply>& results,
+ BSONObjBuilder& output) {}
+
+BSONObj RunOnAllShardsCommand::specialErrorHandler(const std::string& server,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ const BSONObj& originalResult) const {
+ return originalResult;
+}
- }
+void RunOnAllShardsCommand::getShardIds(const std::string& db,
+ BSONObj& cmdObj,
+ std::vector<ShardId>& shardIds) {
+ grid.shardRegistry()->getAllShardIds(&shardIds);
+}
- void RunOnAllShardsCommand::aggregateResults(const std::vector<ShardAndReply>& results,
- BSONObjBuilder& output)
- {}
+bool RunOnAllShardsCommand::run(OperationContext* txn,
+ const std::string& dbName,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& output) {
+ LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj;
- BSONObj RunOnAllShardsCommand::specialErrorHandler(const std::string& server,
- const std::string& db,
- const BSONObj& cmdObj,
- const BSONObj& originalResult) const {
- return originalResult;
+ if (_implicitCreateDb) {
+ uassertStatusOK(grid.implicitCreateDb(dbName));
}
- void RunOnAllShardsCommand::getShardIds(const std::string& db,
- BSONObj& cmdObj,
- std::vector<ShardId>& shardIds) {
- grid.shardRegistry()->getAllShardIds(&shardIds);
- }
+ std::vector<ShardId> shardIds;
+ getShardIds(dbName, cmdObj, shardIds);
- bool RunOnAllShardsCommand::run(OperationContext* txn,
- const std::string& dbName,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& output) {
-
- LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj;
-
- if (_implicitCreateDb) {
- uassertStatusOK(grid.implicitCreateDb(dbName));
+ std::list<std::shared_ptr<Future::CommandResult>> futures;
+ for (const ShardId& shardId : shardIds) {
+ const auto shard = grid.shardRegistry()->getShard(shardId);
+ if (!shard) {
+ continue;
}
- std::vector<ShardId> shardIds;
- getShardIds(dbName, cmdObj, shardIds);
-
- std::list<std::shared_ptr<Future::CommandResult>> futures;
- for (const ShardId& shardId : shardIds) {
- const auto shard = grid.shardRegistry()->getShard(shardId);
- if (!shard) {
- continue;
- }
+ futures.push_back(Future::spawnCommand(
+ shard->getConnString().toString(), dbName, cmdObj, 0, NULL, _useShardConn));
+ }
- futures.push_back(Future::spawnCommand(shard->getConnString().toString(),
- dbName,
- cmdObj,
- 0,
- NULL,
- _useShardConn));
+ std::vector<ShardAndReply> results;
+ BSONObjBuilder subobj(output.subobjStart("raw"));
+ BSONObjBuilder errors;
+ int commonErrCode = -1;
+
+ std::list<std::shared_ptr<Future::CommandResult>>::iterator futuresit;
+ std::vector<ShardId>::const_iterator shardIdsIt;
+ // We iterate over the set of shard ids and their corresponding futures in parallel.
+ // TODO: replace with zip iterator if we ever decide to use one from Boost or elsewhere
+ for (futuresit = futures.begin(), shardIdsIt = shardIds.cbegin();
+ futuresit != futures.end() && shardIdsIt != shardIds.end();
+ ++futuresit, ++shardIdsIt) {
+ std::shared_ptr<Future::CommandResult> res = *futuresit;
+
+ if (res->join()) {
+ // success :)
+ BSONObj result = res->result();
+ results.emplace_back(*shardIdsIt, result);
+ subobj.append(res->getServer(), result);
+ continue;
}
- std::vector<ShardAndReply> results;
- BSONObjBuilder subobj (output.subobjStart("raw"));
- BSONObjBuilder errors;
- int commonErrCode = -1;
-
- std::list< std::shared_ptr<Future::CommandResult> >::iterator futuresit;
- std::vector<ShardId>::const_iterator shardIdsIt;
- // We iterate over the set of shard ids and their corresponding futures in parallel.
- // TODO: replace with zip iterator if we ever decide to use one from Boost or elsewhere
- for (futuresit = futures.begin(), shardIdsIt = shardIds.cbegin();
- futuresit != futures.end() && shardIdsIt != shardIds.end();
- ++futuresit, ++shardIdsIt) {
-
- std::shared_ptr<Future::CommandResult> res = *futuresit;
-
- if ( res->join() ) {
- // success :)
- BSONObj result = res->result();
- results.emplace_back(*shardIdsIt, result );
- subobj.append( res->getServer(), result );
- continue;
- }
+ BSONObj result = res->result();
- BSONObj result = res->result();
-
- if ( result["errmsg"].type() ||
- result["code"].numberInt() != 0 ) {
- result = specialErrorHandler( res->getServer(), dbName, cmdObj, result );
-
- BSONElement errmsg = result["errmsg"];
- if ( errmsg.eoo() || errmsg.String().empty() ) {
- // it was fixed!
- results.emplace_back(*shardIdsIt, result );
- subobj.append( res->getServer(), result );
- continue;
- }
- }
+ if (result["errmsg"].type() || result["code"].numberInt() != 0) {
+ result = specialErrorHandler(res->getServer(), dbName, cmdObj, result);
- // Handle "errmsg".
- if( ! result["errmsg"].eoo() ){
- errors.appendAs(result["errmsg"], res->getServer());
- }
- else {
- // Can happen if message is empty, for some reason
- errors.append( res->getServer(), str::stream() <<
- "result without error message returned : " << result );
+ BSONElement errmsg = result["errmsg"];
+ if (errmsg.eoo() || errmsg.String().empty()) {
+ // it was fixed!
+ results.emplace_back(*shardIdsIt, result);
+ subobj.append(res->getServer(), result);
+ continue;
}
+ }
- // Handle "code".
- int errCode = result["code"].numberInt();
- if ( commonErrCode == -1 ) {
- commonErrCode = errCode;
- }
- else if ( commonErrCode != errCode ) {
- commonErrCode = 0;
- }
- results.emplace_back(*shardIdsIt, result );
- subobj.append( res->getServer(), result );
+ // Handle "errmsg".
+ if (!result["errmsg"].eoo()) {
+ errors.appendAs(result["errmsg"], res->getServer());
+ } else {
+ // Can happen if message is empty, for some reason
+ errors.append(res->getServer(),
+ str::stream() << "result without error message returned : " << result);
}
- subobj.done();
+ // Handle "code".
+ int errCode = result["code"].numberInt();
+ if (commonErrCode == -1) {
+ commonErrCode = errCode;
+ } else if (commonErrCode != errCode) {
+ commonErrCode = 0;
+ }
+ results.emplace_back(*shardIdsIt, result);
+ subobj.append(res->getServer(), result);
+ }
- BSONObj errobj = errors.done();
- if (! errobj.isEmpty()) {
- errmsg = errobj.toString(false, true);
+ subobj.done();
- // If every error has a code, and the code for all errors is the same, then add
- // a top-level field "code" with this value to the output object.
- if ( commonErrCode > 0 ) {
- output.append( "code", commonErrCode );
- }
+ BSONObj errobj = errors.done();
+ if (!errobj.isEmpty()) {
+ errmsg = errobj.toString(false, true);
- return false;
+ // If every error has a code, and the code for all errors is the same, then add
+ // a top-level field "code" with this value to the output object.
+ if (commonErrCode > 0) {
+ output.append("code", commonErrCode);
}
- aggregateResults(results, output);
- return true;
+ return false;
}
+ aggregateResults(results, output);
+ return true;
+}
}
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.h b/src/mongo/s/commands/run_on_all_shards_cmd.h
index bc0b6f22084..149887864a7 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.h
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.h
@@ -36,62 +36,68 @@
namespace mongo {
- class BSONObj;
- class BSONObjBuilder;
- class OperationContext;
+class BSONObj;
+class BSONObjBuilder;
+class OperationContext;
- /**
- * Logic for commands that simply map out to all shards then fold the results into
- * a single response.
- *
- * All shards are contacted in parallel.
- *
- * When extending, don't override run() - but rather aggregateResults(). If you need
- * to implement some kind of fall back logic for multiversion clusters,
- * override specialErrorHandler().
- */
- class RunOnAllShardsCommand : public Command {
- public:
- RunOnAllShardsCommand(const char* name,
- const char* oldName=NULL,
- bool useShardConn=false,
- bool implicitCreateDb=false);
+/**
+ * Logic for commands that simply map out to all shards then fold the results into
+ * a single response.
+ *
+ * All shards are contacted in parallel.
+ *
+ * When extending, don't override run() - but rather aggregateResults(). If you need
+ * to implement some kind of fall back logic for multiversion clusters,
+ * override specialErrorHandler().
+ */
+class RunOnAllShardsCommand : public Command {
+public:
+ RunOnAllShardsCommand(const char* name,
+ const char* oldName = NULL,
+ bool useShardConn = false,
+ bool implicitCreateDb = false);
- bool slaveOk() const override { return true; }
- bool adminOnly() const override { return false; }
- bool isWriteCommandForConfigServer() const override { return false; }
+ bool slaveOk() const override {
+ return true;
+ }
+ bool adminOnly() const override {
+ return false;
+ }
+ bool isWriteCommandForConfigServer() const override {
+ return false;
+ }
- // The StringData contains the shard ident.
- // This can be used to create an instance of Shard
- using ShardAndReply = std::tuple<StringData, BSONObj>;
+ // The StringData contains the shard ident.
+ // This can be used to create an instance of Shard
+ using ShardAndReply = std::tuple<StringData, BSONObj>;
- virtual void aggregateResults(const std::vector<ShardAndReply>& results,
- BSONObjBuilder& output);
+ virtual void aggregateResults(const std::vector<ShardAndReply>& results,
+ BSONObjBuilder& output);
- // The default implementation is the identity function.
- virtual BSONObj specialErrorHandler(const std::string& server,
- const std::string& db,
- const BSONObj& cmdObj,
- const BSONObj& originalResult) const;
+ // The default implementation is the identity function.
+ virtual BSONObj specialErrorHandler(const std::string& server,
+ const std::string& db,
+ const BSONObj& cmdObj,
+ const BSONObj& originalResult) const;
- // The default implementation uses all shards.
- virtual void getShardIds(const std::string& db,
- BSONObj& cmdObj,
- std::vector<ShardId>& shardIds);
+ // The default implementation uses all shards.
+ virtual void getShardIds(const std::string& db,
+ BSONObj& cmdObj,
+ std::vector<ShardId>& shardIds);
- bool run(OperationContext* txn,
- const std::string& db,
- BSONObj& cmdObj,
- int options,
- std::string& errmsg,
- BSONObjBuilder& output) final;
+ bool run(OperationContext* txn,
+ const std::string& db,
+ BSONObj& cmdObj,
+ int options,
+ std::string& errmsg,
+ BSONObjBuilder& output) final;
- private:
- // Use ShardConnection as opposed to ScopedDbConnection
- const bool _useShardConn;
+private:
+ // Use ShardConnection as opposed to ScopedDbConnection
+ const bool _useShardConn;
- // Whether the requested database should be created implicitly
- const bool _implicitCreateDb;
- };
+ // Whether the requested database should be created implicitly
+ const bool _implicitCreateDb;
+};
} // namespace mongo